scsi: qla2xxx: Fix Management Server NPort handle reservation logic
drivers/scsi/qla2xxx/qla_init.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qla_devtbl.h"

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

#include <target/target_core_base.h>
#include "qla_target.h"

/*
* QLogic ISP2x00 Hardware Support Function Prototypes.
*/
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);

static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static int qla24xx_post_prli_work(struct scsi_qla_host *, fc_port_t *);
static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
    struct event_arg *);
static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);

/* SRB Extensions ---------------------------------------------------------- */

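/*
 * SRB timer expiry handler: under the hardware lock, clear the command's
 * outstanding-array slot and invoke the IOCB-specific timeout callback.
 */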
void
qla2x00_sp_timeout(struct timer_list *t)
{
	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
	struct srb_iocb *iocb;
	scsi_qla_host_t *vha = sp->vha;
	struct req_que *req;
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	req = vha->hw->req_q_map[0];
	req->outstanding_cmds[sp->handle] = NULL;
	iocb = &sp->u.iocb_cmd;
	iocb->timeout(sp);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

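/* Release an SRB: stop its IOCB timer and return it to the pool. */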
void
qla2x00_sp_free(void *ptr)
{
	srb_t *sp = ptr;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	del_timer(&iocb->timer);
	qla2x00_rel_sp(sp);
}

/* Asynchronous Login/Logout Routines -------------------------------------- */

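/*
 * Timeout, in seconds, used for the asynchronous logio/CT/mailbox
 * commands below; derived from the switch-negotiated R_A_TOV when the
 * firmware provides it.
 */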
unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host *vha)
{
	unsigned long tmo;
	struct qla_hw_data *ha = vha->hw;

	/* Firmware should use switch negotiated r_a_tov for timeout. */
	tmo = ha->r_a_tov / 10 * 2;
	if (IS_QLAFX00(ha)) {
		tmo = FX00_DEF_RATOV * 2;
	} else if (!IS_FWI2_CAPABLE(ha)) {
		/*
		 * Except for earlier ISPs where the timeout is seeded from the
		 * initialization control block.
		 */
		tmo = ha->login_timeout;
	}
	return tmo;
}

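/*
 * Common timeout handler for asynchronous IOCB SRBs: clear the fcport's
 * async flags and complete the SRB with QLA_FUNCTION_TIMEOUT so its
 * done() callback performs the normal cleanup.
 */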
void
qla2x00_async_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	if (fcport) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
		    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
		    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	} else {
		pr_info("Async-%s timeout - hdl=%x.\n",
		    sp->name, sp->handle);
	}

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		/* Retry as needed. */
		lio->u.logio.data[0] = MBS_COMMAND_ERROR;
		lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
			QLA_LOGIO_LOGIN_RETRIED : 0;
		sp->done(sp, QLA_FUNCTION_TIMEOUT);
		break;
	case SRB_LOGOUT_CMD:
	case SRB_CT_PTHRU_CMD:
	case SRB_MB_IOCB:
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
	case SRB_CTRL_VP:
		sp->done(sp, QLA_FUNCTION_TIMEOUT);
		break;
	}
}

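/*
 * Async PLOGI completion: forward the logio status to the fcport state
 * machine as an FCME_PLOGI_DONE event.
 */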
static void
qla2x00_async_login_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PLOGI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}

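/*
 * N2N helpers: compare the remote port name against the local port name
 * to decide which side originates the login.
 */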
static inline bool
fcport_is_smaller(fc_port_t *fcport)
{
	if (wwn_to_u64(fcport->port_name) <
	    wwn_to_u64(fcport->vha->port_name))
		return true;
	else
		return false;
}

static inline bool
fcport_is_bigger(fc_port_t *fcport)
{
	return !fcport_is_smaller(fcport);
}

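/*
 * Issue an asynchronous PLOGI to the fcport (PRLI-only on N2N topology
 * when the remote port name is bigger than the local one).
 */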
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		goto done;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	fcport->disc_state = DSC_LOGIN_PEND;
	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_login_sp_done;
	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
	} else {
		lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

		if (fcport->fc4f_nvme)
			lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
	}

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
	    "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry);
	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}

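/* Async LOGO completion: bump login_gen and notify the target code. */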
static void
qla2x00_async_logout_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	sp->fcport->login_gen++;
	qlt_logo_completion_handler(sp->fcport, res);
	sp->free(sp);
}

int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_LOGOUT_CMD;
	sp->name = "logout";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_logout_sp_done;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->port_name);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	return rval;
}

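/* Explicit PRLO (process logout) handling. */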
void
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Don't re-login in target mode */
	if (!fcport->tgt_session)
		qla2x00_mark_device_lost(vha, fcport, 1, 0);
	qlt_logo_completion_handler(fcport, data[0]);
}

static void
qla2x00_async_prlo_sp_done(void *s, int res)
{
	srb_t *sp = (srb_t *)s;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
		    lio->u.logio.data);
	sp->free(sp);
}

int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_PRLO_CMD;
	sp->name = "prlo";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prlo_sp_done;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}

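/*
 * ADISC completion: a failed ADISC schedules the session for deletion;
 * on success, and with unchanged login/RSCN generations, the port is
 * promoted through the common GPDB-event path.
 */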
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x2066,
		    "%s %8phC: adisc fail: post delete\n",
		    __func__, ea->fcport->port_name);
		qlt_schedule_sess_for_deletion(ea->fcport);
		return;
	}

	if (ea->fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != ea->fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, ea->fcport->port_name);
		qla24xx_post_gidpn_work(vha, ea->fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}

static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct event_arg ea;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x2066,
	    "Async done-%s res %x %8phC\n",
	    sp->name, res, sp->fcport->port_name);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	memset(&ea, 0, sizeof(ea));
	ea.event = FCME_ADISC_DONE;
	ea.rc = res;
	ea.data[0] = lio->u.logio.data[0];
	ea.data[1] = lio->u.logio.data[1];
	ea.iop[0] = lio->u.logio.iop[0];
	ea.iop[1] = lio->u.logio.iop[1];
	ea.fcport = sp->fcport;
	ea.sp = sp;

	qla2x00_fcport_event_handler(vha, &ea);

	sp->free(sp);
}

int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_adisc_sp_done;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	qla2x00_post_async_adisc_work(vha, fcport, data);
	return rval;
}

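/*
 * Process a Get Name List result for one fcport: validate the RSCN and
 * login generations, adopt the firmware-assigned loop id, resolve loop
 * id/N_Port id conflicts and drive the next login step based on the
 * firmware login state and the current topology.
 */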
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
    struct event_arg *ea)
{
	fc_port_t *fcport, *conflict_fcport;
	struct get_name_list_extended *e;
	u16 i, n, found = 0, loop_id;
	port_id_t id;
	u64 wwn;
	u16 data[2];
	u8 current_login_state;

	fcport = ea->fcport;
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc,
	    fcport->login_gen, fcport->last_login_gen,
	    fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->rc) { /* rval */
		if (fcport->login_retry == 0) {
			ql_dbg(ql_dbg_disc, vha, 0x20de,
			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
			    fcport->port_name, fcport->login_retry);
		}
		return;
	}

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20df,
		    "%s %8phC rscn gen changed rscn %d|%d \n",
		    __func__, fcport->port_name,
		    fcport->last_rscn_gen, fcport->rscn_gen);
		qla24xx_post_gidpn_work(vha, fcport);
		return;
	} else if (fcport->last_login_gen != fcport->login_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e0,
		    "%s %8phC login gen changed\n",
		    __func__, fcport->port_name);
		return;
	}

	n = ea->data[0] / sizeof(struct get_name_list_extended);

	ql_dbg(ql_dbg_disc, vha, 0x20e1,
	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
	    __func__, __LINE__, fcport->port_name, n,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, fcport->loop_id);

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
			continue;

		if (IS_SW_RESV_ADDR(id))
			continue;

		found = 1;

		loop_id = le16_to_cpu(e->nport_handle);
		loop_id = (loop_id & 0x7fff);

		ql_dbg(ql_dbg_disc, vha, 0x20e2,
		    "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
		    __func__, fcport->port_name,
		    e->current_login_state, fcport->fw_login_state,
		    id.b.domain, id.b.area, id.b.al_pa,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, loop_id, fcport->loop_id);

		switch (fcport->disc_state) {
		case DSC_DELETE_PEND:
		case DSC_DELETED:
			break;
		default:
			if ((id.b24 != fcport->d_id.b24) ||
			    ((fcport->loop_id != FC_NO_LOOP_ID) &&
			    (fcport->loop_id != loop_id))) {
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
			break;
		}

		fcport->loop_id = loop_id;

		wwn = wwn_to_u64(fcport->port_name);
		qlt_find_sess_invalidate_other(vha, wwn,
		    id, loop_id, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport shares the same loop_id and
			 * nport id. The conflict fcport needs to finish
			 * cleanup before this fcport can proceed to login.
			 */
			conflict_fcport->conflict = fcport;
			fcport->login_pause = 1;
		}

		if (fcport->fc4f_nvme)
			current_login_state = e->current_login_state >> 4;
		else
			current_login_state = e->current_login_state & 0xf;

		switch (vha->hw->current_topology) {
		default:
			switch (current_login_state) {
			case DSC_LS_PRLI_COMP:
				ql_dbg(ql_dbg_disc + ql_dbg_verbose,
				    vha, 0x20e4, "%s %d %8phC post gpdb\n",
				    __func__, __LINE__, fcport->port_name);

				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;
				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PORT_UNAVAIL:
			default:
				if (fcport->loop_id != FC_NO_LOOP_ID)
					qla2x00_clear_loop_id(fcport);

				fcport->loop_id = loop_id;
				fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
				qla24xx_fcport_handle_login(vha, fcport);
				break;
			}
			break;
		case ISP_CFG_N:
			switch (current_login_state) {
			case DSC_LS_PRLI_COMP:
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;

				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (fcport_is_bigger(fcport)) {
					/* local adapter is smaller */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
					break;
				}
				/* drop through */
			default:
				if (fcport_is_smaller(fcport)) {
					/* local adapter is bigger */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
				}
				break;
			}
			break;
		} /* switch (ha->current_topology) */
	}

	if (!found) {
		switch (vha->hw->current_topology) {
		case ISP_CFG_F:
		case ISP_CFG_FL:
			for (i = 0; i < n; i++) {
				e = &vha->gnl.l[i];
				id.b.domain = e->port_id[0];
				id.b.area = e->port_id[1];
				id.b.al_pa = e->port_id[2];
				id.b.rsvd_1 = 0;
				loop_id = le16_to_cpu(e->nport_handle);

				if (fcport->d_id.b24 == id.b24) {
					conflict_fcport =
					    qla2x00_find_fcport_by_wwpn(vha,
						e->port_name, 0);
					ql_dbg(ql_dbg_disc + ql_dbg_verbose,
					    vha, 0x20e5,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    conflict_fcport->port_name);
					qlt_schedule_sess_for_deletion
						(conflict_fcport);
				}
				/*
				 * FW already picked this loop id for
				 * another fcport
				 */
				if (fcport->loop_id == loop_id)
					fcport->loop_id = FC_NO_LOOP_ID;
			}
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		case ISP_CFG_N:
			/*
			 * FW handles the initial login for n2n.
			 * Do link reinit to trigger this auto login.
			 */
			set_bit(N2N_LINK_RESET, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		default:
			break;
		}
	}
} /* gnl_event */

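/*
 * GNL mailbox completion: mark every N_Port handle reported by the
 * firmware in loop_id_map, deliver FCME_GNL_DONE to each waiting fcport
 * and post new-session work for entries not yet known to the driver.
 */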
static void
qla24xx_async_gnl_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;
	bool found = false;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);

	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;
	ea.event = FCME_GNL_DONE;

	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	spin_lock_irqsave(&vha->gnl.fcports_lock, flags);

	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		list_del_init(&fcport->gnl_entry);
		spin_lock(&vha->hw->tgt.sess_lock);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock(&vha->hw->tgt.sess_lock);
		ea.fcport = fcport;

		qla2x00_fcport_event_handler(vha, &ea);
	}
	spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	/* create new fcport if fw has knowledge of new sessions */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);
			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
		}
	}

	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
	if (!list_empty(&fcport->gnl_entry)) {
		spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
		rval = QLA_SUCCESS;
		goto done;
	}

	spin_lock(&vha->hw->tgt.sess_lock);
	fcport->disc_state = DSC_GNL;
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;
	spin_unlock(&vha->hw->tgt.sess_lock);

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	sp->done = qla24xx_async_gnl_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}

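/* Queue deferred GNL (Get Name List) work for the fcport. */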
int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

static
void qla24xx_async_gpdb_sp_done(void *s, int res)
{
	struct srb *sp = s;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	memset(&ea, 0, sizeof(ea));
	ea.event = FCME_GPDB_DONE;
	ea.fcport = fcport;
	ea.sp = sp;

	qla2x00_fcport_event_handler(vha, &ea);

	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(sp);
}

static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, e);
}

static void
qla2x00_async_prli_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.event = FCME_PRLI_DONE;
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;

		qla2x00_fcport_event_handler(vha, &ea);
	}

	sp->free(sp);
}

int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		return rval;

	if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND)
		return rval;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
	    fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b24, fcport->login_retry);

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	e->u.fcport.opt = opt;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}

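/*
 * Issue an asynchronous Get Port Database mailbox command for the
 * fcport; its completion raises an FCME_GPDB_DONE event.
 */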
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->disc_state = DSC_GPDB;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	mbx->u.mbx.in = (void *)pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}

static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    vha->fcport_count);
		qla24xx_post_upd_fcport_work(vha, ea->fcport);
	} else if (ea->fcport->login_succ) {
		/*
		 * We have an existing session. A late RSCN delivery
		 * must have triggered the session to be re-validated.
		 * The session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;
	uint8_t ls;

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name,
	    fcport->disc_state, pd->current_login_state, ea->rc);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (fcport->fc4f_nvme)
		ls = pd->current_login_state >> 4;
	else
		ls = pd->current_login_state & 0xf;

	switch (ls) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_PENDING:
	case PDS_PLOGI_COMPLETE:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		/* Set discovery state back to GNL to retry the login */
		if (qla_dual_mode_enabled(vha) ||
		    qla_ini_mode_enabled(vha)) {
			fcport->disc_state = DSC_GNL;
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}
	__qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */

static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u8 login = 0;
	int rc;

	if (qla_tgt_mode_enabled(vha))
		return;

	if (qla_dual_mode_enabled(vha)) {
		if (N2N_TOPO(vha->hw)) {
			u64 mywwn, wwn;

			mywwn = wwn_to_u64(vha->port_name);
			wwn = wwn_to_u64(fcport->port_name);
			if (mywwn > wwn)
				login = 1;
			else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			    && time_after_eq(jiffies,
				    fcport->plogi_nack_done_deadline))
				login = 1;
		} else {
			login = 1;
		}
	} else {
		/* initiator mode */
		login = 1;
	}

	if (login) {
		if (fcport->loop_id == FC_NO_LOOP_ID) {
			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
			rc = qla2x00_find_new_loop_id(vha, fcport);
			if (rc) {
				ql_dbg(ql_dbg_disc, vha, 0x20e6,
				    "%s %d %8phC post del sess - out of loopid\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->scan_state = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
		}
		ql_dbg(ql_dbg_disc, vha, 0x20bf,
		    "%s %d %8phC post login\n",
		    __func__, __LINE__, fcport->port_name);
		qla2x00_post_async_login_work(vha, fcport, NULL);
	}
}

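/*
 * One step of the fcport login state machine: depending on the current
 * discovery state, post GNNID/GNL/GIDPN/ADISC/PRLI work or start a
 * direct login attempt.
 */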
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u64 wwn;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->login_retry,
	    fcport->loop_id, fcport->scan_state);

	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
		return 0;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* for pure Target Mode. Login will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return 0;

	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		fcport->login_retry--;
		wwn = wwn_to_u64(fcport->node_name);
		if (wwn == 0) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d %8phC post GNNID\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_post_gnnid_work(vha, fcport);
		} else if (fcport->loop_id == FC_NO_LOOP_ID) {
			ql_dbg(ql_dbg_disc, vha, 0x20bd,
			    "%s %d %8phC post gnl\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_post_gnl_work(vha, fcport);
		} else {
			fcport->login_retry--;
			qla_chk_n2n_b4_login(vha, fcport);
		}
		break;

	case DSC_GNL:
		if (fcport->login_pause) {
			fcport->last_rscn_gen = fcport->rscn_gen;
			fcport->last_login_gen = fcport->login_gen;
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			break;
		}

		qla_chk_n2n_b4_login(vha, fcport);
		break;

	case DSC_LOGIN_FAILED:
		fcport->login_retry--;
		ql_dbg(ql_dbg_disc, vha, 0x20d0,
		    "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qla24xx_post_gidpn_work(vha, fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		ql_dbg(ql_dbg_disc, vha, 0x20d1,
		    "%s %d %8phC post adisc\n",
		    __func__, __LINE__, fcport->port_name);
		fcport->login_retry--;
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	case DSC_LOGIN_PEND:
		if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			qla24xx_post_prli_work(vha, fcport);
		break;

	default:
		break;
	}

	return 0;
}

static
void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
{
	fcport->rscn_gen++;

	ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
	    "%s %8phC DS %d LS %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state);

	if (fcport->flags & FCF_ASYNC_SENT)
		return;

	switch (fcport->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_COMPLETE:
		qla24xx_post_gpnid_work(fcport->vha, &ea->id);
		break;
	default:
		break;
	}
}

int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
    u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.new_sess.id = *id;
	e->u.new_sess.pla = pla;
	e->u.new_sess.fc4_type = fc4_type;
	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
	if (node_name)
		memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);

	return qla2x00_post_work(vha, e);
}

static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
    struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
		return;

	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return;
		}
	}

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
		    __func__, __LINE__, fcport->port_name);

		qla24xx_post_gidpn_work(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}

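/*
 * Central dispatcher for fcport discovery events (RSCN, relogin and the
 * *_DONE completions posted by the async handlers above).
 */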
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *f, *tf;
	uint32_t id = 0, mask, rid;
	unsigned long flags;
	fc_port_t *fcport;

	switch (ea->event) {
	case FCME_RELOGIN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;

		qla24xx_handle_relogin_event(vha, ea);
		break;
	case FCME_RSCN:
		if (test_bit(UNLOADING, &vha->dpc_flags))
			return;
		switch (ea->id.b.rsvd_1) {
		case RSCN_PORT_ADDR:
			fcport = qla2x00_find_fcport_by_nportid
				(vha, &ea->id, 1);
			if (fcport)
				fcport->rscn_rcvd = 1;

			spin_lock_irqsave(&vha->work_lock, flags);
			if (vha->scan.scan_flags == 0) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s: schedule\n", __func__);
				vha->scan.scan_flags |= SF_QUEUED;
				schedule_delayed_work(&vha->scan.scan_work, 5);
			}
			spin_unlock_irqrestore(&vha->work_lock, flags);

			break;
		case RSCN_AREA_ADDR:
		case RSCN_DOM_ADDR:
			if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
				mask = 0xffff00;
				ql_dbg(ql_dbg_async, vha, 0x5044,
				    "RSCN: Area 0x%06x was affected\n",
				    ea->id.b24);
			} else {
				mask = 0xff0000;
				ql_dbg(ql_dbg_async, vha, 0x507a,
				    "RSCN: Domain 0x%06x was affected\n",
				    ea->id.b24);
			}

			rid = ea->id.b24 & mask;
			list_for_each_entry_safe(f, tf, &vha->vp_fcports,
			    list) {
				id = f->d_id.b24 & mask;
				if (rid == id) {
					ea->fcport = f;
					qla24xx_handle_rscn_event(f, ea);
				}
			}
			break;
		case RSCN_FAB_ADDR:
		default:
			ql_log(ql_log_warn, vha, 0xd045,
			    "RSCN: Fabric was affected. Addr format %d\n",
			    ea->id.b.rsvd_1);
			qla2x00_mark_all_devices_lost(vha, 1);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		}
		break;
	case FCME_GIDPN_DONE:
		qla24xx_handle_gidpn_event(vha, ea);
		break;
	case FCME_GNL_DONE:
		qla24xx_handle_gnl_done_event(vha, ea);
		break;
	case FCME_GPSC_DONE:
		qla24xx_handle_gpsc_event(vha, ea);
		break;
	case FCME_PLOGI_DONE:	/* Initiator side sent LLIOCB */
		qla24xx_handle_plogi_done_event(vha, ea);
		break;
	case FCME_PRLI_DONE:
		qla24xx_handle_prli_done_event(vha, ea);
		break;
	case FCME_GPDB_DONE:
		qla24xx_handle_gpdb_event(vha, ea);
		break;
	case FCME_GPNID_DONE:
		qla24xx_handle_gpnid_event(vha, ea);
		break;
	case FCME_GFFID_DONE:
		qla24xx_handle_gffid_event(vha, ea);
		break;
	case FCME_ADISC_DONE:
		qla24xx_handle_adisc_event(vha, ea);
		break;
	case FCME_GNNID_DONE:
		qla24xx_handle_gnnid_event(vha, ea);
		break;
	case FCME_GFPNID_DONE:
		qla24xx_handle_gfpnid_event(vha, ea);
		break;
	default:
		BUG_ON(1);
		break;
	}
}

static void
qla2x00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	tmf->u.tmf.comp_status = CS_TIMEOUT;
	complete(&tmf->u.tmf.comp);
}

static void
qla2x00_tmf_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	complete(&tmf->u.tmf.comp);
}

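/*
 * Issue a task management IOCB (e.g. LUN reset) and wait for it to
 * complete; a marker IOCB is issued afterwards unless the driver is
 * unloading.
 */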
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
    uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";

	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));

	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qla2x00_tmf_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
		ql_dbg(ql_dbg_taskm, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		flags = tm_iocb->u.tmf.flags;
		lun = (uint16_t)tm_iocb->u.tmf.lun;

		/* Issue Marker IOCB */
		qla2x00_marker(vha, vha->hw->req_q_map[0],
		    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	}

done_free_sp:
	sp->free(sp);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}

static void
qla24xx_abort_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	abt->u.abt.comp_status = CS_TIMEOUT;
	complete(&abt->u.abt.comp);
}

static void
qla24xx_abort_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *abt = &sp->u.iocb_cmd;

	if (del_timer(&sp->u.iocb_cmd.timer))
		complete(&abt->u.abt.comp);
}

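/*
 * Build an abort IOCB for an outstanding command SRB and wait for the
 * firmware to complete it.
 */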
int
qla24xx_async_abort_cmd(srb_t *cmd_sp)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	fc_port_t *fcport = cmd_sp->fcport;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";

	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));

	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;

	if (vha->flags.qpairs_available && cmd_sp->qpair)
		abt_iocb->u.abt.req_que_no =
		    cpu_to_le16(cmd_sp->qpair->req->id);
	else
		abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);

	sp->done = qla24xx_abort_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, target_id=%x\n",
	    cmd_sp->handle, fcport->tgt_id);

	wait_for_completion(&abt_iocb->u.abt.comp);

	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(sp);
done:
	return rval;
}

int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long flags = 0;

	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = vha->req;

	if (vha->flags.qpairs_available && sp->qpair)
		req = sp->qpair->req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp);
}

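/*
 * PRLI completion: on success fetch the port database; otherwise fall
 * back to a relogin, or to an FCP PRLI for N2N ports that failed the
 * NVMe PRLI.
 */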
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
		    (ea->iop[1] == 0x50000)) {   /* reason 5=busy expl:0x0 */
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			break;
		}

		if (ea->fcport->n2n_flag) {
			ql_dbg(ql_dbg_disc, vha, 0x2118,
			    "%s %d %8phC post fc4 prli\n",
			    __func__, __LINE__, ea->fcport->port_name);
			ea->fcport->fc4f_nvme = 0;
			ea->fcport->n2n_flag = 0;
			qla24xx_post_prli_work(vha, ea->fcport);
		}
		ql_dbg(ql_dbg_disc, vha, 0x2119,
		    "%s %d %8phC unhandle event of %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
		break;
	}
}

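/*
 * PLOGI completion: on success continue with PRLI or GPDB; on loop id
 * or N_Port id collisions, resolve the conflict before retrying.
 */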
1727static void
1728qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ac280b67 1729{
726b8548 1730 port_id_t cid; /* conflict Nport id */
a084fd68
QT
1731 u16 lid;
1732 struct fc_port *conflict_fcport;
82abdcaf 1733 unsigned long flags;
a4239945
QT
1734 struct fc_port *fcport = ea->fcport;
1735
f352eeb7
QT
1736 ql_dbg(ql_dbg_disc, vha, 0xffff,
1737 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
1738 __func__, fcport->port_name, fcport->disc_state,
1739 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
1740 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1,
1741 ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
1742
a4239945
QT
1743 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1744 (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
1745 ql_dbg(ql_dbg_disc, vha, 0x20ea,
1746 "%s %d %8phC Remote is trying to login\n",
1747 __func__, __LINE__, fcport->port_name);
1748 return;
1749 }
1750
1751 if (fcport->disc_state == DSC_DELETE_PEND)
1752 return;
1753
1754 if (ea->sp->gen2 != fcport->login_gen) {
1755 /* target side must have changed it. */
1756 ql_dbg(ql_dbg_disc, vha, 0x20d3,
f352eeb7
QT
1757 "%s %8phC generation changed\n",
1758 __func__, fcport->port_name);
1759 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
a4239945
QT
1760 return;
1761 } else if (ea->sp->gen1 != fcport->rscn_gen) {
1762 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
1763 __func__, __LINE__, fcport->port_name);
1764 qla24xx_post_gidpn_work(vha, fcport);
1765 return;
1766 }
ac280b67 1767
726b8548 1768 switch (ea->data[0]) {
ac280b67 1769 case MBS_COMMAND_COMPLETE:
a4f92a32
AV
1770 /*
1771 * Driver must validate login state - If PRLI not complete,
1772 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
1773 * requests.
1774 */
a5d42f4c
DG
1775 if (ea->fcport->fc4f_nvme) {
1776 ql_dbg(ql_dbg_disc, vha, 0x2117,
1777 "%s %d %8phC post prli\n",
1778 __func__, __LINE__, ea->fcport->port_name);
1779 qla24xx_post_prli_work(vha, ea->fcport);
1780 } else {
1781 ql_dbg(ql_dbg_disc, vha, 0x20ea,
a084fd68
QT
1782 "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
1783 __func__, __LINE__, ea->fcport->port_name,
1784 ea->fcport->loop_id, ea->fcport->d_id.b24);
1785
1786 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
82abdcaf 1787 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
a5d42f4c
DG
1788 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
1789 ea->fcport->logout_on_delete = 1;
3515832c 1790 ea->fcport->send_els_logo = 0;
82abdcaf
QT
1791 ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
1792 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1793
a5d42f4c
DG
1794 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1795 }
ac280b67
AV
1796 break;
1797 case MBS_COMMAND_ERROR:
83548fe2 1798 ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
726b8548
QT
1799 __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
1800
1801 ea->fcport->flags &= ~FCF_ASYNC_SENT;
1802 ea->fcport->disc_state = DSC_LOGIN_FAILED;
1803 if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
ac280b67
AV
1804 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1805 else
726b8548 1806 qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
ac280b67
AV
1807 break;
1808 case MBS_LOOP_ID_USED:
726b8548
QT
1809 /* data[1] = IO PARAM 1 = nport ID */
1810 cid.b.domain = (ea->iop[1] >> 16) & 0xff;
1811 cid.b.area = (ea->iop[1] >> 8) & 0xff;
1812 cid.b.al_pa = ea->iop[1] & 0xff;
1813 cid.b.rsvd_1 = 0;
1814
83548fe2
QT
1815 ql_dbg(ql_dbg_disc, vha, 0x20ec,
1816 "%s %d %8phC LoopID 0x%x in use post gnl\n",
1817 __func__, __LINE__, ea->fcport->port_name,
1818 ea->fcport->loop_id);
726b8548
QT
1819
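		/*
		 * If the conflicting N_Port ID is a switch-reserved address,
		 * keep that NPort handle marked busy in loop_id_map and give
		 * up ours; otherwise simply release our loop ID before GNL.
		 */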
1820 if (IS_SW_RESV_ADDR(cid)) {
1821 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
1822 ea->fcport->loop_id = FC_NO_LOOP_ID;
1823 } else {
1824 qla2x00_clear_loop_id(ea->fcport);
ac280b67 1825 }
726b8548
QT
1826 qla24xx_post_gnl_work(vha, ea->fcport);
1827 break;
1828 case MBS_PORT_ID_USED:
83548fe2
QT
1829 ql_dbg(ql_dbg_disc, vha, 0x20ed,
1830 "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
1831 __func__, __LINE__, ea->fcport->port_name,
1832 ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
1833 ea->fcport->d_id.b.al_pa);
726b8548 1834
a084fd68
QT
1835 lid = ea->iop[1] & 0xffff;
1836 qlt_find_sess_invalidate_other(vha,
1837 wwn_to_u64(ea->fcport->port_name),
1838 ea->fcport->d_id, lid, &conflict_fcport);
1839
1840 if (conflict_fcport) {
1841 /*
1842 * Another fcport shares the same loop_id/nport id.
1843 * The conflicting fcport needs to finish cleanup before this
1844 * fcport can proceed to login.
1845 */
1846 conflict_fcport->conflict = ea->fcport;
1847 ea->fcport->login_pause = 1;
1848
1849 ql_dbg(ql_dbg_disc, vha, 0x20ed,
1850 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
1851 __func__, __LINE__, ea->fcport->port_name,
1852 ea->fcport->d_id.b24, lid);
1853 qla2x00_clear_loop_id(ea->fcport);
1854 qla24xx_post_gidpn_work(vha, ea->fcport);
1855 } else {
1856 ql_dbg(ql_dbg_disc, vha, 0x20ed,
1857 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
1858 __func__, __LINE__, ea->fcport->port_name,
1859 ea->fcport->d_id.b24, lid);
1860
1861 qla2x00_clear_loop_id(ea->fcport);
1862 set_bit(lid, vha->hw->loop_id_map);
1863 ea->fcport->loop_id = lid;
1864 ea->fcport->keep_nport_handle = 0;
94cff6e1 1865 qlt_schedule_sess_for_deletion(ea->fcport);
a084fd68 1866 }
ac280b67
AV
1867 break;
1868 }
4916392b 1869 return;
ac280b67
AV
1870}
1871
4916392b 1872void
ac280b67
AV
1873qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1874 uint16_t *data)
1875{
a6ca8878 1876 qlt_logo_completion_handler(fcport, data[0]);
726b8548 1877 fcport->login_gen++;
fa83e658 1878 fcport->flags &= ~FCF_ASYNC_ACTIVE;
4916392b 1879 return;
ac280b67
AV
1880}
1881
4916392b 1882void
5ff1d584
AV
1883qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1884 uint16_t *data)
1885{
fa83e658 1886 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
5ff1d584
AV
1887 if (data[0] == MBS_COMMAND_COMPLETE) {
1888 qla2x00_update_fcport(vha, fcport);
1889
4916392b 1890 return;
5ff1d584
AV
1891 }
1892
1893 /* Retry login. */
5ff1d584
AV
1894 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
1895 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1896 else
80d79440 1897 qla2x00_mark_device_lost(vha, fcport, 1, 0);
5ff1d584 1898
4916392b 1899 return;
5ff1d584
AV
1900}
1901
1da177e4
LT
1902/****************************************************************************/
1903/* QLogic ISP2x00 Hardware Support Functions. */
1904/****************************************************************************/
1905
fa492630 1906static int
7d613ac6
SV
1907qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
1908{
1909 int rval = QLA_SUCCESS;
1910 struct qla_hw_data *ha = vha->hw;
1911 uint32_t idc_major_ver, idc_minor_ver;
711aa7f7 1912 uint16_t config[4];
7d613ac6
SV
1913
1914 qla83xx_idc_lock(vha, 0);
1915
1916 /* SV: TODO: Assign initialization timeout from
1917 * flash-info / other param
1918 */
1919 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
1920 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
1921
1922 /* Set our fcoe function presence */
1923 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
1924 ql_dbg(ql_dbg_p3p, vha, 0xb077,
1925 "Error while setting DRV-Presence.\n");
1926 rval = QLA_FUNCTION_FAILED;
1927 goto exit;
1928 }
1929
1930 /* Decide the reset ownership */
1931 qla83xx_reset_ownership(vha);
1932
1933 /*
1934 * On first protocol driver load:
1935 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
1936 * register.
1937 * Others: Check compatibility with current IDC Major version.
1938 */
1939 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
1940 if (ha->flags.nic_core_reset_owner) {
1941 /* Set IDC Major version */
1942 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
1943 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
1944
1945 /* Clearing IDC-Lock-Recovery register */
1946 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
1947 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
1948 /*
1949 * Clear further IDC participation if we are not compatible with
1950 * the current IDC Major Version.
1951 */
1952 ql_log(ql_log_warn, vha, 0xb07d,
1953 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
1954 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
1955 __qla83xx_clear_drv_presence(vha);
1956 rval = QLA_FUNCTION_FAILED;
1957 goto exit;
1958 }
1959 /* Each function sets its supported Minor version. */
1960 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
1961 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
1962 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
1963
711aa7f7
SK
1964 if (ha->flags.nic_core_reset_owner) {
1965 memset(config, 0, sizeof(config));
1966 if (!qla81xx_get_port_config(vha, config))
1967 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
1968 QLA8XXX_DEV_READY);
1969 }
1970
7d613ac6
SV
1971 rval = qla83xx_idc_state_handler(vha);
1972
1973exit:
1974 qla83xx_idc_unlock(vha, 0);
1975
1976 return rval;
1977}
1978
1da177e4
LT
1979/*
1980* qla2x00_initialize_adapter
1981* Initialize board.
1982*
1983* Input:
1984* vha = adapter block pointer.
1985*
1986* Returns:
1987* 0 = success
1988*/
1989int
e315cd28 1990qla2x00_initialize_adapter(scsi_qla_host_t *vha)
1da177e4
LT
1991{
1992 int rval;
e315cd28 1993 struct qla_hw_data *ha = vha->hw;
73208dfd 1994 struct req_que *req = ha->req_q_map[0];
2533cf67 1995
fc90adaf
JC
1996 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
1997 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
1998
1da177e4 1999 /* Clear adapter flags. */
e315cd28 2000 vha->flags.online = 0;
2533cf67 2001 ha->flags.chip_reset_done = 0;
e315cd28 2002 vha->flags.reset_active = 0;
85880801
AV
2003 ha->flags.pci_channel_io_perm_failure = 0;
2004 ha->flags.eeh_busy = 0;
fabbb8df 2005 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
e315cd28
AC
2006 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2007 atomic_set(&vha->loop_state, LOOP_DOWN);
2008 vha->device_flags = DFLG_NO_CABLE;
2009 vha->dpc_flags = 0;
2010 vha->flags.management_server_logged_in = 0;
2011 vha->marker_needed = 0;
1da177e4
LT
2012 ha->isp_abort_cnt = 0;
2013 ha->beacon_blink_led = 0;
2014
73208dfd
AC
2015 set_bit(0, ha->req_qid_map);
2016 set_bit(0, ha->rsp_qid_map);
2017
cfb0919c 2018 ql_dbg(ql_dbg_init, vha, 0x0040,
7c3df132 2019 "Configuring PCI space...\n");
e315cd28 2020 rval = ha->isp_ops->pci_config(vha);
1da177e4 2021 if (rval) {
7c3df132
SK
2022 ql_log(ql_log_warn, vha, 0x0044,
2023 "Unable to configure PCI space.\n");
1da177e4
LT
2024 return (rval);
2025 }
2026
e315cd28 2027 ha->isp_ops->reset_chip(vha);
1da177e4 2028
e315cd28 2029 rval = qla2xxx_get_flash_info(vha);
c00d8994 2030 if (rval) {
7c3df132
SK
2031 ql_log(ql_log_fatal, vha, 0x004f,
2032 "Unable to validate FLASH data.\n");
7ec0effd
AD
2033 return rval;
2034 }
2035
2036 if (IS_QLA8044(ha)) {
2037 qla8044_read_reset_template(vha);
2038
2039 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
2040 * If DONTRESET_BIT0 is set, drivers should not set dev_state
2041 * to NEED_RESET. But if NEED_RESET is set, drivers
2042 * should honor the reset. */
2043 if (ql2xdontresethba == 1)
2044 qla8044_set_idc_dontreset(vha);
c00d8994
AV
2045 }
2046
73208dfd 2047 ha->isp_ops->get_flash_version(vha, req->ring);
cfb0919c 2048 ql_dbg(ql_dbg_init, vha, 0x0061,
7c3df132 2049 "Configure NVRAM parameters...\n");
0107109e 2050
e315cd28 2051 ha->isp_ops->nvram_config(vha);
1da177e4 2052
d4c760c2
AV
2053 if (ha->flags.disable_serdes) {
2054 /* Mask HBA via NVRAM settings? */
7c3df132 2055 ql_log(ql_log_info, vha, 0x0077,
7b833558 2056 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
d4c760c2
AV
2057 return QLA_FUNCTION_FAILED;
2058 }
2059
cfb0919c 2060 ql_dbg(ql_dbg_init, vha, 0x0078,
7c3df132 2061 "Verifying loaded RISC code...\n");
1da177e4 2062
e315cd28
AC
2063 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
2064 rval = ha->isp_ops->chip_diag(vha);
d19044c3
AV
2065 if (rval)
2066 return (rval);
e315cd28 2067 rval = qla2x00_setup_chip(vha);
d19044c3
AV
2068 if (rval)
2069 return (rval);
1da177e4 2070 }
a9083016 2071
4d4df193 2072 if (IS_QLA84XX(ha)) {
e315cd28 2073 ha->cs84xx = qla84xx_get_chip(vha);
4d4df193 2074 if (!ha->cs84xx) {
7c3df132 2075 ql_log(ql_log_warn, vha, 0x00d0,
4d4df193
HK
2076 "Unable to configure ISP84XX.\n");
2077 return QLA_FUNCTION_FAILED;
2078 }
2079 }
2d70c103 2080
ead03855 2081 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2d70c103
NB
2082 rval = qla2x00_init_rings(vha);
2083
2533cf67 2084 ha->flags.chip_reset_done = 1;
1da177e4 2085
9a069e19 2086 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
6c452a45 2087 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
9a069e19
GM
2088 rval = qla84xx_init_chip(vha);
2089 if (rval != QLA_SUCCESS) {
7c3df132
SK
2090 ql_log(ql_log_warn, vha, 0x00d4,
2091 "Unable to initialize ISP84XX.\n");
8d2b21db 2092 qla84xx_put_chip(vha);
9a069e19
GM
2093 }
2094 }
2095
7d613ac6
SV
2096 /* Load the NIC Core f/w if we are the first protocol driver. */
2097 if (IS_QLA8031(ha)) {
2098 rval = qla83xx_nic_core_fw_load(vha);
2099 if (rval)
2100 ql_log(ql_log_warn, vha, 0x0124,
2101 "Error in initializing NIC Core f/w.\n");
2102 }
2103
2f0f3f4f
MI
2104 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
2105 qla24xx_read_fcp_prio_cfg(vha);
09ff701a 2106
c46e65c7
JC
2107 if (IS_P3P_TYPE(ha))
2108 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
2109 else
2110 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
2111
1da177e4
LT
2112 return (rval);
2113}
2114
2115/**
abbd8870 2116 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
2db6228d 2117 * @vha: HA context
1da177e4
LT
2118 *
2119 * Returns 0 on success.
2120 */
abbd8870 2121int
e315cd28 2122qla2100_pci_config(scsi_qla_host_t *vha)
1da177e4 2123{
a157b101 2124 uint16_t w;
abbd8870 2125 unsigned long flags;
e315cd28 2126 struct qla_hw_data *ha = vha->hw;
3d71644c 2127 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 2128
1da177e4 2129 pci_set_master(ha->pdev);
af6177d8 2130 pci_try_set_mwi(ha->pdev);
1da177e4 2131
1da177e4 2132 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 2133 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
abbd8870
AV
2134 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2135
737faece 2136 pci_disable_rom(ha->pdev);
1da177e4
LT
2137
2138 /* Get PCI bus information. */
2139 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 2140 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
1da177e4
LT
2141 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2142
abbd8870
AV
2143 return QLA_SUCCESS;
2144}
1da177e4 2145
abbd8870
AV
2146/**
2147 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
2db6228d 2148 * @vha: HA context
abbd8870
AV
2149 *
2150 * Returns 0 on success.
2151 */
2152int
e315cd28 2153qla2300_pci_config(scsi_qla_host_t *vha)
abbd8870 2154{
a157b101 2155 uint16_t w;
abbd8870
AV
2156 unsigned long flags = 0;
2157 uint32_t cnt;
e315cd28 2158 struct qla_hw_data *ha = vha->hw;
3d71644c 2159 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 2160
abbd8870 2161 pci_set_master(ha->pdev);
af6177d8 2162 pci_try_set_mwi(ha->pdev);
1da177e4 2163
abbd8870 2164 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 2165 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1da177e4 2166
abbd8870
AV
2167 if (IS_QLA2322(ha) || IS_QLA6322(ha))
2168 w &= ~PCI_COMMAND_INTX_DISABLE;
a157b101 2169 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1da177e4 2170
abbd8870
AV
2171 /*
2172 * If this is a 2300 card and not 2312, reset the
2173 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
2174 * the 2310 also reports itself as a 2300 so we need to get the
2175 * fb revision level -- a 6 indicates it really is a 2300 and
2176 * not a 2310.
2177 */
2178 if (IS_QLA2300(ha)) {
2179 spin_lock_irqsave(&ha->hardware_lock, flags);
1da177e4 2180
abbd8870 2181 /* Pause RISC. */
3d71644c 2182 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
abbd8870 2183 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 2184 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
abbd8870 2185 break;
1da177e4 2186
abbd8870
AV
2187 udelay(10);
2188 }
1da177e4 2189
abbd8870 2190 /* Select FPM registers. */
3d71644c
AV
2191 WRT_REG_WORD(&reg->ctrl_status, 0x20);
2192 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
2193
2194 /* Get the fb rev level */
3d71644c 2195 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
abbd8870
AV
2196
2197 if (ha->fb_rev == FPM_2300)
a157b101 2198 pci_clear_mwi(ha->pdev);
abbd8870
AV
2199
2200 /* Deselect FPM registers. */
3d71644c
AV
2201 WRT_REG_WORD(&reg->ctrl_status, 0x0);
2202 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
2203
2204 /* Release RISC module. */
3d71644c 2205 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
abbd8870 2206 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 2207 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
abbd8870
AV
2208 break;
2209
2210 udelay(10);
1da177e4 2211 }
1da177e4 2212
abbd8870
AV
2213 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2214 }
1da177e4 2215
abbd8870
AV
2216 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2217
737faece 2218 pci_disable_rom(ha->pdev);
1da177e4 2219
abbd8870
AV
2220 /* Get PCI bus information. */
2221 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 2222 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
2223 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2224
2225 return QLA_SUCCESS;
1da177e4
LT
2226}
2227
0107109e
AV
2228/**
2229 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
2db6228d 2230 * @vha: HA context
0107109e
AV
2231 *
2232 * Returns 0 on success.
2233 */
2234int
e315cd28 2235qla24xx_pci_config(scsi_qla_host_t *vha)
0107109e 2236{
a157b101 2237 uint16_t w;
0107109e 2238 unsigned long flags = 0;
e315cd28 2239 struct qla_hw_data *ha = vha->hw;
0107109e 2240 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
0107109e
AV
2241
2242 pci_set_master(ha->pdev);
af6177d8 2243 pci_try_set_mwi(ha->pdev);
0107109e
AV
2244
2245 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 2246 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
0107109e
AV
2247 w &= ~PCI_COMMAND_INTX_DISABLE;
2248 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2249
2250 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2251
2252 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
f85ec187
AV
2253 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
2254 pcix_set_mmrbc(ha->pdev, 2048);
0107109e
AV
2255
2256 /* PCIe -- adjust Maximum Read Request Size (2048). */
e67f1321 2257 if (pci_is_pcie(ha->pdev))
5ffd3a52 2258 pcie_set_readrq(ha->pdev, 4096);
0107109e 2259
737faece 2260 pci_disable_rom(ha->pdev);
0107109e 2261
44c10138 2262 ha->chip_revision = ha->pdev->revision;
a8488abe 2263
0107109e
AV
2264 /* Get PCI bus information. */
2265 spin_lock_irqsave(&ha->hardware_lock, flags);
2266 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
2267 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2268
2269 return QLA_SUCCESS;
2270}
2271
c3a2f0df
AV
2272/**
2273 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
2db6228d 2274 * @vha: HA context
c3a2f0df
AV
2275 *
2276 * Returns 0 on success.
2277 */
2278int
e315cd28 2279qla25xx_pci_config(scsi_qla_host_t *vha)
c3a2f0df
AV
2280{
2281 uint16_t w;
e315cd28 2282 struct qla_hw_data *ha = vha->hw;
c3a2f0df
AV
2283
2284 pci_set_master(ha->pdev);
2285 pci_try_set_mwi(ha->pdev);
2286
2287 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2288 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2289 w &= ~PCI_COMMAND_INTX_DISABLE;
2290 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2291
2292 /* PCIe -- adjust Maximum Read Request Size (2048). */
e67f1321 2293 if (pci_is_pcie(ha->pdev))
5ffd3a52 2294 pcie_set_readrq(ha->pdev, 4096);
c3a2f0df 2295
737faece 2296 pci_disable_rom(ha->pdev);
c3a2f0df
AV
2297
2298 ha->chip_revision = ha->pdev->revision;
2299
2300 return QLA_SUCCESS;
2301}
2302
1da177e4
LT
2303/**
2304 * qla2x00_isp_firmware() - Choose firmware image.
2db6228d 2305 * @vha: HA context
1da177e4
LT
2306 *
2307 * Returns 0 on success.
2308 */
2309static int
e315cd28 2310qla2x00_isp_firmware(scsi_qla_host_t *vha)
1da177e4
LT
2311{
2312 int rval;
42e421b1
AV
2313 uint16_t loop_id, topo, sw_cap;
2314 uint8_t domain, area, al_pa;
e315cd28 2315 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
2316
2317 /* Assume loading risc code */
fa2a1ce5 2318 rval = QLA_FUNCTION_FAILED;
1da177e4
LT
2319
2320 if (ha->flags.disable_risc_code_load) {
7c3df132 2321 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
1da177e4
LT
2322
2323 /* Verify checksum of loaded RISC code. */
e315cd28 2324 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
42e421b1
AV
2325 if (rval == QLA_SUCCESS) {
2326 /* And, verify we are not in ROM code. */
e315cd28 2327 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
42e421b1
AV
2328 &area, &domain, &topo, &sw_cap);
2329 }
1da177e4
LT
2330 }
2331
7c3df132
SK
2332 if (rval)
2333 ql_dbg(ql_dbg_init, vha, 0x007a,
2334 "**** Load RISC code ****.\n");
1da177e4
LT
2335
2336 return (rval);
2337}
2338
2339/**
2340 * qla2x00_reset_chip() - Reset ISP chip.
2db6228d 2341 * @vha: HA context
1da177e4
LT
2342 *
2343 * Returns 0 on success.
2344 */
abbd8870 2345void
e315cd28 2346qla2x00_reset_chip(scsi_qla_host_t *vha)
1da177e4
LT
2347{
2348 unsigned long flags = 0;
e315cd28 2349 struct qla_hw_data *ha = vha->hw;
3d71644c 2350 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 2351 uint32_t cnt;
1da177e4
LT
2352 uint16_t cmd;
2353
85880801
AV
2354 if (unlikely(pci_channel_offline(ha->pdev)))
2355 return;
2356
fd34f556 2357 ha->isp_ops->disable_intrs(ha);
1da177e4
LT
2358
2359 spin_lock_irqsave(&ha->hardware_lock, flags);
2360
2361 /* Turn off master enable */
2362 cmd = 0;
2363 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
2364 cmd &= ~PCI_COMMAND_MASTER;
2365 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2366
2367 if (!IS_QLA2100(ha)) {
2368 /* Pause RISC. */
2369 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
2370 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
2371 for (cnt = 0; cnt < 30000; cnt++) {
2372 if ((RD_REG_WORD(&reg->hccr) &
2373 HCCR_RISC_PAUSE) != 0)
2374 break;
2375 udelay(100);
2376 }
2377 } else {
2378 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2379 udelay(10);
2380 }
2381
2382 /* Select FPM registers. */
2383 WRT_REG_WORD(&reg->ctrl_status, 0x20);
2384 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
2385
2386 /* FPM Soft Reset. */
2387 WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
2388 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
2389
2390 /* Toggle Fpm Reset. */
2391 if (!IS_QLA2200(ha)) {
2392 WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
2393 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
2394 }
2395
2396 /* Select frame buffer registers. */
2397 WRT_REG_WORD(&reg->ctrl_status, 0x10);
2398 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
2399
2400 /* Reset frame buffer FIFOs. */
2401 if (IS_QLA2200(ha)) {
2402 WRT_FB_CMD_REG(ha, reg, 0xa000);
2403 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
2404 } else {
2405 WRT_FB_CMD_REG(ha, reg, 0x00fc);
2406
2407 /* Read back fb_cmd until zero or 3 seconds max */
2408 for (cnt = 0; cnt < 3000; cnt++) {
2409 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
2410 break;
2411 udelay(100);
2412 }
2413 }
2414
2415 /* Select RISC module registers. */
2416 WRT_REG_WORD(&reg->ctrl_status, 0);
2417 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
2418
2419 /* Reset RISC processor. */
2420 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2421 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2422
2423 /* Release RISC processor. */
2424 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2425 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2426 }
2427
2428 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
2429 WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
2430
2431 /* Reset ISP chip. */
2432 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
2433
2434 /* Wait for RISC to recover from reset. */
2435 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2436 /*
2437 * It is necessary to force a delay here since the card doesn't
2438 * respond to PCI reads during a reset. On some architectures
2439 * this will result in an MCA.
2440 */
2441 udelay(20);
2442 for (cnt = 30000; cnt; cnt--) {
2443 if ((RD_REG_WORD(&reg->ctrl_status) &
2444 CSR_ISP_SOFT_RESET) == 0)
2445 break;
2446 udelay(100);
2447 }
2448 } else
2449 udelay(10);
2450
2451 /* Reset RISC processor. */
2452 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2453
2454 WRT_REG_WORD(&reg->semaphore, 0);
2455
2456 /* Release RISC processor. */
2457 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2458 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2459
2460 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2461 for (cnt = 0; cnt < 30000; cnt++) {
ffb39f03 2462 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
1da177e4 2463 break;
1da177e4
LT
2464
2465 udelay(100);
2466 }
2467 } else
2468 udelay(100);
2469
2470 /* Turn on master enable */
2471 cmd |= PCI_COMMAND_MASTER;
2472 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2473
2474 /* Disable RISC pause on FPM parity error. */
2475 if (!IS_QLA2100(ha)) {
2476 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
2477 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
2478 }
2479
2480 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2481}
2482
b1d46989
MI
2483/**
2484 * qla81xx_reset_mpi() - Resets MPI FW via Write MPI Register MBC.
2db6228d 2485 * @vha: HA context
b1d46989
MI
2486 *
2487 * Returns 0 on success.
2488 */
fa492630 2489static int
b1d46989
MI
2490qla81xx_reset_mpi(scsi_qla_host_t *vha)
2491{
2492 uint16_t mb[4] = {0x1010, 0, 1, 0};
2493
6246b8a1
GM
2494 if (!IS_QLA81XX(vha->hw))
2495 return QLA_SUCCESS;
2496
b1d46989
MI
2497 return qla81xx_write_mpi_register(vha, mb);
2498}
2499
0107109e 2500/**
88c26663 2501 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
2db6228d 2502 * @vha: HA context
0107109e
AV
2503 *
2504 * Returns 0 on success.
2505 */
d14e72fb 2506static inline int
e315cd28 2507qla24xx_reset_risc(scsi_qla_host_t *vha)
0107109e
AV
2508{
2509 unsigned long flags = 0;
e315cd28 2510 struct qla_hw_data *ha = vha->hw;
0107109e 2511 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
52c82823 2512 uint32_t cnt;
335a1cc9 2513 uint16_t wd;
b1d46989 2514 static int abts_cnt; /* ISP abort retry counts */
d14e72fb 2515 int rval = QLA_SUCCESS;
0107109e 2516
0107109e
AV
2517 spin_lock_irqsave(&ha->hardware_lock, flags);
2518
2519 /* Reset RISC. */
2520 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
2521 for (cnt = 0; cnt < 30000; cnt++) {
2522 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
2523 break;
2524
2525 udelay(10);
2526 }
2527
d14e72fb
HM
2528 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
2529 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
2530
2531 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
2532 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
2533 RD_REG_DWORD(&reg->hccr),
2534 RD_REG_DWORD(&reg->ctrl_status),
2535 (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
2536
0107109e
AV
2537 WRT_REG_DWORD(&reg->ctrl_status,
2538 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
335a1cc9 2539 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
88c26663 2540
335a1cc9 2541 udelay(100);
d14e72fb 2542
88c26663 2543 /* Wait for firmware to complete NVRAM accesses. */
52c82823 2544 RD_REG_WORD(&reg->mailbox0);
d14e72fb
HM
2545 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
2546 rval == QLA_SUCCESS; cnt--) {
88c26663 2547 barrier();
d14e72fb
HM
2548 if (cnt)
2549 udelay(5);
2550 else
2551 rval = QLA_FUNCTION_TIMEOUT;
88c26663
AV
2552 }
2553
d14e72fb
HM
2554 if (rval == QLA_SUCCESS)
2555 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
2556
2557 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
2558 "HCCR: 0x%x, MailBox0 Status 0x%x\n",
2559 RD_REG_DWORD(&reg->hccr),
2560 RD_REG_DWORD(&reg->mailbox0));
2561
335a1cc9 2562 /* Wait for soft-reset to complete. */
52c82823 2563 RD_REG_DWORD(&reg->ctrl_status);
200ffb15 2564 for (cnt = 0; cnt < 60; cnt++) {
0107109e 2565 barrier();
d14e72fb
HM
2566 if ((RD_REG_DWORD(&reg->ctrl_status) &
2567 CSRX_ISP_SOFT_RESET) == 0)
2568 break;
2569
2570 udelay(5);
0107109e 2571 }
d14e72fb
HM
2572 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
2573 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
2574
2575 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
2576 "HCCR: 0x%x, Soft Reset status: 0x%x\n",
2577 RD_REG_DWORD(&reg->hccr),
2578 RD_REG_DWORD(&reg->ctrl_status));
0107109e 2579
b1d46989
MI
2580 /* If required, do an MPI FW reset now */
2581 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
2582 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
2583 if (++abts_cnt < 5) {
2584 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2585 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
2586 } else {
2587 /*
2588 * We exhausted the ISP abort retries. We have to
2589 * set the board offline.
2590 */
2591 abts_cnt = 0;
2592 vha->flags.online = 0;
2593 }
2594 }
2595 }
2596
0107109e
AV
2597 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2598 RD_REG_DWORD(&reg->hccr);
2599
2600 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2601 RD_REG_DWORD(&reg->hccr);
2602
2603 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2604 RD_REG_DWORD(&reg->hccr);
2605
52c82823 2606 RD_REG_WORD(&reg->mailbox0);
200ffb15 2607 for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
d14e72fb 2608 rval == QLA_SUCCESS; cnt--) {
0107109e 2609 barrier();
d14e72fb
HM
2610 if (cnt)
2611 udelay(5);
2612 else
2613 rval = QLA_FUNCTION_TIMEOUT;
0107109e 2614 }
d14e72fb
HM
2615 if (rval == QLA_SUCCESS)
2616 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2617
2618 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
2619 "Host Risc 0x%x, mailbox0 0x%x\n",
2620 RD_REG_DWORD(&reg->hccr),
2621 RD_REG_WORD(&reg->mailbox0));
0107109e
AV
2622
2623 spin_unlock_irqrestore(&ha->hardware_lock, flags);
124f85e6 2624
d14e72fb
HM
2625 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
2626 "Driver in %s mode\n",
2627 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
2628
124f85e6
AV
2629 if (IS_NOPOLLING_TYPE(ha))
2630 ha->isp_ops->enable_intrs(ha);
d14e72fb
HM
2631
2632 return rval;
0107109e
AV
2633}
2634
4ea2c9c7
JC
2635static void
2636qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
2637{
2638 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2639
2640 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2641 *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
2642
2643}
2644
2645static void
2646qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
2647{
2648 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2649
2650 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2651 WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
2652}
2653
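/*
 * On certain 25xx subsystem IDs, pause the RISC and acquire the RISC
 * semaphore before reset; if it cannot be obtained within the timeouts,
 * force-set it so the reset can proceed.
 */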
2654static void
2655qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
2656{
4ea2c9c7
JC
2657 uint32_t wd32 = 0;
2658 uint delta_msec = 100;
2659 uint elapsed_msec = 0;
2660 uint timeout_msec;
2661 ulong n;
2662
cc790764
JC
2663 if (vha->hw->pdev->subsystem_device != 0x0175 &&
2664 vha->hw->pdev->subsystem_device != 0x0240)
4ea2c9c7
JC
2665 return;
2666
8dd7e3a5
JC
2667 WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
2668 udelay(100);
2669
4ea2c9c7
JC
2670attempt:
2671 timeout_msec = TIMEOUT_SEMAPHORE;
2672 n = timeout_msec / delta_msec;
2673 while (n--) {
2674 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
2675 qla25xx_read_risc_sema_reg(vha, &wd32);
2676 if (wd32 & RISC_SEMAPHORE)
2677 break;
2678 msleep(delta_msec);
2679 elapsed_msec += delta_msec;
2680 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2681 goto force;
2682 }
2683
2684 if (!(wd32 & RISC_SEMAPHORE))
2685 goto force;
2686
2687 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2688 goto acquired;
2689
2690 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
2691 timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
2692 n = timeout_msec / delta_msec;
2693 while (n--) {
2694 qla25xx_read_risc_sema_reg(vha, &wd32);
2695 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2696 break;
2697 msleep(delta_msec);
2698 elapsed_msec += delta_msec;
2699 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2700 goto force;
2701 }
2702
2703 if (wd32 & RISC_SEMAPHORE_FORCE)
2704 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
2705
2706 goto attempt;
2707
2708force:
2709 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
2710
2711acquired:
2712 return;
2713}
2714
88c26663
AV
2715/**
2716 * qla24xx_reset_chip() - Reset ISP24xx chip.
2db6228d 2717 * @vha: HA context
88c26663
AV
2718 *
2719 * Returns 0 on success.
2720 */
2721void
e315cd28 2722qla24xx_reset_chip(scsi_qla_host_t *vha)
88c26663 2723{
e315cd28 2724 struct qla_hw_data *ha = vha->hw;
85880801
AV
2725
2726 if (pci_channel_offline(ha->pdev) &&
2727 ha->flags.pci_channel_io_perm_failure) {
2728 return;
2729 }
2730
fd34f556 2731 ha->isp_ops->disable_intrs(ha);
88c26663 2732
4ea2c9c7
JC
2733 qla25xx_manipulate_risc_semaphore(vha);
2734
88c26663 2735 /* Perform RISC reset. */
e315cd28 2736 qla24xx_reset_risc(vha);
88c26663
AV
2737}
2738
1da177e4
LT
2739/**
2740 * qla2x00_chip_diag() - Test chip for proper operation.
2db6228d 2741 * @vha: HA context
1da177e4
LT
2742 *
2743 * Returns 0 on success.
2744 */
abbd8870 2745int
e315cd28 2746qla2x00_chip_diag(scsi_qla_host_t *vha)
1da177e4
LT
2747{
2748 int rval;
e315cd28 2749 struct qla_hw_data *ha = vha->hw;
3d71644c 2750 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
2751 unsigned long flags = 0;
2752 uint16_t data;
2753 uint32_t cnt;
2754 uint16_t mb[5];
73208dfd 2755 struct req_que *req = ha->req_q_map[0];
1da177e4
LT
2756
2757 /* Assume a failed state */
2758 rval = QLA_FUNCTION_FAILED;
2759
da4704d9
BVA
2760 ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
2761 &reg->flash_address);
1da177e4
LT
2762
2763 spin_lock_irqsave(&ha->hardware_lock, flags);
2764
2765 /* Reset ISP chip. */
2766 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
2767
2768 /*
2769 * We need to have a delay here since the card will not respond while
2770 * in reset, causing an MCA on some architectures.
2771 */
2772 udelay(20);
2773 data = qla2x00_debounce_register(&reg->ctrl_status);
2774 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
2775 udelay(5);
2776 data = RD_REG_WORD(&reg->ctrl_status);
2777 barrier();
2778 }
2779
2780 if (!cnt)
2781 goto chip_diag_failed;
2782
7c3df132
SK
2783 ql_dbg(ql_dbg_init, vha, 0x007c,
2784 "Reset register cleared by chip reset.\n");
1da177e4
LT
2785
2786 /* Reset RISC processor. */
2787 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2788 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2789
2790 /* Workaround for QLA2312 PCI parity error */
2791 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2792 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
2793 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
2794 udelay(5);
2795 data = RD_MAILBOX_REG(ha, reg, 0);
fa2a1ce5 2796 barrier();
1da177e4
LT
2797 }
2798 } else
2799 udelay(10);
2800
2801 if (!cnt)
2802 goto chip_diag_failed;
2803
2804 /* Check product ID of chip */
5a68a1c2 2805 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
1da177e4
LT
2806
2807 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
2808 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
2809 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
2810 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
2811 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
2812 mb[3] != PROD_ID_3) {
7c3df132
SK
2813 ql_log(ql_log_warn, vha, 0x0062,
2814 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
2815 mb[1], mb[2], mb[3]);
1da177e4
LT
2816
2817 goto chip_diag_failed;
2818 }
2819 ha->product_id[0] = mb[1];
2820 ha->product_id[1] = mb[2];
2821 ha->product_id[2] = mb[3];
2822 ha->product_id[3] = mb[4];
2823
2824 /* Adjust fw RISC transfer size */
73208dfd 2825 if (req->length > 1024)
1da177e4
LT
2826 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
2827 else
2828 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
73208dfd 2829 req->length;
1da177e4
LT
2830
2831 if (IS_QLA2200(ha) &&
2832 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
2833 /* Limit firmware transfer size with a 2200A */
7c3df132 2834 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1da177e4 2835
ea5b6382 2836 ha->device_type |= DT_ISP2200A;
1da177e4
LT
2837 ha->fw_transfer_size = 128;
2838 }
2839
2840 /* Wrap Incoming Mailboxes Test. */
2841 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2842
7c3df132 2843 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
e315cd28 2844 rval = qla2x00_mbx_reg_test(vha);
7c3df132
SK
2845 if (rval)
2846 ql_log(ql_log_warn, vha, 0x0080,
2847 "Failed mailbox send register test.\n");
2848 else
1da177e4
LT
2849 /* Flag a successful rval */
2850 rval = QLA_SUCCESS;
1da177e4
LT
2851 spin_lock_irqsave(&ha->hardware_lock, flags);
2852
2853chip_diag_failed:
2854 if (rval)
7c3df132
SK
2855 ql_log(ql_log_info, vha, 0x0081,
2856 "Chip diagnostics **** FAILED ****.\n");
1da177e4
LT
2857
2858 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2859
2860 return (rval);
2861}
2862
0107109e
AV
2863/**
2864 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
2db6228d 2865 * @vha: HA context
0107109e
AV
2866 *
2867 * Returns 0 on success.
2868 */
2869int
e315cd28 2870qla24xx_chip_diag(scsi_qla_host_t *vha)
0107109e
AV
2871{
2872 int rval;
e315cd28 2873 struct qla_hw_data *ha = vha->hw;
73208dfd 2874 struct req_que *req = ha->req_q_map[0];
0107109e 2875
7ec0effd 2876 if (IS_P3P_TYPE(ha))
a9083016
GM
2877 return QLA_SUCCESS;
2878
73208dfd 2879 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
0107109e 2880
e315cd28 2881 rval = qla2x00_mbx_reg_test(vha);
0107109e 2882 if (rval) {
7c3df132
SK
2883 ql_log(ql_log_warn, vha, 0x0082,
2884 "Failed mailbox send register test.\n");
0107109e
AV
2885 } else {
2886 /* Flag a successful rval */
2887 rval = QLA_SUCCESS;
2888 }
2889
2890 return rval;
2891}
2892
ad0a0b01
QT
2893static void
2894qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
0107109e 2895{
a7a167bf 2896 int rval;
df613b96
AV
2897 dma_addr_t tc_dma;
2898 void *tc;
e315cd28 2899 struct qla_hw_data *ha = vha->hw;
a7a167bf 2900
ad0a0b01 2901 if (ha->eft) {
7c3df132 2902 ql_dbg(ql_dbg_init, vha, 0x00bd,
ad0a0b01
QT
2903 "%s: Offload Mem is already allocated.\n",
2904 __func__);
a7a167bf
AV
2905 return;
2906 }
d4e3e04d 2907
ad0a0b01 2908 if (IS_FWI2_CAPABLE(ha)) {
df613b96 2909 /* Allocate memory for Fibre Channel Event Buffer. */
f73cb695
CD
2910 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
2911 !IS_QLA27XX(ha))
436a7b11 2912 goto try_eft;
df613b96 2913
f73cb695
CD
2914 if (ha->fce)
2915 dma_free_coherent(&ha->pdev->dev,
2916 FCE_SIZE, ha->fce, ha->fce_dma);
2917
2918 /* Allocate memory for Fibre Channel Event Buffer. */
0ea85b50
JP
2919 tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
2920 GFP_KERNEL);
df613b96 2921 if (!tc) {
7c3df132
SK
2922 ql_log(ql_log_warn, vha, 0x00be,
2923 "Unable to allocate (%d KB) for FCE.\n",
2924 FCE_SIZE / 1024);
17d98630 2925 goto try_eft;
df613b96
AV
2926 }
2927
e315cd28 2928 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
df613b96
AV
2929 ha->fce_mb, &ha->fce_bufs);
2930 if (rval) {
7c3df132
SK
2931 ql_log(ql_log_warn, vha, 0x00bf,
2932 "Unable to initialize FCE (%d).\n", rval);
df613b96
AV
2933 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
2934 tc_dma);
2935 ha->flags.fce_enabled = 0;
17d98630 2936 goto try_eft;
df613b96 2937 }
cfb0919c 2938 ql_dbg(ql_dbg_init, vha, 0x00c0,
7c3df132 2939 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
df613b96 2940
df613b96
AV
2941 ha->flags.fce_enabled = 1;
2942 ha->fce_dma = tc_dma;
2943 ha->fce = tc;
f73cb695 2944
436a7b11 2945try_eft:
f73cb695
CD
2946 if (ha->eft)
2947 dma_free_coherent(&ha->pdev->dev,
2948 EFT_SIZE, ha->eft, ha->eft_dma);
2949
436a7b11 2950 /* Allocate memory for Extended Trace Buffer. */
0ea85b50
JP
2951 tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
2952 GFP_KERNEL);
436a7b11 2953 if (!tc) {
7c3df132
SK
2954 ql_log(ql_log_warn, vha, 0x00c1,
2955 "Unable to allocate (%d KB) for EFT.\n",
2956 EFT_SIZE / 1024);
ad0a0b01 2957 goto eft_err;
436a7b11
AV
2958 }
2959
e315cd28 2960 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
436a7b11 2961 if (rval) {
7c3df132
SK
2962 ql_log(ql_log_warn, vha, 0x00c2,
2963 "Unable to initialize EFT (%d).\n", rval);
436a7b11
AV
2964 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
2965 tc_dma);
ad0a0b01 2966 goto eft_err;
436a7b11 2967 }
cfb0919c 2968 ql_dbg(ql_dbg_init, vha, 0x00c3,
7c3df132 2969 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
436a7b11 2970
436a7b11
AV
2971 ha->eft_dma = tc_dma;
2972 ha->eft = tc;
d4e3e04d 2973 }
f73cb695 2974
ad0a0b01
QT
2975eft_err:
2976 return;
2977}
2978
2979void
2980qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
2981{
2982 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
2983 eft_size, fce_size, mq_size;
2984 struct qla_hw_data *ha = vha->hw;
2985 struct req_que *req = ha->req_q_map[0];
2986 struct rsp_que *rsp = ha->rsp_q_map[0];
2987 struct qla2xxx_fw_dump *fw_dump;
2988
2989 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
2990 req_q_size = rsp_q_size = 0;
2991
2992 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2993 fixed_size = sizeof(struct qla2100_fw_dump);
2994 } else if (IS_QLA23XX(ha)) {
2995 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
2996 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
2997 sizeof(uint16_t);
2998 } else if (IS_FWI2_CAPABLE(ha)) {
2999 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3000 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
3001 else if (IS_QLA81XX(ha))
3002 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
3003 else if (IS_QLA25XX(ha))
3004 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
3005 else
3006 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
3007
3008 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
3009 sizeof(uint32_t);
3010 if (ha->mqenable) {
3011 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
3012 mq_size = sizeof(struct qla2xxx_mq_chain);
3013 /*
3014 * Allocate maximum buffer size for all queues.
3015 * Resizing must be done at end-of-dump processing.
3016 */
3017 mq_size += ha->max_req_queues *
3018 (req->length * sizeof(request_t));
3019 mq_size += ha->max_rsp_queues *
3020 (rsp->length * sizeof(response_t));
3021 }
3022 if (ha->tgt.atio_ring)
3023 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
3024 /* Allocate memory for Fibre Channel Event Buffer. */
3025 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3026 !IS_QLA27XX(ha))
3027 goto try_eft;
3028
3029 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
3030try_eft:
3031 ql_dbg(ql_dbg_init, vha, 0x00c3,
3032 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
3033 eft_size = EFT_SIZE;
3034 }
3035
f73cb695
CD
3036 if (IS_QLA27XX(ha)) {
3037 if (!ha->fw_dump_template) {
3038 ql_log(ql_log_warn, vha, 0x00ba,
3039 "Failed missing fwdump template\n");
3040 return;
3041 }
3042 dump_size = qla27xx_fwdt_calculate_dump_size(vha);
3043 ql_dbg(ql_dbg_init, vha, 0x00fa,
3044 "-> allocating fwdump (%x bytes)...\n", dump_size);
3045 goto allocate;
3046 }
3047
73208dfd
AC
3048 req_q_size = req->length * sizeof(request_t);
3049 rsp_q_size = rsp->length * sizeof(response_t);
a7a167bf 3050 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
2afa19a9 3051 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
bb99de67
AV
3052 ha->chain_offset = dump_size;
3053 dump_size += mq_size + fce_size;
d4e3e04d 3054
b945e777
QT
3055 if (ha->exchoffld_buf)
3056 dump_size += sizeof(struct qla2xxx_offld_chain) +
3057 ha->exchoffld_size;
3058 if (ha->exlogin_buf)
3059 dump_size += sizeof(struct qla2xxx_offld_chain) +
3060 ha->exlogin_size;
3061
f73cb695 3062allocate:
ad0a0b01
QT
3063 if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
3064 fw_dump = vmalloc(dump_size);
3065 if (!fw_dump) {
3066 ql_log(ql_log_warn, vha, 0x00c4,
3067 "Unable to allocate (%d KB) for firmware dump.\n",
3068 dump_size / 1024);
3069 } else {
3070 if (ha->fw_dump)
3071 vfree(ha->fw_dump);
3072 ha->fw_dump = fw_dump;
3073
3074 ha->fw_dump_len = dump_size;
3075 ql_dbg(ql_dbg_init, vha, 0x00c5,
3076 "Allocated (%d KB) for firmware dump.\n",
3077 dump_size / 1024);
3078
3079 if (IS_QLA27XX(ha))
3080 return;
3081
3082 ha->fw_dump->signature[0] = 'Q';
3083 ha->fw_dump->signature[1] = 'L';
3084 ha->fw_dump->signature[2] = 'G';
3085 ha->fw_dump->signature[3] = 'C';
3086 ha->fw_dump->version = htonl(1);
3087
3088 ha->fw_dump->fixed_size = htonl(fixed_size);
3089 ha->fw_dump->mem_size = htonl(mem_size);
3090 ha->fw_dump->req_q_size = htonl(req_q_size);
3091 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
3092
3093 ha->fw_dump->eft_size = htonl(eft_size);
3094 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
3095 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
3096
3097 ha->fw_dump->header_size =
3098 htonl(offsetof(struct qla2xxx_fw_dump, isp));
a7a167bf 3099 }
a7a167bf 3100 }
0107109e
AV
3101}
3102
18e7555a
AV
3103static int
3104qla81xx_mpi_sync(scsi_qla_host_t *vha)
3105{
3106#define MPS_MASK 0xe0
3107 int rval;
3108 uint16_t dc;
3109 uint32_t dw;
18e7555a
AV
3110
3111 if (!IS_QLA81XX(vha->hw))
3112 return QLA_SUCCESS;
3113
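	/* Take the 0x7c00 semaphore, then mirror the MPS bits (MPS_MASK) from
	 * PCI config offset 0x54 into RAM word 0x7a15 if they differ. */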
3114 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
3115 if (rval != QLA_SUCCESS) {
7c3df132
SK
3116 ql_log(ql_log_warn, vha, 0x0105,
3117 "Unable to acquire semaphore.\n");
18e7555a
AV
3118 goto done;
3119 }
3120
3121 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
3122 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
3123 if (rval != QLA_SUCCESS) {
7c3df132 3124 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
18e7555a
AV
3125 goto done_release;
3126 }
3127
3128 dc &= MPS_MASK;
3129 if (dc == (dw & MPS_MASK))
3130 goto done_release;
3131
3132 dw &= ~MPS_MASK;
3133 dw |= dc;
3134 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
3135 if (rval != QLA_SUCCESS) {
7c3df132 3136 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
18e7555a
AV
3137 }
3138
3139done_release:
3140 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
3141 if (rval != QLA_SUCCESS) {
7c3df132
SK
3142 ql_log(ql_log_warn, vha, 0x006d,
3143 "Unable to release semaphore.\n");
18e7555a
AV
3144 }
3145
3146done:
3147 return rval;
3148}
3149
8d93f550
CD
3150int
3151qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3152{
3153 /* Don't try to reallocate the array */
3154 if (req->outstanding_cmds)
3155 return QLA_SUCCESS;
3156
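	/* Size the array from firmware resources: FWI2-capable adapters use
	 * the smaller of the reported XCB and IOCB counts, older adapters a
	 * fixed default. */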
d7459527 3157 if (!IS_FWI2_CAPABLE(ha))
8d93f550
CD
3158 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
3159 else {
03e8c680
QT
3160 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
3161 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
8d93f550 3162 else
03e8c680 3163 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
8d93f550
CD
3164 }
3165
6396bb22
KC
3166 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3167 sizeof(srb_t *),
3168 GFP_KERNEL);
8d93f550
CD
3169
3170 if (!req->outstanding_cmds) {
3171 /*
3172 * Try to allocate a minimal size just so we can get through
3173 * initialization.
3174 */
3175 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
6396bb22
KC
3176 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3177 sizeof(srb_t *),
3178 GFP_KERNEL);
8d93f550
CD
3179
3180 if (!req->outstanding_cmds) {
3181 ql_log(ql_log_fatal, NULL, 0x0126,
3182 "Failed to allocate memory for "
3183 "outstanding_cmds for req_que %p.\n", req);
3184 req->num_outstanding_cmds = 0;
3185 return QLA_FUNCTION_FAILED;
3186 }
3187 }
3188
3189 return QLA_SUCCESS;
3190}
3191
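/* Append _str to the string at ptr when (_field & _flag) is set, adding a '|'
 * separator once a previous field has been printed and tracking the space
 * left in the buffer. */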
e4e3a2ce
QT
3192#define PRINT_FIELD(_field, _flag, _str) { \
3193 if (a0->_field & _flag) {\
3194 if (p) {\
3195 strcat(ptr, "|");\
3196 ptr++;\
3197 leftover--;\
3198 } \
3199 len = snprintf(ptr, leftover, "%s", _str); \
3200 p = 1;\
3201 leftover -= len;\
3202 ptr += len; \
3203 } \
3204}
3205
3206static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
3207{
3208#define STR_LEN 64
3209 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
3210 u8 str[STR_LEN], *ptr, p;
3211 int leftover, len;
3212
3213 memset(str, 0, STR_LEN);
3214 snprintf(str, SFF_VEN_NAME_LEN+1, "%s", a0->vendor_name);
3215 ql_dbg(ql_dbg_init, vha, 0x015a,
3216 "SFP MFG Name: %s\n", str);
3217
3218 memset(str, 0, STR_LEN);
3219 snprintf(str, SFF_PART_NAME_LEN+1, "%s", a0->vendor_pn);
3220 ql_dbg(ql_dbg_init, vha, 0x015c,
3221 "SFP Part Name: %s\n", str);
3222
3223 /* media */
3224 memset(str, 0, STR_LEN);
3225 ptr = str;
3226 leftover = STR_LEN;
3227 p = len = 0;
3228 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
3229 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
3230 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
3231 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
3232 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
3233 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
3234 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
3235 ql_dbg(ql_dbg_init, vha, 0x0160,
3236 "SFP Media: %s\n", str);
3237
3238 /* link length */
3239 memset(str, 0, STR_LEN);
3240 ptr = str;
3241 leftover = STR_LEN;
3242 p = len = 0;
3243 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
3244 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
3245 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
3246 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
3247 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
3248 ql_dbg(ql_dbg_init, vha, 0x0196,
3249 "SFP Link Length: %s\n", str);
3250
3251 memset(str, 0, STR_LEN);
3252 ptr = str;
3253 leftover = STR_LEN;
3254 p = len = 0;
3255 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
3256 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
3257 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
3258 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
3259 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
3260 ql_dbg(ql_dbg_init, vha, 0x016e,
3261 "SFP FC Link Tech: %s\n", str);
3262
3263 if (a0->length_km)
3264 ql_dbg(ql_dbg_init, vha, 0x016f,
3265 "SFP Distant: %d km\n", a0->length_km);
3266 if (a0->length_100m)
3267 ql_dbg(ql_dbg_init, vha, 0x0170,
3268 "SFP Distant: %d m\n", a0->length_100m*100);
3269 if (a0->length_50um_10m)
3270 ql_dbg(ql_dbg_init, vha, 0x0189,
3271 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
3272 if (a0->length_62um_10m)
3273 ql_dbg(ql_dbg_init, vha, 0x018a,
3274 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
3275 if (a0->length_om4_10m)
3276 ql_dbg(ql_dbg_init, vha, 0x0194,
3277 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
3278 if (a0->length_om3_10m)
3279 ql_dbg(ql_dbg_init, vha, 0x0195,
3280 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
3281}
3282
3283
3284/*
3285 * Return Code:
3286 * QLA_SUCCESS: no action
3287 * QLA_INTERFACE_ERROR: SFP is not there.
3288 * QLA_FUNCTION_FAILED: detected New SFP
3289 */
3290int
3291qla24xx_detect_sfp(scsi_qla_host_t *vha)
3292{
3293 int rc = QLA_SUCCESS;
3294 struct sff_8247_a0 *a;
3295 struct qla_hw_data *ha = vha->hw;
3296
3297 if (!AUTO_DETECT_SFP_SUPPORT(vha))
3298 goto out;
3299
3300 rc = qla2x00_read_sfp_dev(vha, NULL, 0);
3301 if (rc)
3302 goto out;
3303
3304 a = (struct sff_8247_a0 *)vha->hw->sfp_data;
3305 qla2xxx_print_sfp_info(vha);
3306
3307 if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
3308 /* long range */
3309 ha->flags.detected_lr_sfp = 1;
3310
3311 if (a->length_km > 5 || a->length_100m > 50)
3312 ha->long_range_distance = LR_DISTANCE_10K;
3313 else
3314 ha->long_range_distance = LR_DISTANCE_5K;
3315
3316 if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
3317 ql_dbg(ql_dbg_async, vha, 0x507b,
3318 "Detected Long Range SFP.\n");
3319 } else {
3320 /* short range */
3321 ha->flags.detected_lr_sfp = 0;
3322 if (ha->flags.using_lr_setting)
3323 ql_dbg(ql_dbg_async, vha, 0x5084,
3324 "Detected Short Range SFP.\n");
3325 }
3326
3327 if (!vha->flags.init_done)
3328 rc = QLA_SUCCESS;
3329out:
3330 return rc;
3331}
3332
1da177e4
LT
3333/**
3334 * qla2x00_setup_chip() - Load and start RISC firmware.
2db6228d 3335 * @vha: HA context
1da177e4
LT
3336 *
3337 * Returns 0 on success.
3338 */
3339static int
e315cd28 3340qla2x00_setup_chip(scsi_qla_host_t *vha)
1da177e4 3341{
0107109e
AV
3342 int rval;
3343 uint32_t srisc_address = 0;
e315cd28 3344 struct qla_hw_data *ha = vha->hw;
3db0652e
AV
3345 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3346 unsigned long flags;
dda772e8 3347 uint16_t fw_major_version;
3db0652e 3348
7ec0effd 3349 if (IS_P3P_TYPE(ha)) {
a9083016 3350 rval = ha->isp_ops->load_risc(vha, &srisc_address);
14e303d9
AV
3351 if (rval == QLA_SUCCESS) {
3352 qla2x00_stop_firmware(vha);
a9083016 3353 goto enable_82xx_npiv;
14e303d9 3354 } else
b963752f 3355 goto failed;
a9083016
GM
3356 }
3357
3db0652e
AV
3358 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3359 /* Disable SRAM, Instruction RAM and GP RAM parity. */
3360 spin_lock_irqsave(&ha->hardware_lock, flags);
3361 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
3362 RD_REG_WORD(&reg->hccr);
3363 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3364 }
1da177e4 3365
18e7555a
AV
3366 qla81xx_mpi_sync(vha);
3367
1da177e4 3368 /* Load firmware sequences */
e315cd28 3369 rval = ha->isp_ops->load_risc(vha, &srisc_address);
0107109e 3370 if (rval == QLA_SUCCESS) {
7c3df132
SK
3371 ql_dbg(ql_dbg_init, vha, 0x00c9,
3372 "Verifying Checksum of loaded RISC code.\n");
1da177e4 3373
e315cd28 3374 rval = qla2x00_verify_checksum(vha, srisc_address);
1da177e4
LT
3375 if (rval == QLA_SUCCESS) {
3376 /* Start firmware execution. */
7c3df132
SK
3377 ql_dbg(ql_dbg_init, vha, 0x00ca,
3378 "Starting firmware.\n");
1da177e4 3379
b0d6cabd
HM
3380 if (ql2xexlogins)
3381 ha->flags.exlogins_enabled = 1;
3382
99e1b683 3383 if (qla_is_exch_offld_enabled(vha))
2f56a7f1
HM
3384 ha->flags.exchoffld_enabled = 1;
3385
e315cd28 3386 rval = qla2x00_execute_fw(vha, srisc_address);
1da177e4 3387 /* Retrieve firmware information. */
dda772e8 3388 if (rval == QLA_SUCCESS) {
e4e3a2ce
QT
3389 qla24xx_detect_sfp(vha);
3390
b0d6cabd
HM
3391 rval = qla2x00_set_exlogins_buffer(vha);
3392 if (rval != QLA_SUCCESS)
3393 goto failed;
3394
2f56a7f1
HM
3395 rval = qla2x00_set_exchoffld_buffer(vha);
3396 if (rval != QLA_SUCCESS)
3397 goto failed;
3398
a9083016 3399enable_82xx_npiv:
dda772e8 3400 fw_major_version = ha->fw_major_version;
7ec0effd 3401 if (IS_P3P_TYPE(ha))
3173167f 3402 qla82xx_check_md_needed(vha);
6246b8a1
GM
3403 else
3404 rval = qla2x00_get_fw_version(vha);
ca9e9c3e
AV
3405 if (rval != QLA_SUCCESS)
3406 goto failed;
2c3dfe3f 3407 ha->flags.npiv_supported = 0;
e315cd28 3408 if (IS_QLA2XXX_MIDTYPE(ha) &&
946fb891 3409 (ha->fw_attributes & BIT_2)) {
2c3dfe3f 3410 ha->flags.npiv_supported = 1;
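			/* If the firmware did not report a vport limit, or the
			 * limit + 1 is not a multiple of MIN_MULTI_ID_FABRIC,
			 * fall back to MIN_MULTI_ID_FABRIC - 1 vports. */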
4d0ea247
SJ
3411 if ((!ha->max_npiv_vports) ||
3412 ((ha->max_npiv_vports + 1) %
eb66dc60 3413 MIN_MULTI_ID_FABRIC))
4d0ea247 3414 ha->max_npiv_vports =
eb66dc60 3415 MIN_MULTI_ID_FABRIC - 1;
4d0ea247 3416 }
03e8c680 3417 qla2x00_get_resource_cnts(vha);
d743de66 3418
8d93f550
CD
3419 /*
3420 * Allocate the array of outstanding commands
3421 * now that we know the firmware resources.
3422 */
3423 rval = qla2x00_alloc_outstanding_cmds(ha,
3424 vha->req);
3425 if (rval != QLA_SUCCESS)
3426 goto failed;
3427
ad0a0b01
QT
3428 if (!fw_major_version && !(IS_P3P_TYPE(ha)))
3429 qla2x00_alloc_offload_mem(vha);
3430
3431 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
08de2844 3432 qla2x00_alloc_fw_dump(vha);
ad0a0b01 3433
3b6e5b9d
CD
3434 } else {
3435 goto failed;
1da177e4
LT
3436 }
3437 } else {
7c3df132
SK
3438 ql_log(ql_log_fatal, vha, 0x00cd,
3439 "ISP Firmware failed checksum.\n");
3440 goto failed;
1da177e4 3441 }
c74d88a4
AV
3442 } else
3443 goto failed;
1da177e4 3444
3db0652e
AV
3445 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3446 /* Enable proper parity. */
3447 spin_lock_irqsave(&ha->hardware_lock, flags);
3448 if (IS_QLA2300(ha))
3449 /* SRAM parity */
3450 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
3451 else
3452 /* SRAM, Instruction RAM and GP RAM parity */
3453 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
3454 RD_REG_WORD(&reg->hccr);
3455 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3456 }
3457
f3982d89
CD
3458 if (IS_QLA27XX(ha))
3459 ha->flags.fac_supported = 1;
3460 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1d2874de
JC
3461 uint32_t size;
3462
3463 rval = qla81xx_fac_get_sector_size(vha, &size);
3464 if (rval == QLA_SUCCESS) {
3465 ha->flags.fac_supported = 1;
3466 ha->fdt_block_size = size << 2;
3467 } else {
7c3df132 3468 ql_log(ql_log_warn, vha, 0x00ce,
1d2874de
JC
3469 "Unsupported FAC firmware (%d.%02d.%02d).\n",
3470 ha->fw_major_version, ha->fw_minor_version,
3471 ha->fw_subminor_version);
1ca60e3b 3472
f73cb695 3473 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
6246b8a1
GM
3474 ha->flags.fac_supported = 0;
3475 rval = QLA_SUCCESS;
3476 }
1d2874de
JC
3477 }
3478 }
ca9e9c3e 3479failed:
1da177e4 3480 if (rval) {
7c3df132
SK
3481 ql_log(ql_log_fatal, vha, 0x00cf,
3482 "Setup chip ****FAILED****.\n");
1da177e4
LT
3483 }
3484
3485 return (rval);
3486}
3487
3488/**
3489 * qla2x00_init_response_q_entries() - Initializes response queue entries.
2db6228d 3490 * @rsp: response queue
1da177e4
LT
3491 *
3492 * Beginning of request ring has initialization control block already built
3493 * by nvram config routine.
3494 *
3495 * Returns 0 on success.
3496 */
73208dfd
AC
3497void
3498qla2x00_init_response_q_entries(struct rsp_que *rsp)
1da177e4
LT
3499{
3500 uint16_t cnt;
3501 response_t *pkt;
3502
2afa19a9
AC
3503 rsp->ring_ptr = rsp->ring;
3504 rsp->ring_index = 0;
3505 rsp->status_srb = NULL;
e315cd28
AC
3506 pkt = rsp->ring_ptr;
3507 for (cnt = 0; cnt < rsp->length; cnt++) {
1da177e4
LT
3508 pkt->signature = RESPONSE_PROCESSED;
3509 pkt++;
3510 }
1da177e4
LT
3511}
3512
3513/**
3514 * qla2x00_update_fw_options() - Read and process firmware options.
2db6228d 3515 * @vha: HA context
1da177e4
LT
3516 *
3517 * Returns 0 on success.
3518 */
abbd8870 3519void
e315cd28 3520qla2x00_update_fw_options(scsi_qla_host_t *vha)
1da177e4
LT
3521{
3522 uint16_t swing, emphasis, tx_sens, rx_sens;
e315cd28 3523 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
3524
3525 memset(ha->fw_options, 0, sizeof(ha->fw_options));
e315cd28 3526 qla2x00_get_fw_options(vha, ha->fw_options);
1da177e4
LT
3527
3528 if (IS_QLA2100(ha) || IS_QLA2200(ha))
3529 return;
3530
3531 /* Serial Link options. */
7c3df132
SK
3532 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
3533 "Serial link options.\n");
3534 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
3535 (uint8_t *)&ha->fw_seriallink_options,
3536 sizeof(ha->fw_seriallink_options));
1da177e4
LT
3537
3538 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
3539 if (ha->fw_seriallink_options[3] & BIT_2) {
3540 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
3541
3542 /* 1G settings */
3543 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
3544 emphasis = (ha->fw_seriallink_options[2] &
3545 (BIT_4 | BIT_3)) >> 3;
3546 tx_sens = ha->fw_seriallink_options[0] &
fa2a1ce5 3547 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
3548 rx_sens = (ha->fw_seriallink_options[0] &
3549 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
3550 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
3551 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3552 if (rx_sens == 0x0)
3553 rx_sens = 0x3;
3554 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
3555 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
3556 ha->fw_options[10] |= BIT_5 |
3557 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
3558 (tx_sens & (BIT_1 | BIT_0));
3559
3560 /* 2G settings */
3561 swing = (ha->fw_seriallink_options[2] &
3562 (BIT_7 | BIT_6 | BIT_5)) >> 5;
3563 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
3564 tx_sens = ha->fw_seriallink_options[1] &
fa2a1ce5 3565 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
3566 rx_sens = (ha->fw_seriallink_options[1] &
3567 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
3568 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
3569 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3570 if (rx_sens == 0x0)
3571 rx_sens = 0x3;
3572 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
3573 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
3574 ha->fw_options[11] |= BIT_5 |
3575 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
3576 (tx_sens & (BIT_1 | BIT_0));
3577 }
3578
3579 /* FCP2 options. */
3580 /* Return command IOCBs without waiting for an ABTS to complete. */
3581 ha->fw_options[3] |= BIT_13;
3582
3583 /* LED scheme. */
3584 if (ha->flags.enable_led_scheme)
3585 ha->fw_options[2] |= BIT_12;
3586
48c02fde 3587 /* Detect ISP6312. */
3588 if (IS_QLA6312(ha))
3589 ha->fw_options[2] |= BIT_13;
3590
088d09d4
GM
3591 /* Set Retry FLOGI in case of P2P connection */
3592 if (ha->operating_mode == P2P) {
3593 ha->fw_options[2] |= BIT_3;
3594 ql_dbg(ql_dbg_disc, vha, 0x2100,
3595 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
3596 __func__, ha->fw_options[2]);
3597 }
3598
1da177e4 3599 /* Update firmware options. */
e315cd28 3600 qla2x00_set_fw_options(vha, ha->fw_options);
1da177e4
LT
3601}
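
/*
 * Illustrative, standalone sketch (not part of the driver): how the 1G
 * serial-link tuning fields in qla2x00_update_fw_options() above pack into
 * fw_options[10] for the ISP2300/2312 case.  Field widths follow the masks
 * and shifts in the code above (emphasis in bits 15:14, swing in bits 10:8,
 * tx sensitivity in bits 7:4, rx sensitivity in bits 3:0); the function and
 * parameter names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t demo_pack_seriallink_1g(uint16_t swing, uint16_t emphasis,
					uint16_t tx_sens, uint16_t rx_sens)
{
	if (rx_sens == 0)	/* the driver substitutes 0x3 when this reads back as 0 */
		rx_sens = 0x3;
	return (emphasis << 14) | (swing << 8) | (tx_sens << 4) | rx_sens;
}

int main(void)
{
	printf("fw_options[10] = 0x%04x\n",
	    demo_pack_seriallink_1g(0x5, 0x2, 0x8, 0x0));
	return 0;
}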
3602
0107109e 3603void
e315cd28 3604qla24xx_update_fw_options(scsi_qla_host_t *vha)
0107109e
AV
3605{
3606 int rval;
e315cd28 3607 struct qla_hw_data *ha = vha->hw;
0107109e 3608
7ec0effd 3609 if (IS_P3P_TYPE(ha))
a9083016
GM
3610 return;
3611
f198cafa
HM
3612 /* Hold status IOCBs until ABTS response received. */
3613 if (ql2xfwholdabts)
3614 ha->fw_options[3] |= BIT_12;
3615
088d09d4
GM
3616 /* Set Retry FLOGI in case of P2P connection */
3617 if (ha->operating_mode == P2P) {
3618 ha->fw_options[2] |= BIT_3;
3619 ql_dbg(ql_dbg_disc, vha, 0x2101,
3620 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
3621 __func__, ha->fw_options[2]);
3622 }
3623
41dc529a 3624 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
3c4810ff
QT
3625 if (ql2xmvasynctoatio &&
3626 (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
41dc529a
QT
3627 if (qla_tgt_mode_enabled(vha) ||
3628 qla_dual_mode_enabled(vha))
3629 ha->fw_options[2] |= BIT_11;
3630 else
3631 ha->fw_options[2] &= ~BIT_11;
3632 }
3633
f7e761f5
QT
3634 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3635 /*
3636 * Tell FW to track each exchange to prevent
3637 * driver from using stale exchange.
3638 */
3639 if (qla_tgt_mode_enabled(vha) ||
3640 qla_dual_mode_enabled(vha))
3641 ha->fw_options[2] |= BIT_4;
3642 else
3643 ha->fw_options[2] &= ~BIT_4;
9ecf0b0d
QT
3644
 3645 /* Reserve 1/2 of emergency exchanges for ELS. */
3646 if (qla2xuseresexchforels)
3647 ha->fw_options[2] |= BIT_8;
3648 else
3649 ha->fw_options[2] &= ~BIT_8;
f7e761f5
QT
3650 }
3651
83548fe2
QT
3652 ql_dbg(ql_dbg_init, vha, 0x00e8,
3653 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
3654 __func__, ha->fw_options[1], ha->fw_options[2],
3655 ha->fw_options[3], vha->host->active_mode);
3c4810ff
QT
3656
3657 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
3658 qla2x00_set_fw_options(vha, ha->fw_options);
41dc529a 3659
0107109e 3660 /* Update Serial Link options. */
f94097ed 3661 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
0107109e
AV
3662 return;
3663
e315cd28 3664 rval = qla2x00_set_serdes_params(vha,
f94097ed 3665 le16_to_cpu(ha->fw_seriallink_options24[1]),
3666 le16_to_cpu(ha->fw_seriallink_options24[2]),
3667 le16_to_cpu(ha->fw_seriallink_options24[3]));
0107109e 3668 if (rval != QLA_SUCCESS) {
7c3df132 3669 ql_log(ql_log_warn, vha, 0x0104,
0107109e
AV
3670 "Unable to update Serial Link options (%x).\n", rval);
3671 }
3672}
3673
abbd8870 3674void
e315cd28 3675qla2x00_config_rings(struct scsi_qla_host *vha)
abbd8870 3676{
e315cd28 3677 struct qla_hw_data *ha = vha->hw;
3d71644c 3678 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
73208dfd
AC
3679 struct req_que *req = ha->req_q_map[0];
3680 struct rsp_que *rsp = ha->rsp_q_map[0];
abbd8870
AV
3681
3682 /* Setup ring parameters in initialization control block. */
ad950360
BVA
3683 ha->init_cb->request_q_outpointer = cpu_to_le16(0);
3684 ha->init_cb->response_q_inpointer = cpu_to_le16(0);
e315cd28
AC
3685 ha->init_cb->request_q_length = cpu_to_le16(req->length);
3686 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
3687 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
3688 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
3689 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
3690 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
abbd8870
AV
3691
3692 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
3693 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
3694 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
3695 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
3696 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
3697}
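
/*
 * Illustrative, standalone sketch (not part of the driver): splitting a
 * 64-bit queue DMA address into the two 32-bit words written into
 * request_q_address[0]/[1] above.  DEMO_LSD()/DEMO_MSD() stand in for the
 * driver's LSD()/MSD() helpers; the real code additionally wraps each half
 * in cpu_to_le32() before handing it to the ISP.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_LSD(x)	((uint32_t)((uint64_t)(x) & 0xffffffffULL))
#define DEMO_MSD(x)	((uint32_t)(((uint64_t)(x) >> 32) & 0xffffffffULL))

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ULL;	/* hypothetical bus address */
	uint32_t addr[2];

	addr[0] = DEMO_LSD(dma);	/* request_q_address[0] */
	addr[1] = DEMO_MSD(dma);	/* request_q_address[1] */
	printf("low=0x%08x high=0x%08x\n", addr[0], addr[1]);
	return 0;
}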
3698
0107109e 3699void
e315cd28 3700qla24xx_config_rings(struct scsi_qla_host *vha)
0107109e 3701{
e315cd28 3702 struct qla_hw_data *ha = vha->hw;
118e2ef9 3703 device_reg_t *reg = ISP_QUE_REG(ha, 0);
73208dfd
AC
3704 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
3705 struct qla_msix_entry *msix;
0107109e 3706 struct init_cb_24xx *icb;
73208dfd
AC
3707 uint16_t rid = 0;
3708 struct req_que *req = ha->req_q_map[0];
3709 struct rsp_que *rsp = ha->rsp_q_map[0];
0107109e 3710
6246b8a1 3711 /* Setup ring parameters in initialization control block. */
0107109e 3712 icb = (struct init_cb_24xx *)ha->init_cb;
ad950360
BVA
3713 icb->request_q_outpointer = cpu_to_le16(0);
3714 icb->response_q_inpointer = cpu_to_le16(0);
e315cd28
AC
3715 icb->request_q_length = cpu_to_le16(req->length);
3716 icb->response_q_length = cpu_to_le16(rsp->length);
3717 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
3718 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
3719 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
3720 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
0107109e 3721
2d70c103 3722 /* Setup ATIO queue dma pointers for target mode */
ad950360 3723 icb->atio_q_inpointer = cpu_to_le16(0);
2d70c103
NB
3724 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
3725 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
3726 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
3727
7c6300e3 3728 if (IS_SHADOW_REG_CAPABLE(ha))
ad950360 3729 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
7c6300e3 3730
f73cb695 3731 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ad950360
BVA
3732 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
3733 icb->rid = cpu_to_le16(rid);
73208dfd
AC
3734 if (ha->flags.msix_enabled) {
3735 msix = &ha->msix_entries[1];
83548fe2 3736 ql_dbg(ql_dbg_init, vha, 0x0019,
7c3df132
SK
3737 "Registering vector 0x%x for base que.\n",
3738 msix->entry);
73208dfd
AC
3739 icb->msix = cpu_to_le16(msix->entry);
3740 }
3741 /* Use alternate PCI bus number */
3742 if (MSB(rid))
ad950360 3743 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
73208dfd
AC
3744 /* Use alternate PCI devfn */
3745 if (LSB(rid))
ad950360 3746 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
73208dfd 3747
3155754a 3748 /* Use Disable MSIX Handshake mode for capable adapters */
6246b8a1
GM
3749 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
3750 (ha->flags.msix_enabled)) {
ad950360 3751 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
3155754a 3752 ha->flags.disable_msix_handshake = 1;
7c3df132
SK
3753 ql_dbg(ql_dbg_init, vha, 0x00fe,
3754 "MSIX Handshake Disable Mode turned on.\n");
3155754a 3755 } else {
ad950360 3756 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
3155754a 3757 }
ad950360 3758 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
73208dfd
AC
3759
3760 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
3761 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
3762 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
3763 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
3764 } else {
3765 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
3766 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
3767 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
3768 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
3769 }
aa230bc5 3770 qlt_24xx_config_rings(vha);
2d70c103 3771
73208dfd
AC
3772 /* PCI posting */
3773 RD_REG_DWORD(&ioreg->hccr);
0107109e
AV
3774}
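
/*
 * Illustrative, standalone sketch (not part of the driver): the
 * firmware_options_2 decision made above.  BIT_22 is cleared (MSI-X
 * handshake disabled) only when the firmware advertises the capability and
 * MSI-X is actually enabled, otherwise it is set; BIT_23 is always set.
 * The boolean parameters fold the fw_attributes BIT_6 / IS_MSIX_NACK_CAPABLE
 * checks into one flag and are illustrative names only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_BIT(n)	(1u << (n))

static uint32_t demo_msix_handshake_opts(uint32_t opts2, bool fw_nack_capable,
					 bool msix_enabled)
{
	if (fw_nack_capable && msix_enabled)
		opts2 &= ~DEMO_BIT(22);		/* handshake-disable mode on */
	else
		opts2 |= DEMO_BIT(22);
	return opts2 | DEMO_BIT(23);		/* BIT_23 is set unconditionally */
}

int main(void)
{
	printf("opts2=0x%08x\n", demo_msix_handshake_opts(0, true, true));
	return 0;
}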
3775
1da177e4
LT
3776/**
3777 * qla2x00_init_rings() - Initializes firmware.
2db6228d 3778 * @vha: HA context
1da177e4
LT
3779 *
3780 * Beginning of request ring has initialization control block already built
3781 * by nvram config routine.
3782 *
3783 * Returns 0 on success.
3784 */
8ae6d9c7 3785int
e315cd28 3786qla2x00_init_rings(scsi_qla_host_t *vha)
1da177e4
LT
3787{
3788 int rval;
3789 unsigned long flags = 0;
29bdccbe 3790 int cnt, que;
e315cd28 3791 struct qla_hw_data *ha = vha->hw;
29bdccbe
AC
3792 struct req_que *req;
3793 struct rsp_que *rsp;
2c3dfe3f
SJ
3794 struct mid_init_cb_24xx *mid_init_cb =
3795 (struct mid_init_cb_24xx *) ha->init_cb;
1da177e4
LT
3796
3797 spin_lock_irqsave(&ha->hardware_lock, flags);
3798
3799 /* Clear outstanding commands array. */
2afa19a9 3800 for (que = 0; que < ha->max_req_queues; que++) {
29bdccbe 3801 req = ha->req_q_map[que];
cb43285f 3802 if (!req || !test_bit(que, ha->req_qid_map))
29bdccbe 3803 continue;
7c6300e3
JC
3804 req->out_ptr = (void *)(req->ring + req->length);
3805 *req->out_ptr = 0;
8d93f550 3806 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
29bdccbe 3807 req->outstanding_cmds[cnt] = NULL;
1da177e4 3808
2afa19a9 3809 req->current_outstanding_cmd = 1;
1da177e4 3810
29bdccbe
AC
3811 /* Initialize firmware. */
3812 req->ring_ptr = req->ring;
3813 req->ring_index = 0;
3814 req->cnt = req->length;
3815 }
1da177e4 3816
2afa19a9 3817 for (que = 0; que < ha->max_rsp_queues; que++) {
29bdccbe 3818 rsp = ha->rsp_q_map[que];
cb43285f 3819 if (!rsp || !test_bit(que, ha->rsp_qid_map))
29bdccbe 3820 continue;
7c6300e3
JC
3821 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
3822 *rsp->in_ptr = 0;
29bdccbe 3823 /* Initialize response queue entries */
8ae6d9c7
GM
3824 if (IS_QLAFX00(ha))
3825 qlafx00_init_response_q_entries(rsp);
3826 else
3827 qla2x00_init_response_q_entries(rsp);
29bdccbe 3828 }
1da177e4 3829
2d70c103
NB
3830 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
3831 ha->tgt.atio_ring_index = 0;
3832 /* Initialize ATIO queue entries */
3833 qlt_init_atio_q_entries(vha);
3834
e315cd28 3835 ha->isp_ops->config_rings(vha);
1da177e4
LT
3836
3837 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3838
8ae6d9c7
GM
3839 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
3840
3841 if (IS_QLAFX00(ha)) {
3842 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
3843 goto next_check;
3844 }
3845
1da177e4 3846 /* Update any ISP specific firmware options before initialization. */
e315cd28 3847 ha->isp_ops->update_fw_options(vha);
1da177e4 3848
605aa2bc 3849 if (ha->flags.npiv_supported) {
45980cc2 3850 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
605aa2bc 3851 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
c48339de 3852 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
605aa2bc
LC
3853 }
3854
24a08138 3855 if (IS_FWI2_CAPABLE(ha)) {
ad950360 3856 mid_init_cb->options = cpu_to_le16(BIT_1);
24a08138 3857 mid_init_cb->init_cb.execution_throttle =
03e8c680 3858 cpu_to_le16(ha->cur_fw_xcb_count);
40f3862b
JC
3859 ha->flags.dport_enabled =
3860 (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
3861 ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
3862 (ha->flags.dport_enabled) ? "enabled" : "disabled");
3863 /* FA-WWPN Status */
2486c627 3864 ha->flags.fawwpn_enabled =
40f3862b 3865 (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
83548fe2 3866 ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
2486c627 3867 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
24a08138 3868 }
2c3dfe3f 3869
e315cd28 3870 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
8ae6d9c7 3871next_check:
1da177e4 3872 if (rval) {
7c3df132
SK
3873 ql_log(ql_log_fatal, vha, 0x00d2,
3874 "Init Firmware **** FAILED ****.\n");
1da177e4 3875 } else {
7c3df132
SK
3876 ql_dbg(ql_dbg_init, vha, 0x00d3,
3877 "Init Firmware -- success.\n");
4b60c827 3878 QLA_FW_STARTED(ha);
1da177e4
LT
3879 }
3880
3881 return (rval);
3882}
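
/*
 * Illustrative, standalone sketch (not part of the driver): how a request
 * handle might be allocated over the outstanding-command array cleared in
 * qla2x00_init_rings() above.  Slot 0 is left unused, matching
 * current_outstanding_cmd = 1; locking, IOCB space accounting and the real
 * data types are omitted, and all names are hypothetical.
 */
#include <stdio.h>

#define DEMO_NUM_HANDLES 8

static void *demo_cmds[DEMO_NUM_HANDLES];
static unsigned int demo_cur = 1;		/* handle 0 is reserved */

static int demo_alloc_handle(void *sp)
{
	unsigned int i, h = demo_cur;

	for (i = 1; i < DEMO_NUM_HANDLES; i++) {
		h++;
		if (h == DEMO_NUM_HANDLES)
			h = 1;			/* wrap past the reserved slot */
		if (!demo_cmds[h]) {
			demo_cmds[h] = sp;
			demo_cur = h;
			return h;
		}
	}
	return -1;				/* ring full */
}

int main(void)
{
	int dummy;

	printf("first handle: %d\n", demo_alloc_handle(&dummy));
	return 0;
}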
3883
3884/**
3885 * qla2x00_fw_ready() - Waits for firmware ready.
2db6228d 3886 * @vha: HA context
1da177e4
LT
3887 *
3888 * Returns 0 on success.
3889 */
3890static int
e315cd28 3891qla2x00_fw_ready(scsi_qla_host_t *vha)
1da177e4
LT
3892{
3893 int rval;
4d4df193 3894 unsigned long wtime, mtime, cs84xx_time;
1da177e4
LT
3895 uint16_t min_wait; /* Minimum wait time if loop is down */
3896 uint16_t wait_time; /* Wait time if loop is coming ready */
b5a340dd 3897 uint16_t state[6];
e315cd28 3898 struct qla_hw_data *ha = vha->hw;
1da177e4 3899
8ae6d9c7
GM
3900 if (IS_QLAFX00(vha->hw))
3901 return qlafx00_fw_ready(vha);
3902
1da177e4
LT
3903 rval = QLA_SUCCESS;
3904
33461491
CD
3905 /* Time to wait for loop down */
3906 if (IS_P3P_TYPE(ha))
3907 min_wait = 30;
3908 else
3909 min_wait = 20;
1da177e4
LT
3910
3911 /*
3912 * Firmware should take at most one RATOV to login, plus 5 seconds for
3913 * our own processing.
3914 */
3915 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
3916 wait_time = min_wait;
3917 }
3918
3919 /* Min wait time if loop down */
3920 mtime = jiffies + (min_wait * HZ);
3921
3922 /* wait time before firmware ready */
3923 wtime = jiffies + (wait_time * HZ);
3924
3925 /* Wait for ISP to finish LIP */
e315cd28 3926 if (!vha->flags.init_done)
7c3df132
SK
3927 ql_log(ql_log_info, vha, 0x801e,
3928 "Waiting for LIP to complete.\n");
1da177e4
LT
3929
3930 do {
5b939038 3931 memset(state, -1, sizeof(state));
e315cd28 3932 rval = qla2x00_get_firmware_state(vha, state);
1da177e4 3933 if (rval == QLA_SUCCESS) {
4d4df193 3934 if (state[0] < FSTATE_LOSS_OF_SYNC) {
e315cd28 3935 vha->device_flags &= ~DFLG_NO_CABLE;
1da177e4 3936 }
4d4df193 3937 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
7c3df132
SK
3938 ql_dbg(ql_dbg_taskm, vha, 0x801f,
3939 "fw_state=%x 84xx=%x.\n", state[0],
3940 state[2]);
4d4df193
HK
3941 if ((state[2] & FSTATE_LOGGED_IN) &&
3942 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
7c3df132
SK
3943 ql_dbg(ql_dbg_taskm, vha, 0x8028,
3944 "Sending verify iocb.\n");
4d4df193
HK
3945
3946 cs84xx_time = jiffies;
e315cd28 3947 rval = qla84xx_init_chip(vha);
7c3df132
SK
3948 if (rval != QLA_SUCCESS) {
3949 ql_log(ql_log_warn,
cfb0919c 3950 vha, 0x8007,
7c3df132 3951 "Init chip failed.\n");
4d4df193 3952 break;
7c3df132 3953 }
4d4df193
HK
3954
3955 /* Add time taken to initialize. */
3956 cs84xx_time = jiffies - cs84xx_time;
3957 wtime += cs84xx_time;
3958 mtime += cs84xx_time;
cfb0919c 3959 ql_dbg(ql_dbg_taskm, vha, 0x8008,
7c3df132
SK
3960 "Increasing wait time by %ld. "
3961 "New time %ld.\n", cs84xx_time,
3962 wtime);
4d4df193
HK
3963 }
3964 } else if (state[0] == FSTATE_READY) {
7c3df132
SK
3965 ql_dbg(ql_dbg_taskm, vha, 0x8037,
3966 "F/W Ready - OK.\n");
1da177e4 3967
e315cd28 3968 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1da177e4
LT
3969 &ha->login_timeout, &ha->r_a_tov);
3970
3971 rval = QLA_SUCCESS;
3972 break;
3973 }
3974
3975 rval = QLA_FUNCTION_FAILED;
3976
e315cd28 3977 if (atomic_read(&vha->loop_down_timer) &&
4d4df193 3978 state[0] != FSTATE_READY) {
1da177e4 3979 /* Loop down. Timeout on min_wait for states
fa2a1ce5
AV
3980 * other than Wait for Login.
3981 */
1da177e4 3982 if (time_after_eq(jiffies, mtime)) {
7c3df132 3983 ql_log(ql_log_info, vha, 0x8038,
1da177e4
LT
3984 "Cable is unplugged...\n");
3985
e315cd28 3986 vha->device_flags |= DFLG_NO_CABLE;
1da177e4
LT
3987 break;
3988 }
3989 }
3990 } else {
3991 /* Mailbox cmd failed. Timeout on min_wait. */
cdbb0a4f 3992 if (time_after_eq(jiffies, mtime) ||
7190575f 3993 ha->flags.isp82xx_fw_hung)
1da177e4
LT
3994 break;
3995 }
3996
3997 if (time_after_eq(jiffies, wtime))
3998 break;
3999
4000 /* Delay for a while */
4001 msleep(500);
1da177e4
LT
4002 } while (1);
4003
7c3df132 4004 ql_dbg(ql_dbg_taskm, vha, 0x803a,
b5a340dd
JC
4005 "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
4006 state[1], state[2], state[3], state[4], state[5], jiffies);
1da177e4 4007
cfb0919c 4008 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
7c3df132
SK
4009 ql_log(ql_log_warn, vha, 0x803b,
4010 "Firmware ready **** FAILED ****.\n");
1da177e4
LT
4011 }
4012
4013 return (rval);
4014}
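
/*
 * Illustrative, standalone sketch (not part of the driver): the two-deadline
 * wait used by qla2x00_fw_ready() above -- a short deadline (mtime) that only
 * matters while the link is down, and a longer overall deadline (wtime).
 * time() stands in for jiffies, the firmware-state mailbox poll is faked out,
 * and the ISP84xx deadline extension is omitted.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool demo_wait_fw_ready(unsigned int min_wait, unsigned int wait_time)
{
	time_t mtime = time(NULL) + min_wait;
	time_t wtime = time(NULL) + wait_time;

	for (;;) {
		bool ready = false, link_down = true;	/* pretend poll results */

		if (ready)
			return true;
		if (link_down && time(NULL) >= mtime)
			return false;		/* "cable is unplugged" path */
		if (time(NULL) >= wtime)
			return false;		/* overall timeout */
		sleep(1);			/* the driver sleeps 500 ms */
	}
}

int main(void)
{
	printf("ready: %d\n", demo_wait_fw_ready(1, 2));
	return 0;
}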
4015
4016/*
4017* qla2x00_configure_hba
4018* Setup adapter context.
4019*
4020* Input:
4021* ha = adapter state pointer.
4022*
4023* Returns:
4024* 0 = success
4025*
4026* Context:
4027* Kernel context.
4028*/
4029static int
e315cd28 4030qla2x00_configure_hba(scsi_qla_host_t *vha)
1da177e4
LT
4031{
4032 int rval;
4033 uint16_t loop_id;
4034 uint16_t topo;
2c3dfe3f 4035 uint16_t sw_cap;
1da177e4
LT
4036 uint8_t al_pa;
4037 uint8_t area;
4038 uint8_t domain;
4039 char connect_type[22];
e315cd28 4040 struct qla_hw_data *ha = vha->hw;
61e1b269 4041 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
482c9dc7 4042 port_id_t id;
9d1aa4e1 4043 unsigned long flags;
1da177e4
LT
4044
4045 /* Get host addresses. */
e315cd28 4046 rval = qla2x00_get_adapter_id(vha,
2c3dfe3f 4047 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1da177e4 4048 if (rval != QLA_SUCCESS) {
e315cd28 4049 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
6246b8a1 4050 IS_CNA_CAPABLE(ha) ||
33135aa2 4051 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
7c3df132
SK
4052 ql_dbg(ql_dbg_disc, vha, 0x2008,
4053 "Loop is in a transition state.\n");
33135aa2 4054 } else {
7c3df132
SK
4055 ql_log(ql_log_warn, vha, 0x2009,
4056 "Unable to get host loop ID.\n");
61e1b269
JC
4057 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
4058 (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
4059 ql_log(ql_log_warn, vha, 0x1151,
4060 "Doing link init.\n");
4061 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
4062 return rval;
4063 }
e315cd28 4064 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
33135aa2 4065 }
1da177e4
LT
4066 return (rval);
4067 }
4068
4069 if (topo == 4) {
7c3df132
SK
4070 ql_log(ql_log_info, vha, 0x200a,
4071 "Cannot get topology - retrying.\n");
1da177e4
LT
4072 return (QLA_FUNCTION_FAILED);
4073 }
4074
e315cd28 4075 vha->loop_id = loop_id;
1da177e4
LT
4076
4077 /* initialize */
4078 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
4079 ha->operating_mode = LOOP;
2c3dfe3f 4080 ha->switch_cap = 0;
1da177e4
LT
4081
4082 switch (topo) {
4083 case 0:
7c3df132 4084 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
1da177e4
LT
4085 ha->current_topology = ISP_CFG_NL;
4086 strcpy(connect_type, "(Loop)");
4087 break;
4088
4089 case 1:
7c3df132 4090 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2c3dfe3f 4091 ha->switch_cap = sw_cap;
1da177e4
LT
4092 ha->current_topology = ISP_CFG_FL;
4093 strcpy(connect_type, "(FL_Port)");
4094 break;
4095
4096 case 2:
7c3df132 4097 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
1da177e4
LT
4098 ha->operating_mode = P2P;
4099 ha->current_topology = ISP_CFG_N;
4100 strcpy(connect_type, "(N_Port-to-N_Port)");
4101 break;
4102
4103 case 3:
7c3df132 4104 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2c3dfe3f 4105 ha->switch_cap = sw_cap;
1da177e4
LT
4106 ha->operating_mode = P2P;
4107 ha->current_topology = ISP_CFG_F;
4108 strcpy(connect_type, "(F_Port)");
4109 break;
4110
4111 default:
7c3df132
SK
4112 ql_dbg(ql_dbg_disc, vha, 0x200f,
4113 "HBA in unknown topology %x, using NL.\n", topo);
1da177e4
LT
4114 ha->current_topology = ISP_CFG_NL;
4115 strcpy(connect_type, "(Loop)");
4116 break;
4117 }
4118
4119 /* Save Host port and loop ID. */
4120 /* byte order - Big Endian */
482c9dc7
QT
4121 id.b.domain = domain;
4122 id.b.area = area;
4123 id.b.al_pa = al_pa;
4124 id.b.rsvd_1 = 0;
9d1aa4e1 4125 spin_lock_irqsave(&ha->hardware_lock, flags);
482c9dc7 4126 qlt_update_host_map(vha, id);
9d1aa4e1 4127 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2d70c103 4128
e315cd28 4129 if (!vha->flags.init_done)
7c3df132
SK
4130 ql_log(ql_log_info, vha, 0x2010,
4131 "Topology - %s, Host Loop address 0x%x.\n",
e315cd28 4132 connect_type, vha->loop_id);
1da177e4 4133
1da177e4
LT
4134 return(rval);
4135}
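
/*
 * Illustrative, standalone sketch (not part of the driver): how the
 * domain/area/al_pa bytes saved above form a 24-bit FC address.  This is a
 * plain-C stand-in for the driver's port_id_t union, where b.domain, b.area
 * and b.al_pa overlay the packed b24 value.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_fc_id(uint8_t domain, uint8_t area, uint8_t al_pa)
{
	return ((uint32_t)domain << 16) | ((uint32_t)area << 8) | al_pa;
}

int main(void)
{
	/* e.g. a fabric-assigned address of domain 0x01, area 0x02, al_pa 0xef */
	printf("d_id = 0x%06x\n", demo_fc_id(0x01, 0x02, 0xef));
	return 0;
}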
4136
a9083016 4137inline void
e315cd28
AC
4138qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
4139 char *def)
9bb9fcf2
AV
4140{
4141 char *st, *en;
4142 uint16_t index;
e315cd28 4143 struct qla_hw_data *ha = vha->hw;
ab671149 4144 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
6246b8a1 4145 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
9bb9fcf2
AV
4146
4147 if (memcmp(model, BINZERO, len) != 0) {
4148 strncpy(ha->model_number, model, len);
4149 st = en = ha->model_number;
4150 en += len - 1;
4151 while (en > st) {
4152 if (*en != 0x20 && *en != 0x00)
4153 break;
4154 *en-- = '\0';
4155 }
4156
4157 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
4158 if (use_tbl &&
4159 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2 4160 index < QLA_MODEL_NAMES)
1ee27146
JC
4161 strncpy(ha->model_desc,
4162 qla2x00_model_name[index * 2 + 1],
4163 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
4164 } else {
4165 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
4166 if (use_tbl &&
4167 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2
AV
4168 index < QLA_MODEL_NAMES) {
4169 strcpy(ha->model_number,
4170 qla2x00_model_name[index * 2]);
1ee27146
JC
4171 strncpy(ha->model_desc,
4172 qla2x00_model_name[index * 2 + 1],
4173 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
4174 } else {
4175 strcpy(ha->model_number, def);
4176 }
4177 }
1ee27146 4178 if (IS_FWI2_CAPABLE(ha))
e315cd28 4179 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1ee27146 4180 sizeof(ha->model_desc));
9bb9fcf2
AV
4181}
4182
4e08df3f
DM
4183/* On sparc systems, obtain port and node WWN from firmware
4184 * properties.
4185 */
e315cd28 4186static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
4e08df3f
DM
4187{
4188#ifdef CONFIG_SPARC
e315cd28 4189 struct qla_hw_data *ha = vha->hw;
4e08df3f 4190 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
4191 struct device_node *dp = pci_device_to_OF_node(pdev);
4192 const u8 *val;
4e08df3f
DM
4193 int len;
4194
4195 val = of_get_property(dp, "port-wwn", &len);
4196 if (val && len >= WWN_SIZE)
4197 memcpy(nv->port_name, val, WWN_SIZE);
4198
4199 val = of_get_property(dp, "node-wwn", &len);
4200 if (val && len >= WWN_SIZE)
4201 memcpy(nv->node_name, val, WWN_SIZE);
4202#endif
4203}
4204
1da177e4
LT
4205/*
4206* NVRAM configuration for ISP 2xxx
4207*
4208* Input:
4209* ha = adapter block pointer.
4210*
4211* Output:
4212* initialization control block in response_ring
4213* host adapters parameters in host adapter block
4214*
4215* Returns:
4216* 0 = success.
4217*/
abbd8870 4218int
e315cd28 4219qla2x00_nvram_config(scsi_qla_host_t *vha)
1da177e4 4220{
4e08df3f 4221 int rval;
0107109e
AV
4222 uint8_t chksum = 0;
4223 uint16_t cnt;
4224 uint8_t *dptr1, *dptr2;
e315cd28 4225 struct qla_hw_data *ha = vha->hw;
0107109e 4226 init_cb_t *icb = ha->init_cb;
281afe19
SJ
4227 nvram_t *nv = ha->nvram;
4228 uint8_t *ptr = ha->nvram;
3d71644c 4229 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 4230
4e08df3f
DM
4231 rval = QLA_SUCCESS;
4232
1da177e4 4233 /* Determine NVRAM starting address. */
0107109e 4234 ha->nvram_size = sizeof(nvram_t);
1da177e4
LT
4235 ha->nvram_base = 0;
4236 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
4237 if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
4238 ha->nvram_base = 0x80;
4239
4240 /* Get NVRAM data and calculate checksum. */
e315cd28 4241 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
0107109e
AV
4242 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
4243 chksum += *ptr++;
1da177e4 4244
7c3df132
SK
4245 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
4246 "Contents of NVRAM.\n");
4247 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
4248 (uint8_t *)nv, ha->nvram_size);
1da177e4
LT
4249
4250 /* Bad NVRAM data, set defaults parameters. */
4251 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
4252 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
4253 /* Reset NVRAM data. */
7c3df132 4254 ql_log(ql_log_warn, vha, 0x0064,
9e336520 4255 "Inconsistent NVRAM "
7c3df132
SK
4256 "detected: checksum=0x%x id=%c version=0x%x.\n",
4257 chksum, nv->id[0], nv->nvram_version);
4258 ql_log(ql_log_warn, vha, 0x0065,
4259 "Falling back to "
4260 "functioning (yet invalid -- WWPN) defaults.\n");
4e08df3f
DM
4261
4262 /*
4263 * Set default initialization control block.
4264 */
4265 memset(nv, 0, ha->nvram_size);
4266 nv->parameter_block_version = ICB_VERSION;
4267
4268 if (IS_QLA23XX(ha)) {
4269 nv->firmware_options[0] = BIT_2 | BIT_1;
4270 nv->firmware_options[1] = BIT_7 | BIT_5;
4271 nv->add_firmware_options[0] = BIT_5;
4272 nv->add_firmware_options[1] = BIT_5 | BIT_4;
98aee70d 4273 nv->frame_payload_size = 2048;
4e08df3f
DM
4274 nv->special_options[1] = BIT_7;
4275 } else if (IS_QLA2200(ha)) {
4276 nv->firmware_options[0] = BIT_2 | BIT_1;
4277 nv->firmware_options[1] = BIT_7 | BIT_5;
4278 nv->add_firmware_options[0] = BIT_5;
4279 nv->add_firmware_options[1] = BIT_5 | BIT_4;
98aee70d 4280 nv->frame_payload_size = 1024;
4e08df3f
DM
4281 } else if (IS_QLA2100(ha)) {
4282 nv->firmware_options[0] = BIT_3 | BIT_1;
4283 nv->firmware_options[1] = BIT_5;
98aee70d 4284 nv->frame_payload_size = 1024;
4e08df3f
DM
4285 }
4286
ad950360
BVA
4287 nv->max_iocb_allocation = cpu_to_le16(256);
4288 nv->execution_throttle = cpu_to_le16(16);
4e08df3f
DM
4289 nv->retry_count = 8;
4290 nv->retry_delay = 1;
4291
4292 nv->port_name[0] = 33;
4293 nv->port_name[3] = 224;
4294 nv->port_name[4] = 139;
4295
e315cd28 4296 qla2xxx_nvram_wwn_from_ofw(vha, nv);
4e08df3f
DM
4297
4298 nv->login_timeout = 4;
4299
4300 /*
4301 * Set default host adapter parameters
4302 */
4303 nv->host_p[1] = BIT_2;
4304 nv->reset_delay = 5;
4305 nv->port_down_retry_count = 8;
ad950360 4306 nv->max_luns_per_target = cpu_to_le16(8);
4e08df3f
DM
4307 nv->link_down_timeout = 60;
4308
4309 rval = 1;
1da177e4
LT
4310 }
4311
4312#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
4313 /*
4314 * The SN2 does not provide BIOS emulation which means you can't change
4315 * potentially bogus BIOS settings. Force the use of default settings
4316 * for link rate and frame size. Hope that the rest of the settings
4317 * are valid.
4318 */
4319 if (ia64_platform_is("sn2")) {
98aee70d 4320 nv->frame_payload_size = 2048;
1da177e4
LT
4321 if (IS_QLA23XX(ha))
4322 nv->special_options[1] = BIT_7;
4323 }
4324#endif
4325
4326 /* Reset Initialization control block */
0107109e 4327 memset(icb, 0, ha->init_cb_size);
1da177e4
LT
4328
4329 /*
4330 * Setup driver NVRAM options.
4331 */
4332 nv->firmware_options[0] |= (BIT_6 | BIT_1);
4333 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
4334 nv->firmware_options[1] |= (BIT_5 | BIT_0);
4335 nv->firmware_options[1] &= ~BIT_4;
4336
4337 if (IS_QLA23XX(ha)) {
4338 nv->firmware_options[0] |= BIT_2;
4339 nv->firmware_options[0] &= ~BIT_3;
2d70c103 4340 nv->special_options[0] &= ~BIT_6;
0107109e 4341 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
1da177e4
LT
4342
4343 if (IS_QLA2300(ha)) {
4344 if (ha->fb_rev == FPM_2310) {
4345 strcpy(ha->model_number, "QLA2310");
4346 } else {
4347 strcpy(ha->model_number, "QLA2300");
4348 }
4349 } else {
e315cd28 4350 qla2x00_set_model_info(vha, nv->model_number,
9bb9fcf2 4351 sizeof(nv->model_number), "QLA23xx");
1da177e4
LT
4352 }
4353 } else if (IS_QLA2200(ha)) {
4354 nv->firmware_options[0] |= BIT_2;
4355 /*
4356 * 'Point-to-point preferred, else loop' is not a safe
4357 * connection mode setting.
4358 */
4359 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
4360 (BIT_5 | BIT_4)) {
4361 /* Force 'loop preferred, else point-to-point'. */
4362 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
4363 nv->add_firmware_options[0] |= BIT_5;
4364 }
4365 strcpy(ha->model_number, "QLA22xx");
4366 } else /*if (IS_QLA2100(ha))*/ {
4367 strcpy(ha->model_number, "QLA2100");
4368 }
4369
4370 /*
4371 * Copy over NVRAM RISC parameter block to initialization control block.
4372 */
4373 dptr1 = (uint8_t *)icb;
4374 dptr2 = (uint8_t *)&nv->parameter_block_version;
4375 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
4376 while (cnt--)
4377 *dptr1++ = *dptr2++;
4378
4379 /* Copy 2nd half. */
4380 dptr1 = (uint8_t *)icb->add_firmware_options;
4381 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
4382 while (cnt--)
4383 *dptr1++ = *dptr2++;
4384
5341e868
AV
4385 /* Use alternate WWN? */
4386 if (nv->host_p[1] & BIT_7) {
4387 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4388 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4389 }
4390
1da177e4
LT
4391 /* Prepare nodename */
4392 if ((icb->firmware_options[1] & BIT_6) == 0) {
4393 /*
4394 * Firmware will apply the following mask if the nodename was
4395 * not provided.
4396 */
4397 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4398 icb->node_name[0] &= 0xF0;
4399 }
4400
4401 /*
4402 * Set host adapter parameters.
4403 */
3ce8866c
SK
4404
4405 /*
4406 * BIT_7 in the host-parameters section allows for modification to
4407 * internal driver logging.
4408 */
0181944f 4409 if (nv->host_p[0] & BIT_7)
cfb0919c 4410 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
1da177e4
LT
4411 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
4412 /* Always load RISC code on non ISP2[12]00 chips. */
4413 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
4414 ha->flags.disable_risc_code_load = 0;
4415 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
4416 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
4417 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
06c22bd1 4418 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
d4c760c2 4419 ha->flags.disable_serdes = 0;
1da177e4
LT
4420
4421 ha->operating_mode =
4422 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
4423
4424 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
4425 sizeof(ha->fw_seriallink_options));
4426
4427 /* save HBA serial number */
4428 ha->serial0 = icb->port_name[5];
4429 ha->serial1 = icb->port_name[6];
4430 ha->serial2 = icb->port_name[7];
e315cd28
AC
4431 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4432 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1da177e4 4433
ad950360 4434 icb->execution_throttle = cpu_to_le16(0xFFFF);
1da177e4
LT
4435
4436 ha->retry_count = nv->retry_count;
4437
4438 /* Set minimum login_timeout to 4 seconds. */
5b91490e 4439 if (nv->login_timeout != ql2xlogintimeout)
1da177e4
LT
4440 nv->login_timeout = ql2xlogintimeout;
4441 if (nv->login_timeout < 4)
4442 nv->login_timeout = 4;
4443 ha->login_timeout = nv->login_timeout;
1da177e4 4444
00a537b8
AV
4445 /* Set minimum RATOV to 100 tenths of a second. */
4446 ha->r_a_tov = 100;
1da177e4 4447
1da177e4
LT
4448 ha->loop_reset_delay = nv->reset_delay;
4449
1da177e4
LT
4450 /* Link Down Timeout = 0:
4451 *
4452 * When Port Down timer expires we will start returning
4453 * I/O's to OS with "DID_NO_CONNECT".
4454 *
4455 * Link Down Timeout != 0:
4456 *
4457 * The driver waits for the link to come up after link down
4458 * before returning I/Os to OS with "DID_NO_CONNECT".
fa2a1ce5 4459 */
1da177e4
LT
4460 if (nv->link_down_timeout == 0) {
4461 ha->loop_down_abort_time =
354d6b21 4462 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
1da177e4
LT
4463 } else {
4464 ha->link_down_timeout = nv->link_down_timeout;
4465 ha->loop_down_abort_time =
4466 (LOOP_DOWN_TIME - ha->link_down_timeout);
fa2a1ce5 4467 }
1da177e4 4468
1da177e4
LT
4469 /*
4470 * Need enough time to try and get the port back.
4471 */
4472 ha->port_down_retry_count = nv->port_down_retry_count;
4473 if (qlport_down_retry)
4474 ha->port_down_retry_count = qlport_down_retry;
4475 /* Set login_retry_count */
4476 ha->login_retry_count = nv->retry_count;
4477 if (ha->port_down_retry_count == nv->port_down_retry_count &&
4478 ha->port_down_retry_count > 3)
4479 ha->login_retry_count = ha->port_down_retry_count;
4480 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4481 ha->login_retry_count = ha->port_down_retry_count;
4482 if (ql2xloginretrycount)
4483 ha->login_retry_count = ql2xloginretrycount;
4484
ad950360 4485 icb->lun_enables = cpu_to_le16(0);
1da177e4
LT
4486 icb->command_resource_count = 0;
4487 icb->immediate_notify_resource_count = 0;
ad950360 4488 icb->timeout = cpu_to_le16(0);
1da177e4
LT
4489
4490 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4491 /* Enable RIO */
4492 icb->firmware_options[0] &= ~BIT_3;
4493 icb->add_firmware_options[0] &=
4494 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
4495 icb->add_firmware_options[0] |= BIT_2;
4496 icb->response_accumulation_timer = 3;
4497 icb->interrupt_delay_timer = 5;
4498
e315cd28 4499 vha->flags.process_response_queue = 1;
1da177e4 4500 } else {
4fdfefe5 4501 /* Enable ZIO. */
e315cd28 4502 if (!vha->flags.init_done) {
4fdfefe5
AV
4503 ha->zio_mode = icb->add_firmware_options[0] &
4504 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4505 ha->zio_timer = icb->interrupt_delay_timer ?
4506 icb->interrupt_delay_timer: 2;
4507 }
1da177e4
LT
4508 icb->add_firmware_options[0] &=
4509 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
e315cd28 4510 vha->flags.process_response_queue = 0;
4fdfefe5 4511 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d 4512 ha->zio_mode = QLA_ZIO_MODE_6;
4513
7c3df132 4514 ql_log(ql_log_info, vha, 0x0068,
4fdfefe5
AV
4515 "ZIO mode %d enabled; timer delay (%d us).\n",
4516 ha->zio_mode, ha->zio_timer * 100);
1da177e4 4517
4fdfefe5
AV
4518 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
4519 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
e315cd28 4520 vha->flags.process_response_queue = 1;
1da177e4
LT
4521 }
4522 }
4523
4e08df3f 4524 if (rval) {
7c3df132
SK
4525 ql_log(ql_log_warn, vha, 0x0069,
4526 "NVRAM configuration failed.\n");
4e08df3f
DM
4527 }
4528 return (rval);
1da177e4
LT
 4529 }
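
/*
 * Illustrative, standalone sketch (not part of the driver): the NVRAM sanity
 * check performed at the top of qla2x00_nvram_config() above.  Summing every
 * byte of the image, including the stored checksum byte, must yield 0
 * (mod 256) and the image must begin with the "ISP " id; the version check
 * and the real layout are omitted here.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool demo_nvram_ok(const uint8_t *nv, size_t size)
{
	uint8_t chksum = 0;
	size_t i;

	for (i = 0; i < size; i++)
		chksum += nv[i];
	return chksum == 0 && memcmp(nv, "ISP ", 4) == 0;
}

int main(void)
{
	uint8_t nv[16] = "ISP ";
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < sizeof(nv) - 1; i++)
		sum += nv[i];
	nv[sizeof(nv) - 1] = (uint8_t)(0x100 - sum);	/* checksum fix-up byte */
	printf("valid: %d\n", demo_nvram_ok(nv, sizeof(nv)));
	return 0;
}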
4530
19a7b4ae
JSEC
4531static void
4532qla2x00_rport_del(void *data)
4533{
4534 fc_port_t *fcport = data;
d97994dc 4535 struct fc_rport *rport;
044d78e1 4536 unsigned long flags;
d97994dc 4537
044d78e1 4538 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
ac280b67 4539 rport = fcport->drport ? fcport->drport: fcport->rport;
d97994dc 4540 fcport->drport = NULL;
044d78e1 4541 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
726b8548 4542 if (rport) {
83548fe2
QT
4543 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
4544 "%s %8phN. rport %p roles %x\n",
4545 __func__, fcport->port_name, rport,
4546 rport->roles);
726b8548 4547
d97994dc 4548 fc_remote_port_delete(rport);
726b8548 4549 }
19a7b4ae
JSEC
4550}
4551
1da177e4
LT
4552/**
4553 * qla2x00_alloc_fcport() - Allocate a generic fcport.
2db6228d 4554 * @vha: HA context
1da177e4
LT
4555 * @flags: allocation flags
4556 *
4557 * Returns a pointer to the allocated fcport, or NULL, if none available.
4558 */
9a069e19 4559fc_port_t *
e315cd28 4560qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1da177e4
LT
4561{
4562 fc_port_t *fcport;
4563
bbfbbbc1
MK
4564 fcport = kzalloc(sizeof(fc_port_t), flags);
4565 if (!fcport)
4566 return NULL;
1da177e4
LT
4567
4568 /* Setup fcport template structure. */
e315cd28 4569 fcport->vha = vha;
1da177e4
LT
4570 fcport->port_type = FCT_UNKNOWN;
4571 fcport->loop_id = FC_NO_LOOP_ID;
ec426e10 4572 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
ad3e0eda 4573 fcport->supported_classes = FC_COS_UNSPECIFIED;
1da177e4 4574
726b8548
QT
4575 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
4576 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
6cb3216a 4577 flags);
726b8548
QT
4578 fcport->disc_state = DSC_DELETED;
4579 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
4580 fcport->deleted = QLA_SESS_DELETED;
4581 fcport->login_retry = vha->hw->login_retry_count;
726b8548
QT
4582 fcport->logout_on_delete = 1;
4583
4584 if (!fcport->ct_desc.ct_sns) {
83548fe2 4585 ql_log(ql_log_warn, vha, 0xd049,
726b8548
QT
4586 "Failed to allocate ct_sns request.\n");
4587 kfree(fcport);
4588 fcport = NULL;
4589 }
4590 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
4591 INIT_LIST_HEAD(&fcport->gnl_entry);
4592 INIT_LIST_HEAD(&fcport->list);
4593
bbfbbbc1 4594 return fcport;
1da177e4
LT
4595}
4596
726b8548
QT
4597void
4598qla2x00_free_fcport(fc_port_t *fcport)
4599{
4600 if (fcport->ct_desc.ct_sns) {
4601 dma_free_coherent(&fcport->vha->hw->pdev->dev,
4602 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
4603 fcport->ct_desc.ct_sns_dma);
4604
4605 fcport->ct_desc.ct_sns = NULL;
4606 }
4607 kfree(fcport);
4608}
4609
1da177e4
LT
4610/*
4611 * qla2x00_configure_loop
4612 * Updates Fibre Channel Device Database with what is actually on loop.
4613 *
4614 * Input:
4615 * ha = adapter block pointer.
4616 *
4617 * Returns:
 4618 * QLA_SUCCESS = success.
 4619 * Non-zero QLA_* code = error (e.g. QLA_FUNCTION_FAILED).
4621 */
4622static int
e315cd28 4623qla2x00_configure_loop(scsi_qla_host_t *vha)
1da177e4
LT
4624{
4625 int rval;
4626 unsigned long flags, save_flags;
e315cd28 4627 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
4628 rval = QLA_SUCCESS;
4629
4630 /* Get Initiator ID */
e315cd28
AC
4631 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
4632 rval = qla2x00_configure_hba(vha);
1da177e4 4633 if (rval != QLA_SUCCESS) {
7c3df132
SK
4634 ql_dbg(ql_dbg_disc, vha, 0x2013,
4635 "Unable to configure HBA.\n");
1da177e4
LT
4636 return (rval);
4637 }
4638 }
4639
e315cd28 4640 save_flags = flags = vha->dpc_flags;
7c3df132
SK
4641 ql_dbg(ql_dbg_disc, vha, 0x2014,
4642 "Configure loop -- dpc flags = 0x%lx.\n", flags);
1da177e4
LT
4643
4644 /*
4645 * If we have both an RSCN and PORT UPDATE pending then handle them
4646 * both at the same time.
4647 */
e315cd28
AC
4648 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4649 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
1da177e4 4650
3064ff39
MH
4651 qla2x00_get_data_rate(vha);
4652
1da177e4
LT
4653 /* Determine what we need to do */
4654 if (ha->current_topology == ISP_CFG_FL &&
4655 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
4656
1da177e4
LT
4657 set_bit(RSCN_UPDATE, &flags);
4658
4659 } else if (ha->current_topology == ISP_CFG_F &&
4660 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
4661
1da177e4
LT
4662 set_bit(RSCN_UPDATE, &flags);
4663 clear_bit(LOCAL_LOOP_UPDATE, &flags);
21333b48
AV
4664
4665 } else if (ha->current_topology == ISP_CFG_N) {
4666 clear_bit(RSCN_UPDATE, &flags);
48acad09
QT
4667 if (qla_tgt_mode_enabled(vha)) {
4668 /* allow the other side to start the login */
9cd883f0
QT
4669 clear_bit(LOCAL_LOOP_UPDATE, &flags);
4670 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
9cd883f0 4671 }
41dc529a
QT
4672 } else if (ha->current_topology == ISP_CFG_NL) {
4673 clear_bit(RSCN_UPDATE, &flags);
4674 set_bit(LOCAL_LOOP_UPDATE, &flags);
e315cd28 4675 } else if (!vha->flags.online ||
1da177e4 4676 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
1da177e4
LT
4677 set_bit(RSCN_UPDATE, &flags);
4678 set_bit(LOCAL_LOOP_UPDATE, &flags);
4679 }
4680
4681 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
7c3df132
SK
4682 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
4683 ql_dbg(ql_dbg_disc, vha, 0x2015,
4684 "Loop resync needed, failing.\n");
1da177e4 4685 rval = QLA_FUNCTION_FAILED;
642ef983 4686 } else
e315cd28 4687 rval = qla2x00_configure_local_loop(vha);
1da177e4
LT
4688 }
4689
4690 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
7c3df132 4691 if (LOOP_TRANSITION(vha)) {
83548fe2 4692 ql_dbg(ql_dbg_disc, vha, 0x2099,
7c3df132 4693 "Needs RSCN update and loop transition.\n");
1da177e4 4694 rval = QLA_FUNCTION_FAILED;
7c3df132 4695 }
e315cd28
AC
4696 else
4697 rval = qla2x00_configure_fabric(vha);
1da177e4
LT
4698 }
4699
4700 if (rval == QLA_SUCCESS) {
e315cd28
AC
4701 if (atomic_read(&vha->loop_down_timer) ||
4702 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4
LT
4703 rval = QLA_FUNCTION_FAILED;
4704 } else {
e315cd28 4705 atomic_set(&vha->loop_state, LOOP_READY);
7c3df132
SK
4706 ql_dbg(ql_dbg_disc, vha, 0x2069,
4707 "LOOP READY.\n");
ec7193e2 4708 ha->flags.fw_init_done = 1;
3bb67df5
DKU
4709
4710 /*
4711 * Process any ATIO queue entries that came in
4712 * while we weren't online.
4713 */
ead03855
QT
4714 if (qla_tgt_mode_enabled(vha) ||
4715 qla_dual_mode_enabled(vha)) {
3bb67df5
DKU
4716 if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
4717 spin_lock_irqsave(&ha->tgt.atio_lock,
4718 flags);
4719 qlt_24xx_process_atio_queue(vha, 0);
4720 spin_unlock_irqrestore(
4721 &ha->tgt.atio_lock, flags);
4722 } else {
4723 spin_lock_irqsave(&ha->hardware_lock,
4724 flags);
4725 qlt_24xx_process_atio_queue(vha, 1);
4726 spin_unlock_irqrestore(
4727 &ha->hardware_lock, flags);
4728 }
4729 }
1da177e4
LT
4730 }
4731 }
4732
4733 if (rval) {
7c3df132
SK
4734 ql_dbg(ql_dbg_disc, vha, 0x206a,
4735 "%s *** FAILED ***.\n", __func__);
1da177e4 4736 } else {
7c3df132
SK
4737 ql_dbg(ql_dbg_disc, vha, 0x206b,
4738 "%s: exiting normally.\n", __func__);
1da177e4
LT
4739 }
4740
cc3ef7bc 4741 /* Restore state if a resync event occurred during processing */
e315cd28 4742 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4 4743 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
e315cd28 4744 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
f4658b6c 4745 if (test_bit(RSCN_UPDATE, &save_flags)) {
e315cd28 4746 set_bit(RSCN_UPDATE, &vha->dpc_flags);
f4658b6c 4747 }
1da177e4
LT
4748 }
4749
4750 return (rval);
4751}
4752
1da177e4
LT
4753/*
4754 * qla2x00_configure_local_loop
4755 * Updates Fibre Channel Device Database with local loop devices.
4756 *
4757 * Input:
4758 * ha = adapter block pointer.
4759 *
4760 * Returns:
4761 * 0 = success.
4762 */
4763static int
e315cd28 4764qla2x00_configure_local_loop(scsi_qla_host_t *vha)
1da177e4
LT
4765{
4766 int rval, rval2;
4767 int found_devs;
4768 int found;
4769 fc_port_t *fcport, *new_fcport;
4770
4771 uint16_t index;
4772 uint16_t entries;
4773 char *id_iter;
4774 uint16_t loop_id;
4775 uint8_t domain, area, al_pa;
e315cd28 4776 struct qla_hw_data *ha = vha->hw;
41dc529a 4777 unsigned long flags;
1da177e4
LT
4778
4779 found_devs = 0;
4780 new_fcport = NULL;
642ef983 4781 entries = MAX_FIBRE_DEVICES_LOOP;
1da177e4 4782
1da177e4 4783 /* Get list of logged in devices. */
642ef983 4784 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
e315cd28 4785 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
1da177e4
LT
4786 &entries);
4787 if (rval != QLA_SUCCESS)
4788 goto cleanup_allocation;
4789
83548fe2 4790 ql_dbg(ql_dbg_disc, vha, 0x2011,
7c3df132
SK
4791 "Entries in ID list (%d).\n", entries);
4792 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
4793 (uint8_t *)ha->gid_list,
4794 entries * sizeof(struct gid_list_info));
1da177e4 4795
9cd883f0
QT
4796 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4797 fcport->scan_state = QLA_FCPORT_SCAN;
4798 }
4799
1da177e4 4800 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 4801 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 4802 if (new_fcport == NULL) {
83548fe2 4803 ql_log(ql_log_warn, vha, 0x2012,
7c3df132 4804 "Memory allocation failed for fcport.\n");
1da177e4
LT
4805 rval = QLA_MEMORY_ALLOC_FAILED;
4806 goto cleanup_allocation;
4807 }
4808 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
4809
1da177e4
LT
4810 /* Add devices to port list. */
4811 id_iter = (char *)ha->gid_list;
4812 for (index = 0; index < entries; index++) {
4813 domain = ((struct gid_list_info *)id_iter)->domain;
4814 area = ((struct gid_list_info *)id_iter)->area;
4815 al_pa = ((struct gid_list_info *)id_iter)->al_pa;
abbd8870 4816 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1da177e4
LT
4817 loop_id = (uint16_t)
4818 ((struct gid_list_info *)id_iter)->loop_id_2100;
abbd8870 4819 else
1da177e4
LT
4820 loop_id = le16_to_cpu(
4821 ((struct gid_list_info *)id_iter)->loop_id);
abbd8870 4822 id_iter += ha->gid_list_info_size;
1da177e4
LT
4823
4824 /* Bypass reserved domain fields. */
4825 if ((domain & 0xf0) == 0xf0)
4826 continue;
4827
4828 /* Bypass if not same domain and area of adapter. */
f7d289f6 4829 if (area && domain &&
e315cd28 4830 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
1da177e4
LT
4831 continue;
4832
4833 /* Bypass invalid local loop ID. */
4834 if (loop_id > LAST_LOCAL_LOOP_ID)
4835 continue;
4836
41dc529a 4837 memset(new_fcport->port_name, 0, WWN_SIZE);
370d550e 4838
1da177e4
LT
4839 /* Fill in member data. */
4840 new_fcport->d_id.b.domain = domain;
4841 new_fcport->d_id.b.area = area;
4842 new_fcport->d_id.b.al_pa = al_pa;
4843 new_fcport->loop_id = loop_id;
9cd883f0 4844 new_fcport->scan_state = QLA_FCPORT_FOUND;
41dc529a 4845
e315cd28 4846 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
1da177e4 4847 if (rval2 != QLA_SUCCESS) {
83548fe2 4848 ql_dbg(ql_dbg_disc, vha, 0x2097,
7c3df132
SK
4849 "Failed to retrieve fcport information "
4850 "-- get_port_database=%x, loop_id=0x%04x.\n",
4851 rval2, new_fcport->loop_id);
edd05de1
DG
4852 /* Skip retry if N2N */
4853 if (ha->current_topology != ISP_CFG_N) {
4854 ql_dbg(ql_dbg_disc, vha, 0x2105,
4855 "Scheduling resync.\n");
4856 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4857 continue;
4858 }
1da177e4
LT
4859 }
4860
41dc529a 4861 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1da177e4
LT
4862 /* Check for matching device in port list. */
4863 found = 0;
4864 fcport = NULL;
e315cd28 4865 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
4866 if (memcmp(new_fcport->port_name, fcport->port_name,
4867 WWN_SIZE))
4868 continue;
4869
ddb9b126 4870 fcport->flags &= ~FCF_FABRIC_DEVICE;
1da177e4
LT
4871 fcport->loop_id = new_fcport->loop_id;
4872 fcport->port_type = new_fcport->port_type;
4873 fcport->d_id.b24 = new_fcport->d_id.b24;
4874 memcpy(fcport->node_name, new_fcport->node_name,
4875 WWN_SIZE);
9cd883f0 4876 fcport->scan_state = QLA_FCPORT_FOUND;
1da177e4
LT
4877 found++;
4878 break;
4879 }
4880
4881 if (!found) {
4882 /* New device, add to fcports list. */
e315cd28 4883 list_add_tail(&new_fcport->list, &vha->vp_fcports);
1da177e4
LT
4884
4885 /* Allocate a new replacement fcport. */
4886 fcport = new_fcport;
41dc529a
QT
4887
4888 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4889
e315cd28 4890 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
41dc529a 4891
1da177e4 4892 if (new_fcport == NULL) {
83548fe2 4893 ql_log(ql_log_warn, vha, 0xd031,
7c3df132 4894 "Failed to allocate memory for fcport.\n");
1da177e4
LT
4895 rval = QLA_MEMORY_ALLOC_FAILED;
4896 goto cleanup_allocation;
4897 }
41dc529a 4898 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1da177e4
LT
4899 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
4900 }
4901
41dc529a
QT
4902 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4903
d8b45213 4904 /* Base iIDMA settings on HBA port speed. */
a3cbdfad 4905 fcport->fp_speed = ha->link_data_rate;
d8b45213 4906
1da177e4
LT
4907 found_devs++;
4908 }
4909
9cd883f0
QT
4910 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4911 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4912 break;
4913
4914 if (fcport->scan_state == QLA_FCPORT_SCAN) {
4915 if ((qla_dual_mode_enabled(vha) ||
4916 qla_ini_mode_enabled(vha)) &&
4917 atomic_read(&fcport->state) == FCS_ONLINE) {
4918 qla2x00_mark_device_lost(vha, fcport,
4919 ql2xplogiabsentdevice, 0);
4920 if (fcport->loop_id != FC_NO_LOOP_ID &&
4921 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
4922 fcport->port_type != FCT_INITIATOR &&
4923 fcport->port_type != FCT_BROADCAST) {
4924 ql_dbg(ql_dbg_disc, vha, 0x20f0,
4925 "%s %d %8phC post del sess\n",
4926 __func__, __LINE__,
4927 fcport->port_name);
4928
d8630bb9 4929 qlt_schedule_sess_for_deletion(fcport);
9cd883f0
QT
4930 continue;
4931 }
4932 }
4933 }
4934
4935 if (fcport->scan_state == QLA_FCPORT_FOUND)
4936 qla24xx_fcport_handle_login(vha, fcport);
4937 }
4938
1da177e4 4939cleanup_allocation:
c9475cb0 4940 kfree(new_fcport);
1da177e4
LT
4941
4942 if (rval != QLA_SUCCESS) {
83548fe2 4943 ql_dbg(ql_dbg_disc, vha, 0x2098,
7c3df132 4944 "Configure local loop error exit: rval=%x.\n", rval);
1da177e4
LT
4945 }
4946
1da177e4
LT
4947 return (rval);
4948}
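
/*
 * Illustrative, standalone sketch (not part of the driver): the bypass rules
 * applied to each gid_list entry in the loop above -- reserved domains
 * (0xFx) and loop IDs beyond the local-loop range are skipped.  The
 * LAST_LOCAL_LOOP_ID value and the omission of the "same domain/area as the
 * adapter" test are simplifications.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_LAST_LOCAL_LOOP_ID 0x7d	/* stand-in for the driver's constant */

static bool demo_keep_gid_entry(uint8_t domain, uint16_t loop_id)
{
	if ((domain & 0xf0) == 0xf0)		/* reserved domain */
		return false;
	if (loop_id > DEMO_LAST_LOCAL_LOOP_ID)	/* not a local-loop ID */
		return false;
	return true;
}

int main(void)
{
	printf("%d %d\n", demo_keep_gid_entry(0x00, 0x10),
	    demo_keep_gid_entry(0xff, 0x10));
	return 0;
}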
4949
d8b45213 4950static void
e315cd28 4951qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
d8b45213 4952{
d8b45213 4953 int rval;
93f2bd67 4954 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 4955 struct qla_hw_data *ha = vha->hw;
d8b45213 4956
c76f2c01 4957 if (!IS_IIDMA_CAPABLE(ha))
d8b45213
AV
4958 return;
4959
c9afb9a2
GM
4960 if (atomic_read(&fcport->state) != FCS_ONLINE)
4961 return;
4962
39bd9622 4963 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
413c2f33
HM
4964 fcport->fp_speed > ha->link_data_rate ||
4965 !ha->flags.gpsc_supported)
d8b45213
AV
4966 return;
4967
e315cd28 4968 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
a3cbdfad 4969 mb);
d8b45213 4970 if (rval != QLA_SUCCESS) {
7c3df132 4971 ql_dbg(ql_dbg_disc, vha, 0x2004,
7b833558
OK
4972 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
4973 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
d8b45213 4974 } else {
7c3df132 4975 ql_dbg(ql_dbg_disc, vha, 0x2005,
33b28357 4976 "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
d0297c9a 4977 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
33b28357 4978 fcport->fp_speed, fcport->port_name);
d8b45213
AV
4979 }
4980}
4981
cc28e0ac
QT
4982void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4983{
4984 qla2x00_iidma_fcport(vha, fcport);
4985 qla24xx_update_fcport_fcp_prio(vha, fcport);
4986}
4987
4988int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4989{
4990 struct qla_work_evt *e;
4991
4992 e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
4993 if (!e)
4994 return QLA_FUNCTION_FAILED;
4995
4996 e->u.fcport.fcport = fcport;
4997 return qla2x00_post_work(vha, e);
4998}
4999
726b8548 5000/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
23be331d 5001static void
e315cd28 5002qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
8482e118 5003{
5004 struct fc_rport_identifiers rport_ids;
bdf79621 5005 struct fc_rport *rport;
044d78e1 5006 unsigned long flags;
8482e118 5007
b63d8b89
QT
5008 if (atomic_read(&fcport->state) == FCS_ONLINE)
5009 return;
5010
f8b02a85
AV
5011 rport_ids.node_name = wwn_to_u64(fcport->node_name);
5012 rport_ids.port_name = wwn_to_u64(fcport->port_name);
8482e118 5013 rport_ids.port_id = fcport->d_id.b.domain << 16 |
5014 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
77d74143 5015 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
e315cd28 5016 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
77d74143 5017 if (!rport) {
7c3df132
SK
5018 ql_log(ql_log_warn, vha, 0x2006,
5019 "Unable to allocate fc remote port.\n");
77d74143
AV
5020 return;
5021 }
2d70c103 5022
044d78e1 5023 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
19a7b4ae 5024 *((fc_port_t **)rport->dd_data) = fcport;
044d78e1 5025 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
d97994dc 5026
ad3e0eda 5027 rport->supported_classes = fcport->supported_classes;
77d74143 5028
8482e118 5029 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
5030 if (fcport->port_type == FCT_INITIATOR)
5031 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
5032 if (fcport->port_type == FCT_TARGET)
5033 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
726b8548 5034
83548fe2
QT
5035 ql_dbg(ql_dbg_disc, vha, 0x20ee,
5036 "%s %8phN. rport %p is %s mode\n",
5037 __func__, fcport->port_name, rport,
5038 (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
726b8548 5039
77d74143 5040 fc_remote_port_rolechg(rport, rport_ids.roles);
1da177e4
LT
5041}
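
/*
 * Illustrative, standalone sketch (not part of the driver): how the
 * port_type recorded during discovery maps onto FC transport rport roles,
 * as done just above with fc_remote_port_rolechg().  The role constants and
 * enum below are stand-ins for the definitions in scsi_transport_fc.h.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ROLE_UNKNOWN	0x00u
#define DEMO_ROLE_FCP_TARGET	0x01u
#define DEMO_ROLE_FCP_INITIATOR	0x02u

enum demo_port_type { DEMO_FCT_UNKNOWN, DEMO_FCT_INITIATOR, DEMO_FCT_TARGET };

static uint32_t demo_rport_roles(enum demo_port_type t)
{
	uint32_t roles = DEMO_ROLE_UNKNOWN;

	if (t == DEMO_FCT_INITIATOR)
		roles |= DEMO_ROLE_FCP_INITIATOR;
	if (t == DEMO_FCT_TARGET)
		roles |= DEMO_ROLE_FCP_TARGET;
	return roles;
}

int main(void)
{
	printf("target roles: 0x%x\n", demo_rport_roles(DEMO_FCT_TARGET));
	return 0;
}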
5042
23be331d
AB
5043/*
5044 * qla2x00_update_fcport
5045 * Updates device on list.
5046 *
5047 * Input:
5048 * ha = adapter block pointer.
5049 * fcport = port structure pointer.
5050 *
5051 * Return:
5052 * 0 - Success
5053 * BIT_0 - error
5054 *
5055 * Context:
5056 * Kernel context.
5057 */
5058void
e315cd28 5059qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
23be331d 5060{
e315cd28 5061 fcport->vha = vha;
8ae6d9c7 5062
726b8548
QT
5063 if (IS_SW_RESV_ADDR(fcport->d_id))
5064 return;
5065
b63d8b89
QT
5066 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
5067 fcport->disc_state = DSC_LOGIN_COMPLETE;
5068 fcport->deleted = 0;
5069 fcport->logout_on_delete = 1;
5070 fcport->login_retry = vha->hw->login_retry_count;
23be331d 5071
dbe18018
DT
5072 qla2x00_iidma_fcport(vha, fcport);
5073
e84067d7
DG
5074 if (fcport->fc4f_nvme) {
5075 qla_nvme_register_remote(vha, fcport);
b63d8b89
QT
5076 fcport->disc_state = DSC_LOGIN_COMPLETE;
5077 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
e84067d7
DG
5078 return;
5079 }
5080
21090cbe 5081 qla24xx_update_fcport_fcp_prio(vha, fcport);
d20ed91b 5082
726b8548
QT
5083 switch (vha->host->active_mode) {
5084 case MODE_INITIATOR:
d20ed91b 5085 qla2x00_reg_remote_port(vha, fcport);
726b8548
QT
5086 break;
5087 case MODE_TARGET:
5088 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
5089 !vha->vha_tgt.qla_tgt->tgt_stopped)
5090 qlt_fc_port_added(vha, fcport);
5091 break;
5092 case MODE_DUAL:
d20ed91b 5093 qla2x00_reg_remote_port(vha, fcport);
726b8548
QT
5094 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
5095 !vha->vha_tgt.qla_tgt->tgt_stopped)
5096 qlt_fc_port_added(vha, fcport);
5097 break;
5098 default:
5099 break;
d20ed91b 5100 }
cc28e0ac
QT
5101
5102 if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
5103 if (fcport->id_changed) {
5104 fcport->id_changed = 0;
5105 ql_dbg(ql_dbg_disc, vha, 0x20d7,
5106 "%s %d %8phC post gfpnid fcp_cnt %d\n",
5107 __func__, __LINE__, fcport->port_name,
5108 vha->fcport_count);
5109 qla24xx_post_gfpnid_work(vha, fcport);
5110 } else {
5111 ql_dbg(ql_dbg_disc, vha, 0x20d7,
5112 "%s %d %8phC post gpsc fcp_cnt %d\n",
5113 __func__, __LINE__, fcport->port_name,
5114 vha->fcport_count);
5115 qla24xx_post_gpsc_work(vha, fcport);
5116 }
5117 }
b63d8b89 5118 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
23be331d
AB
5119}
5120
1da177e4
LT
5121/*
5122 * qla2x00_configure_fabric
5123 * Setup SNS devices with loop ID's.
5124 *
5125 * Input:
5126 * ha = adapter block pointer.
5127 *
5128 * Returns:
5129 * 0 = success.
 5130 * Non-zero QLA_* code = error.
5131 */
5132static int
e315cd28 5133qla2x00_configure_fabric(scsi_qla_host_t *vha)
1da177e4 5134{
b3b02e6e 5135 int rval;
726b8548 5136 fc_port_t *fcport;
1da177e4 5137 uint16_t mb[MAILBOX_REGISTER_COUNT];
0107109e 5138 uint16_t loop_id;
1da177e4 5139 LIST_HEAD(new_fcports);
e315cd28 5140 struct qla_hw_data *ha = vha->hw;
df673274 5141 int discovery_gen;
1da177e4
LT
5142
5143 /* If FL port exists, then SNS is present */
e428924c 5144 if (IS_FWI2_CAPABLE(ha))
0107109e
AV
5145 loop_id = NPH_F_PORT;
5146 else
5147 loop_id = SNS_FL_PORT;
e315cd28 5148 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
1da177e4 5149 if (rval != QLA_SUCCESS) {
83548fe2 5150 ql_dbg(ql_dbg_disc, vha, 0x20a0,
7c3df132 5151 "MBX_GET_PORT_NAME failed, No FL Port.\n");
1da177e4 5152
e315cd28 5153 vha->device_flags &= ~SWITCH_FOUND;
1da177e4
LT
5154 return (QLA_SUCCESS);
5155 }
e315cd28 5156 vha->device_flags |= SWITCH_FOUND;
1da177e4 5157
41dc529a
QT
5158
5159 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
5160 rval = qla2x00_send_change_request(vha, 0x3, 0);
5161 if (rval != QLA_SUCCESS)
5162 ql_log(ql_log_warn, vha, 0x121,
5163 "Failed to enable receiving of RSCN requests: 0x%x.\n",
5164 rval);
5165 }
5166
5167
1da177e4 5168 do {
726b8548
QT
5169 qla2x00_mgmt_svr_login(vha);
5170
cca5335c
AV
5171 /* FDMI support. */
5172 if (ql2xfdmienable &&
e315cd28
AC
5173 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
5174 qla2x00_fdmi_register(vha);
cca5335c 5175
1da177e4 5176 /* Ensure we are logged into the SNS. */
a14c7711 5177 loop_id = NPH_SNS_LID(ha);
0b91d116
CD
5178 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
5179 0xfc, mb, BIT_1|BIT_0);
a14c7711
JC
5180 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
5181 ql_dbg(ql_dbg_disc, vha, 0x20a1,
5182 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
5183 loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
0b91d116 5184 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
e452ceb6 5185 return rval;
0b91d116 5186 }
e315cd28
AC
5187 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
5188 if (qla2x00_rft_id(vha)) {
1da177e4 5189 /* EMPTY */
83548fe2 5190 ql_dbg(ql_dbg_disc, vha, 0x20a2,
7c3df132 5191 "Register FC-4 TYPE failed.\n");
b98ae0d7
QT
5192 if (test_bit(LOOP_RESYNC_NEEDED,
5193 &vha->dpc_flags))
5194 break;
1da177e4 5195 }
d3bae931 5196 if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
1da177e4 5197 /* EMPTY */
83548fe2 5198 ql_dbg(ql_dbg_disc, vha, 0x209a,
7c3df132 5199 "Register FC-4 Features failed.\n");
b98ae0d7
QT
5200 if (test_bit(LOOP_RESYNC_NEEDED,
5201 &vha->dpc_flags))
5202 break;
1da177e4 5203 }
d3bae931
DG
5204 if (vha->flags.nvme_enabled) {
5205 if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
5206 ql_dbg(ql_dbg_disc, vha, 0x2049,
5207 "Register NVME FC Type Features failed.\n");
5208 }
5209 }
e315cd28 5210 if (qla2x00_rnn_id(vha)) {
1da177e4 5211 /* EMPTY */
83548fe2 5212 ql_dbg(ql_dbg_disc, vha, 0x2104,
7c3df132 5213 "Register Node Name failed.\n");
b98ae0d7
QT
5214 if (test_bit(LOOP_RESYNC_NEEDED,
5215 &vha->dpc_flags))
5216 break;
e315cd28 5217 } else if (qla2x00_rsnn_nn(vha)) {
1da177e4 5218 /* EMPTY */
83548fe2 5219 ql_dbg(ql_dbg_disc, vha, 0x209b,
0bf0efa1 5220 "Register Symbolic Node Name failed.\n");
b98ae0d7
QT
5221 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5222 break;
1da177e4
LT
5223 }
5224 }
5225
827210ba 5226
df673274
AP
5227 /* Mark the time right before querying FW for connected ports.
5228 * This process is long and asynchronous, and by the time it is done
5229 * the collected information might no longer be accurate. E.g. a
5230 * disconnected port might have re-connected and a brand new
5231 * session been created; in that case the session's generation
5232 * will be newer than discovery_gen. */
5233 qlt_do_generation_tick(vha, &discovery_gen);
5234
a4239945 5235 if (USE_ASYNC_SCAN(ha)) {
33b28357
QT
5236 rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
5237 NULL);
a4239945
QT
5238 if (rval)
5239 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5240 } else {
f352eeb7
QT
5241 list_for_each_entry(fcport, &vha->vp_fcports, list)
5242 fcport->scan_state = QLA_FCPORT_SCAN;
5243
a4239945
QT
5244 rval = qla2x00_find_all_fabric_devs(vha);
5245 }
1da177e4
LT
5246 if (rval != QLA_SUCCESS)
5247 break;
1da177e4
LT
5248 } while (0);
5249
e84067d7
DG
5250 if (!vha->nvme_local_port && vha->flags.nvme_enabled)
5251 qla_nvme_register_hba(vha);
5252
726b8548 5253 if (rval)
7c3df132
SK
5254 ql_dbg(ql_dbg_disc, vha, 0x2068,
5255 "Configure fabric error exit rval=%d.\n", rval);
1da177e4
LT
5256
5257 return (rval);
5258}
5259
1da177e4
LT
5260/*
5261 * qla2x00_find_all_fabric_devs
5262 *
5263 * Input:
5264 * ha = adapter block pointer.
5266 *
5267 * Returns:
5268 * 0 = success.
5269 *
5270 * Context:
5271 * Kernel context.
5272 */
5273static int
726b8548 5274qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
1da177e4
LT
5275{
5276 int rval;
5277 uint16_t loop_id;
726b8548 5278 fc_port_t *fcport, *new_fcport;
1da177e4
LT
5279 int found;
5280
5281 sw_info_t *swl;
5282 int swl_idx;
5283 int first_dev, last_dev;
1516ef44 5284 port_id_t wrap = {}, nxt_d_id;
e315cd28 5285 struct qla_hw_data *ha = vha->hw;
bb4cf5b7 5286 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
726b8548 5287 unsigned long flags;
1da177e4
LT
5288
5289 rval = QLA_SUCCESS;
5290
5291 /* Try GID_PT to get device list, else GAN. */
7a67735b 5292 if (!ha->swl)
642ef983 5293 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
7a67735b
AV
5294 GFP_KERNEL);
5295 swl = ha->swl;
bbfbbbc1 5296 if (!swl) {
1da177e4 5297 /*EMPTY*/
83548fe2 5298 ql_dbg(ql_dbg_disc, vha, 0x209c,
7c3df132 5299 "GID_PT allocations failed, fallback on GA_NXT.\n");
1da177e4 5300 } else {
642ef983 5301 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
e315cd28 5302 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
1da177e4 5303 swl = NULL;
b98ae0d7
QT
5304 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5305 return rval;
e315cd28 5306 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 5307 swl = NULL;
b98ae0d7
QT
5308 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5309 return rval;
e315cd28 5310 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 5311 swl = NULL;
b98ae0d7
QT
5312 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5313 return rval;
726b8548
QT
5314 } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
5315 swl = NULL;
b98ae0d7
QT
5316 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5317 return rval;
1da177e4 5318 }
e8c72ba5
CD
5319
5320 /* If other queries succeeded probe for FC-4 type */
b98ae0d7 5321 if (swl) {
e8c72ba5 5322 qla2x00_gff_id(vha, swl);
b98ae0d7
QT
5323 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5324 return rval;
5325 }
1da177e4
LT
5326 }
5327 swl_idx = 0;
5328
5329 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 5330 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 5331 if (new_fcport == NULL) {
83548fe2 5332 ql_log(ql_log_warn, vha, 0x209d,
7c3df132 5333 "Failed to allocate memory for fcport.\n");
1da177e4
LT
5334 return (QLA_MEMORY_ALLOC_FAILED);
5335 }
5336 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
1da177e4
LT
5337 /* Set start port ID scan at adapter ID. */
5338 first_dev = 1;
5339 last_dev = 0;
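 /* first_dev/last_dev drive the walk below: the scan ends once the GID_PT
  * entry flagged as last (rsvd_1 != 0) has been consumed, or when a GA_NXT
  * walk wraps back to the first port ID seen. */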
5340
5341 /* Starting free loop ID. */
e315cd28
AC
5342 loop_id = ha->min_external_loopid;
5343 for (; loop_id <= ha->max_loop_id; loop_id++) {
5344 if (qla2x00_is_reserved_id(vha, loop_id))
1da177e4
LT
5345 continue;
5346
3a6478df
GM
5347 if (ha->current_topology == ISP_CFG_FL &&
5348 (atomic_read(&vha->loop_down_timer) ||
5349 LOOP_TRANSITION(vha))) {
bb2d52b2
AV
5350 atomic_set(&vha->loop_down_timer, 0);
5351 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5352 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4 5353 break;
bb2d52b2 5354 }
1da177e4
LT
5355
5356 if (swl != NULL) {
5357 if (last_dev) {
5358 wrap.b24 = new_fcport->d_id.b24;
5359 } else {
5360 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
5361 memcpy(new_fcport->node_name,
5362 swl[swl_idx].node_name, WWN_SIZE);
5363 memcpy(new_fcport->port_name,
5364 swl[swl_idx].port_name, WWN_SIZE);
d8b45213
AV
5365 memcpy(new_fcport->fabric_port_name,
5366 swl[swl_idx].fabric_port_name, WWN_SIZE);
5367 new_fcport->fp_speed = swl[swl_idx].fp_speed;
e8c72ba5 5368 new_fcport->fc4_type = swl[swl_idx].fc4_type;
1da177e4 5369
a5d42f4c 5370 new_fcport->nvme_flag = 0;
1a28faa0 5371 new_fcport->fc4f_nvme = 0;
a5d42f4c
DG
5372 if (vha->flags.nvme_enabled &&
5373 swl[swl_idx].fc4f_nvme) {
5374 new_fcport->fc4f_nvme =
5375 swl[swl_idx].fc4f_nvme;
5376 ql_log(ql_log_info, vha, 0x2131,
5377 "FOUND: NVME port %8phC as FC Type 28h\n",
5378 new_fcport->port_name);
5379 }
5380
1da177e4
LT
5381 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
5382 last_dev = 1;
5383 }
5384 swl_idx++;
5385 }
5386 } else {
5387 /* Send GA_NXT to the switch */
e315cd28 5388 rval = qla2x00_ga_nxt(vha, new_fcport);
1da177e4 5389 if (rval != QLA_SUCCESS) {
83548fe2 5390 ql_log(ql_log_warn, vha, 0x209e,
7c3df132
SK
5391 "SNS scan failed -- assuming "
5392 "zero-entry result.\n");
1da177e4
LT
5393 rval = QLA_SUCCESS;
5394 break;
5395 }
5396 }
5397
5398 /* If wrap on switch device list, exit. */
5399 if (first_dev) {
5400 wrap.b24 = new_fcport->d_id.b24;
5401 first_dev = 0;
5402 } else if (new_fcport->d_id.b24 == wrap.b24) {
83548fe2 5403 ql_dbg(ql_dbg_disc, vha, 0x209f,
7c3df132
SK
5404 "Device wrap (%02x%02x%02x).\n",
5405 new_fcport->d_id.b.domain,
5406 new_fcport->d_id.b.area,
5407 new_fcport->d_id.b.al_pa);
1da177e4
LT
5408 break;
5409 }
5410
2c3dfe3f 5411 /* Bypass if same physical adapter. */
e315cd28 5412 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
1da177e4
LT
5413 continue;
5414
2c3dfe3f 5415 /* Bypass virtual ports of the same host. */
bb4cf5b7
CD
5416 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
5417 continue;
2c3dfe3f 5418
f7d289f6
AV
5419 /* Bypass if same domain and area of adapter. */
5420 if (((new_fcport->d_id.b24 & 0xffff00) ==
e315cd28 5421 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
f7d289f6
AV
5422 ISP_CFG_FL)
5423 continue;
5424
1da177e4
LT
5425 /* Bypass reserved domain fields. */
5426 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
5427 continue;
5428
e8c72ba5 5429 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
4da26e16
CD
5430 if (ql2xgffidenable &&
5431 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
5432 new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
e8c72ba5
CD
5433 continue;
5434
726b8548
QT
5435 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5436
1da177e4
LT
5437 /* Locate matching device in database. */
5438 found = 0;
e315cd28 5439 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
5440 if (memcmp(new_fcport->port_name, fcport->port_name,
5441 WWN_SIZE))
5442 continue;
5443
827210ba 5444 fcport->scan_state = QLA_FCPORT_FOUND;
b3b02e6e 5445
1da177e4
LT
5446 found++;
5447
d8b45213
AV
5448 /* Update port state. */
5449 memcpy(fcport->fabric_port_name,
5450 new_fcport->fabric_port_name, WWN_SIZE);
5451 fcport->fp_speed = new_fcport->fp_speed;
5452
1da177e4 5453 /*
b2032fd5
RD
5454 * If address the same and state FCS_ONLINE
5455 * (or in target mode), nothing changed.
1da177e4
LT
5456 */
5457 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
b2032fd5 5458 (atomic_read(&fcport->state) == FCS_ONLINE ||
726b8548 5459 (vha->host->active_mode == MODE_TARGET))) {
1da177e4
LT
5460 break;
5461 }
5462
5463 /*
5464 * If device was not a fabric device before.
5465 */
5466 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
5467 fcport->d_id.b24 = new_fcport->d_id.b24;
5f16b331 5468 qla2x00_clear_loop_id(fcport);
1da177e4
LT
5469 fcport->flags |= (FCF_FABRIC_DEVICE |
5470 FCF_LOGIN_NEEDED);
1da177e4
LT
5471 break;
5472 }
5473
5474 /*
5475 * Port ID changed or device was marked to be updated;
5476 * Log it out if still logged in and mark it for
5477 * relogin later.
5478 */
726b8548 5479 if (qla_tgt_mode_enabled(base_vha)) {
b2032fd5
RD
5480 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
5481 "port changed FC ID, %8phC"
5482 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
5483 fcport->port_name,
5484 fcport->d_id.b.domain,
5485 fcport->d_id.b.area,
5486 fcport->d_id.b.al_pa,
5487 fcport->loop_id,
5488 new_fcport->d_id.b.domain,
5489 new_fcport->d_id.b.area,
5490 new_fcport->d_id.b.al_pa);
5491 fcport->d_id.b24 = new_fcport->d_id.b24;
5492 break;
5493 }
5494
1da177e4
LT
5495 fcport->d_id.b24 = new_fcport->d_id.b24;
5496 fcport->flags |= FCF_LOGIN_NEEDED;
1da177e4
LT
5497 break;
5498 }
5499
9dd9686b
DT
5500 if (fcport->fc4f_nvme) {
5501 if (fcport->disc_state == DSC_DELETE_PEND) {
5502 fcport->disc_state = DSC_GNL;
5503 vha->fcport_count--;
5504 fcport->login_succ = 0;
5505 }
5506 }
5507
726b8548
QT
5508 if (found) {
5509 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1da177e4 5510 continue;
726b8548 5511 }
1da177e4 5512 /* If device was not in our fcports list, then add it. */
b2032fd5 5513 new_fcport->scan_state = QLA_FCPORT_FOUND;
726b8548
QT
5514 list_add_tail(&new_fcport->list, &vha->vp_fcports);
5515
5516 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5517
1da177e4
LT
5518
5519 /* Allocate a new replacement fcport. */
5520 nxt_d_id.b24 = new_fcport->d_id.b24;
e315cd28 5521 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 5522 if (new_fcport == NULL) {
83548fe2 5523 ql_log(ql_log_warn, vha, 0xd032,
7c3df132 5524 "Memory allocation failed for fcport.\n");
1da177e4
LT
5525 return (QLA_MEMORY_ALLOC_FAILED);
5526 }
5527 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
5528 new_fcport->d_id.b24 = nxt_d_id.b24;
5529 }
5530
726b8548
QT
5531 qla2x00_free_fcport(new_fcport);
5532
5533 /*
5534 * Logout all previous fabric dev marked lost, except FCP2 devices.
5535 */
5536 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5537 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5538 break;
5539
5540 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
5541 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
5542 continue;
5543
5544 if (fcport->scan_state == QLA_FCPORT_SCAN) {
5545 if ((qla_dual_mode_enabled(vha) ||
5546 qla_ini_mode_enabled(vha)) &&
5547 atomic_read(&fcport->state) == FCS_ONLINE) {
5548 qla2x00_mark_device_lost(vha, fcport,
5549 ql2xplogiabsentdevice, 0);
5550 if (fcport->loop_id != FC_NO_LOOP_ID &&
5551 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
5552 fcport->port_type != FCT_INITIATOR &&
5553 fcport->port_type != FCT_BROADCAST) {
83548fe2 5554 ql_dbg(ql_dbg_disc, vha, 0x20f0,
726b8548
QT
5555 "%s %d %8phC post del sess\n",
5556 __func__, __LINE__,
5557 fcport->port_name);
d8630bb9 5558 qlt_schedule_sess_for_deletion(fcport);
726b8548
QT
5559 continue;
5560 }
5561 }
5562 }
1da177e4 5563
726b8548
QT
5564 if (fcport->scan_state == QLA_FCPORT_FOUND)
5565 qla24xx_fcport_handle_login(vha, fcport);
5566 }
1da177e4
LT
5567 return (rval);
5568}
5569
5570/*
5571 * qla2x00_find_new_loop_id
5572 * Scan through our port list and find a new usable loop ID.
5573 *
5574 * Input:
5575 * ha: adapter state pointer.
5576 * dev: port structure pointer.
5577 *
5578 * Returns:
5579 * qla2x00 local function return status code.
5580 *
5581 * Context:
5582 * Kernel context.
5583 */
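/* Note: allocation is bitmap based -- the first clear bit in ha->loop_id_map
 * (taken under vport_slock) becomes the new handle; if that bit lands on a
 * reserved ID or beyond LOOPID_MAP_SIZE, the lookup fails and the port keeps
 * FC_NO_LOOP_ID. */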
03bcfb57 5584int
e315cd28 5585qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
1da177e4
LT
5586{
5587 int rval;
e315cd28 5588 struct qla_hw_data *ha = vha->hw;
feafb7b1 5589 unsigned long flags = 0;
1da177e4
LT
5590
5591 rval = QLA_SUCCESS;
5592
5f16b331 5593 spin_lock_irqsave(&ha->vport_slock, flags);
1da177e4 5594
5f16b331
CD
5595 dev->loop_id = find_first_zero_bit(ha->loop_id_map,
5596 LOOPID_MAP_SIZE);
5597 if (dev->loop_id >= LOOPID_MAP_SIZE ||
5598 qla2x00_is_reserved_id(vha, dev->loop_id)) {
5599 dev->loop_id = FC_NO_LOOP_ID;
5600 rval = QLA_FUNCTION_FAILED;
5601 } else
5602 set_bit(dev->loop_id, ha->loop_id_map);
1da177e4 5603
5f16b331 5604 spin_unlock_irqrestore(&ha->vport_slock, flags);
1da177e4 5605
5f16b331
CD
5606 if (rval == QLA_SUCCESS)
5607 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
5608 "Assigning new loopid=%x, portid=%x.\n",
5609 dev->loop_id, dev->d_id.b24);
5610 else
5611 ql_log(ql_log_warn, dev->vha, 0x2087,
5612 "No loop_id's available, portid=%x.\n",
5613 dev->d_id.b24);
1da177e4
LT
5614
5615 return (rval);
5616}
5617
1da177e4 5618
f6602f3b
QT
5619/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
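/* Reservation summary: the base port (vp_idx 0) claims NPH_MGMT_SERVER
 * directly; NPIV ports start at NPH_MGMT_SERVER - vp_idx and, under
 * vport_slock, walk downward through loop_id_map until a free handle is
 * found, returning FC_NO_LOOP_ID if none is available. */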
5620int
5621qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
5622{
5623 int loop_id = FC_NO_LOOP_ID;
5624 int lid = NPH_MGMT_SERVER - vha->vp_idx;
5625 unsigned long flags;
5626 struct qla_hw_data *ha = vha->hw;
5627
5628 if (vha->vp_idx == 0) {
5629 set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
5630 return NPH_MGMT_SERVER;
5631 }
5632
5633 /* pick id from high and work down to low */
5634 spin_lock_irqsave(&ha->vport_slock, flags);
5635 for (; lid > 0; lid--) {
5636 if (!test_bit(lid, vha->hw->loop_id_map)) {
5637 set_bit(lid, vha->hw->loop_id_map);
5638 loop_id = lid;
5639 break;
5640 }
5641 }
5642 spin_unlock_irqrestore(&ha->vport_slock, flags);
5643
5644 return loop_id;
5645}
5646
1da177e4
LT
5647/*
5648 * qla2x00_fabric_login
5649 * Issue fabric login command.
5650 *
5651 * Input:
5652 * ha = adapter block pointer.
5653 * device = pointer to FC device type structure.
5654 *
5655 * Returns:
5656 * 0 - Login succeeded
5657 * 1 - Login failed
5658 * 2 - Initiator device
5659 * 3 - Fatal error
5660 */
5661int
e315cd28 5662qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
1da177e4
LT
5663 uint16_t *next_loopid)
5664{
5665 int rval;
5666 int retry;
5667 uint16_t tmp_loopid;
5668 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 5669 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
5670
5671 retry = 0;
5672 tmp_loopid = 0;
5673
5674 for (;;) {
7c3df132
SK
5675 ql_dbg(ql_dbg_disc, vha, 0x2000,
5676 "Trying Fabric Login w/loop id 0x%04x for port "
5677 "%02x%02x%02x.\n",
5678 fcport->loop_id, fcport->d_id.b.domain,
5679 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
5680
5681 /* Login fcport on switch. */
0b91d116 5682 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
1da177e4
LT
5683 fcport->d_id.b.domain, fcport->d_id.b.area,
5684 fcport->d_id.b.al_pa, mb, BIT_0);
0b91d116
CD
5685 if (rval != QLA_SUCCESS) {
5686 return rval;
5687 }
1da177e4
LT
5688 if (mb[0] == MBS_PORT_ID_USED) {
5689 /*
5690 * Device has another loop ID. The firmware team
0107109e
AV
5691 * recommends the driver perform an implicit login with
5692 * the specified ID again. The ID we just used is saved
5693 * here so we return with an ID that can be tried by
5694 * the next login.
1da177e4
LT
5695 */
5696 retry++;
5697 tmp_loopid = fcport->loop_id;
5698 fcport->loop_id = mb[1];
5699
7c3df132
SK
5700 ql_dbg(ql_dbg_disc, vha, 0x2001,
5701 "Fabric Login: port in use - next loop "
5702 "id=0x%04x, port id= %02x%02x%02x.\n",
1da177e4 5703 fcport->loop_id, fcport->d_id.b.domain,
7c3df132 5704 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
5705
5706 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
5707 /*
5708 * Login succeeded.
5709 */
5710 if (retry) {
5711 /* A retry occurred before. */
5712 *next_loopid = tmp_loopid;
5713 } else {
5714 /*
5715 * No retry occurred before. Just increment the
5716 * ID value for next login.
5717 */
5718 *next_loopid = (fcport->loop_id + 1);
5719 }
5720
5721 if (mb[1] & BIT_0) {
5722 fcport->port_type = FCT_INITIATOR;
5723 } else {
5724 fcport->port_type = FCT_TARGET;
5725 if (mb[1] & BIT_1) {
8474f3a0 5726 fcport->flags |= FCF_FCP2_DEVICE;
1da177e4
LT
5727 }
5728 }
5729
ad3e0eda
AV
5730 if (mb[10] & BIT_0)
5731 fcport->supported_classes |= FC_COS_CLASS2;
5732 if (mb[10] & BIT_1)
5733 fcport->supported_classes |= FC_COS_CLASS3;
5734
2d70c103
NB
5735 if (IS_FWI2_CAPABLE(ha)) {
5736 if (mb[10] & BIT_7)
5737 fcport->flags |=
5738 FCF_CONF_COMP_SUPPORTED;
5739 }
5740
1da177e4
LT
5741 rval = QLA_SUCCESS;
5742 break;
5743 } else if (mb[0] == MBS_LOOP_ID_USED) {
5744 /*
5745 * Loop ID already used, try next loop ID.
5746 */
5747 fcport->loop_id++;
e315cd28 5748 rval = qla2x00_find_new_loop_id(vha, fcport);
1da177e4
LT
5749 if (rval != QLA_SUCCESS) {
5750 /* Ran out of loop IDs to use */
5751 break;
5752 }
5753 } else if (mb[0] == MBS_COMMAND_ERROR) {
5754 /*
5755 * Firmware possibly timed out during login. If NO
5756 * retries are left to do then the device is declared
5757 * dead.
5758 */
5759 *next_loopid = fcport->loop_id;
e315cd28 5760 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
5761 fcport->d_id.b.domain, fcport->d_id.b.area,
5762 fcport->d_id.b.al_pa);
e315cd28 5763 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1da177e4
LT
5764
5765 rval = 1;
5766 break;
5767 } else {
5768 /*
5769 * unrecoverable / not handled error
5770 */
7c3df132
SK
5771 ql_dbg(ql_dbg_disc, vha, 0x2002,
5772 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
5773 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
5774 fcport->d_id.b.area, fcport->d_id.b.al_pa,
5775 fcport->loop_id, jiffies);
1da177e4
LT
5776
5777 *next_loopid = fcport->loop_id;
e315cd28 5778 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
5779 fcport->d_id.b.domain, fcport->d_id.b.area,
5780 fcport->d_id.b.al_pa);
5f16b331 5781 qla2x00_clear_loop_id(fcport);
0eedfcf0 5782 fcport->login_retry = 0;
1da177e4
LT
5783
5784 rval = 3;
5785 break;
5786 }
5787 }
5788
5789 return (rval);
5790}
5791
5792/*
5793 * qla2x00_local_device_login
5794 * Issue local device login command.
5795 *
5796 * Input:
5797 * ha = adapter block pointer.
5798 * loop_id = loop id of device to login to.
5799 *
5800 * Returns (Where's the #define!!!!):
5801 * 0 - Login succeeded
5802 * 1 - Login failed
5803 * 3 - Fatal error
5804 */
5805int
e315cd28 5806qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
1da177e4
LT
5807{
5808 int rval;
5809 uint16_t mb[MAILBOX_REGISTER_COUNT];
5810
5811 memset(mb, 0, sizeof(mb));
e315cd28 5812 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
1da177e4
LT
5813 if (rval == QLA_SUCCESS) {
5814 /* Interrogate mailbox registers for any errors */
5815 if (mb[0] == MBS_COMMAND_ERROR)
5816 rval = 1;
5817 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
5818 /* device not in PCB table */
5819 rval = 3;
5820 }
5821
5822 return (rval);
5823}
5824
5825/*
5826 * qla2x00_loop_resync
5827 * Resync with fibre channel devices.
5828 *
5829 * Input:
5830 * ha = adapter block pointer.
5831 *
5832 * Returns:
5833 * 0 = success
5834 */
5835int
e315cd28 5836qla2x00_loop_resync(scsi_qla_host_t *vha)
1da177e4 5837{
73208dfd 5838 int rval = QLA_SUCCESS;
1da177e4 5839 uint32_t wait_time;
67c2e93a
AC
5840 struct req_que *req;
5841 struct rsp_que *rsp;
5842
d7459527 5843 req = vha->req;
67c2e93a 5844 rsp = req->rsp;
1da177e4 5845
e315cd28
AC
5846 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5847 if (vha->flags.online) {
5848 if (!(rval = qla2x00_fw_ready(vha))) {
1da177e4
LT
5849 /* Wait at most MAX_TARGET RSCNs for a stable link. */
5850 wait_time = 256;
5851 do {
8ae6d9c7
GM
5852 if (!IS_QLAFX00(vha->hw)) {
5853 /*
5854 * Issue a marker after FW becomes
5855 * ready.
5856 */
5857 qla2x00_marker(vha, req, rsp, 0, 0,
5858 MK_SYNC_ALL);
5859 vha->marker_needed = 0;
5860 }
1da177e4
LT
5861
5862 /* Remap devices on Loop. */
e315cd28 5863 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4 5864
8ae6d9c7
GM
5865 if (IS_QLAFX00(vha->hw))
5866 qlafx00_configure_devices(vha);
5867 else
5868 qla2x00_configure_loop(vha);
5869
1da177e4 5870 wait_time--;
e315cd28
AC
5871 } while (!atomic_read(&vha->loop_down_timer) &&
5872 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
5873 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
5874 &vha->dpc_flags)));
1da177e4 5875 }
1da177e4
LT
5876 }
5877
e315cd28 5878 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1da177e4 5879 return (QLA_FUNCTION_FAILED);
1da177e4 5880
e315cd28 5881 if (rval)
7c3df132
SK
5882 ql_dbg(ql_dbg_disc, vha, 0x206c,
5883 "%s *** FAILED ***.\n", __func__);
1da177e4
LT
5884
5885 return (rval);
5886}
5887
579d12b5
SK
5888/*
5889* qla2x00_perform_loop_resync
5890* Description: This function will set the appropriate flags and call
5891* qla2x00_loop_resync. If successful, the loop will be resynced
5892* Arguments : scsi_qla_host_t pointer
5893* return : Success or Failure
5894*/
5895
5896int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
5897{
5898 int32_t rval = 0;
5899
5900 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
5901 /*Configure the flags so that resync happens properly*/
5902 atomic_set(&ha->loop_down_timer, 0);
5903 if (!(ha->device_flags & DFLG_NO_CABLE)) {
5904 atomic_set(&ha->loop_state, LOOP_UP);
5905 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
5906 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
5907 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
5908
5909 rval = qla2x00_loop_resync(ha);
5910 } else
5911 atomic_set(&ha->loop_state, LOOP_DEAD);
5912
5913 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
5914 }
5915
5916 return rval;
5917}
5918
d97994dc 5919void
67becc00 5920qla2x00_update_fcports(scsi_qla_host_t *base_vha)
d97994dc 5921{
5922 fc_port_t *fcport;
feafb7b1
AE
5923 struct scsi_qla_host *vha;
5924 struct qla_hw_data *ha = base_vha->hw;
5925 unsigned long flags;
d97994dc 5926
feafb7b1 5927 spin_lock_irqsave(&ha->vport_slock, flags);
d97994dc 5928 /* Go with deferred removal of rport references. */
feafb7b1
AE
5929 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
5930 atomic_inc(&vha->vref_count);
5931 list_for_each_entry(fcport, &vha->vp_fcports, list) {
8ae598d0 5932 if (fcport->drport &&
feafb7b1
AE
5933 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
5934 spin_unlock_irqrestore(&ha->vport_slock, flags);
67becc00 5935 qla2x00_rport_del(fcport);
df673274 5936
feafb7b1
AE
5937 spin_lock_irqsave(&ha->vport_slock, flags);
5938 }
5939 }
5940 atomic_dec(&vha->vref_count);
c4a9b538 5941 wake_up(&vha->vref_waitq);
feafb7b1
AE
5942 }
5943 spin_unlock_irqrestore(&ha->vport_slock, flags);
d97994dc 5944}
5945
7d613ac6
SV
5946/* Assumes idc_lock always held on entry */
5947void
5948qla83xx_reset_ownership(scsi_qla_host_t *vha)
5949{
5950 struct qla_hw_data *ha = vha->hw;
5951 uint32_t drv_presence, drv_presence_mask;
5952 uint32_t dev_part_info1, dev_part_info2, class_type;
5953 uint32_t class_type_mask = 0x3;
5954 uint16_t fcoe_other_function = 0xffff, i;
5955
7ec0effd
AD
5956 if (IS_QLA8044(ha)) {
5957 drv_presence = qla8044_rd_direct(vha,
5958 QLA8044_CRB_DRV_ACTIVE_INDEX);
5959 dev_part_info1 = qla8044_rd_direct(vha,
5960 QLA8044_CRB_DEV_PART_INFO_INDEX);
5961 dev_part_info2 = qla8044_rd_direct(vha,
5962 QLA8044_CRB_DEV_PART_INFO2);
5963 } else {
5964 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5965 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
5966 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
5967 }
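 /* Each function's class type occupies a 4-bit field: DEV_PARTINFO1
  * describes functions 0-7 and DEV_PARTINFO2 functions 8-15; scan both for
  * another FCoE-class function. */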
7d613ac6
SV
5968 for (i = 0; i < 8; i++) {
5969 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
5970 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5971 (i != ha->portnum)) {
5972 fcoe_other_function = i;
5973 break;
5974 }
5975 }
5976 if (fcoe_other_function == 0xffff) {
5977 for (i = 0; i < 8; i++) {
5978 class_type = ((dev_part_info2 >> (i * 4)) &
5979 class_type_mask);
5980 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5981 ((i + 8) != ha->portnum)) {
5982 fcoe_other_function = i + 8;
5983 break;
5984 }
5985 }
5986 }
5987 /*
5988 * Prepare drv-presence mask based on fcoe functions present.
5989 * However consider only valid physical fcoe function numbers (0-15).
5990 */
5991 drv_presence_mask = ~((1 << (ha->portnum)) |
5992 ((fcoe_other_function == 0xffff) ?
5993 0 : (1 << (fcoe_other_function))));
5994
5995 /* We are the reset owner iff:
5996 * - No other protocol drivers present.
5997 * - This is the lowest among fcoe functions. */
5998 if (!(drv_presence & drv_presence_mask) &&
5999 (ha->portnum < fcoe_other_function)) {
6000 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
6001 "This host is Reset owner.\n");
6002 ha->flags.nic_core_reset_owner = 1;
6003 }
6004}
6005
fa492630 6006static int
7d613ac6
SV
6007__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
6008{
6009 int rval = QLA_SUCCESS;
6010 struct qla_hw_data *ha = vha->hw;
6011 uint32_t drv_ack;
6012
6013 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6014 if (rval == QLA_SUCCESS) {
6015 drv_ack |= (1 << ha->portnum);
6016 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6017 }
6018
6019 return rval;
6020}
6021
fa492630 6022static int
7d613ac6
SV
6023__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
6024{
6025 int rval = QLA_SUCCESS;
6026 struct qla_hw_data *ha = vha->hw;
6027 uint32_t drv_ack;
6028
6029 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6030 if (rval == QLA_SUCCESS) {
6031 drv_ack &= ~(1 << ha->portnum);
6032 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6033 }
6034
6035 return rval;
6036}
6037
fa492630 6038static const char *
7d613ac6
SV
6039qla83xx_dev_state_to_string(uint32_t dev_state)
6040{
6041 switch (dev_state) {
6042 case QLA8XXX_DEV_COLD:
6043 return "COLD/RE-INIT";
6044 case QLA8XXX_DEV_INITIALIZING:
6045 return "INITIALIZING";
6046 case QLA8XXX_DEV_READY:
6047 return "READY";
6048 case QLA8XXX_DEV_NEED_RESET:
6049 return "NEED RESET";
6050 case QLA8XXX_DEV_NEED_QUIESCENT:
6051 return "NEED QUIESCENT";
6052 case QLA8XXX_DEV_FAILED:
6053 return "FAILED";
6054 case QLA8XXX_DEV_QUIESCENT:
6055 return "QUIESCENT";
6056 default:
6057 return "Unknown";
6058 }
6059}
6060
6061/* Assumes idc-lock always held on entry */
6062void
6063qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
6064{
6065 struct qla_hw_data *ha = vha->hw;
6066 uint32_t idc_audit_reg = 0, duration_secs = 0;
6067
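 /* The audit register packs the reporting function number into the low
  * bits, the audit type at bit 7, and the timestamp or duration (in
  * seconds) from bit 8 upward, as encoded below. */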
6068 switch (audit_type) {
6069 case IDC_AUDIT_TIMESTAMP:
6070 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
6071 idc_audit_reg = (ha->portnum) |
6072 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
6073 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6074 break;
6075
6076 case IDC_AUDIT_COMPLETION:
6077 duration_secs = ((jiffies_to_msecs(jiffies) -
6078 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
6079 idc_audit_reg = (ha->portnum) |
6080 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
6081 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6082 break;
6083
6084 default:
6085 ql_log(ql_log_warn, vha, 0xb078,
6086 "Invalid audit type specified.\n");
6087 break;
6088 }
6089}
6090
6091/* Assumes idc_lock always held on entry */
fa492630 6092static int
7d613ac6
SV
6093qla83xx_initiating_reset(scsi_qla_host_t *vha)
6094{
6095 struct qla_hw_data *ha = vha->hw;
6096 uint32_t idc_control, dev_state;
6097
6098 __qla83xx_get_idc_control(vha, &idc_control);
6099 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
6100 ql_log(ql_log_info, vha, 0xb080,
6101 "NIC Core reset has been disabled. idc-control=0x%x\n",
6102 idc_control);
6103 return QLA_FUNCTION_FAILED;
6104 }
6105
6106 /* Set NEED-RESET iff in READY state and we are the reset-owner */
6107 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6108 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
6109 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
6110 QLA8XXX_DEV_NEED_RESET);
6111 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
6112 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
6113 } else {
6114 const char *state = qla83xx_dev_state_to_string(dev_state);
6115 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
6116
6117 /* SV: XXX: Is timeout required here? */
6118 /* Wait for IDC state change READY -> NEED_RESET */
6119 while (dev_state == QLA8XXX_DEV_READY) {
6120 qla83xx_idc_unlock(vha, 0);
6121 msleep(200);
6122 qla83xx_idc_lock(vha, 0);
6123 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6124 }
6125 }
6126
6127 /* Send IDC ack by writing to drv-ack register */
6128 __qla83xx_set_drv_ack(vha);
6129
6130 return QLA_SUCCESS;
6131}
6132
6133int
6134__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
6135{
6136 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
6137}
6138
7d613ac6
SV
6139int
6140__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
6141{
6142 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
6143}
6144
fa492630 6145static int
7d613ac6
SV
6146qla83xx_check_driver_presence(scsi_qla_host_t *vha)
6147{
6148 uint32_t drv_presence = 0;
6149 struct qla_hw_data *ha = vha->hw;
6150
6151 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6152 if (drv_presence & (1 << ha->portnum))
6153 return QLA_SUCCESS;
6154 else
6155 return QLA_TEST_FAILED;
6156}
6157
6158int
6159qla83xx_nic_core_reset(scsi_qla_host_t *vha)
6160{
6161 int rval = QLA_SUCCESS;
6162 struct qla_hw_data *ha = vha->hw;
6163
6164 ql_dbg(ql_dbg_p3p, vha, 0xb058,
6165 "Entered %s().\n", __func__);
6166
6167 if (vha->device_flags & DFLG_DEV_FAILED) {
6168 ql_log(ql_log_warn, vha, 0xb059,
6169 "Device in unrecoverable FAILED state.\n");
6170 return QLA_FUNCTION_FAILED;
6171 }
6172
6173 qla83xx_idc_lock(vha, 0);
6174
6175 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
6176 ql_log(ql_log_warn, vha, 0xb05a,
6177 "Function=0x%x has been removed from IDC participation.\n",
6178 ha->portnum);
6179 rval = QLA_FUNCTION_FAILED;
6180 goto exit;
6181 }
6182
6183 qla83xx_reset_ownership(vha);
6184
6185 rval = qla83xx_initiating_reset(vha);
6186
6187 /*
6188 * Perform reset if we are the reset-owner,
6189 * else wait till IDC state changes to READY/FAILED.
6190 */
6191 if (rval == QLA_SUCCESS) {
6192 rval = qla83xx_idc_state_handler(vha);
6193
6194 if (rval == QLA_SUCCESS)
6195 ha->flags.nic_core_hung = 0;
6196 __qla83xx_clear_drv_ack(vha);
6197 }
6198
6199exit:
6200 qla83xx_idc_unlock(vha, 0);
6201
6202 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
6203
6204 return rval;
6205}
6206
81178772
SK
6207int
6208qla2xxx_mctp_dump(scsi_qla_host_t *vha)
6209{
6210 struct qla_hw_data *ha = vha->hw;
6211 int rval = QLA_FUNCTION_FAILED;
6212
6213 if (!IS_MCTP_CAPABLE(ha)) {
6214 /* This message can be removed from the final version */
6215 ql_log(ql_log_info, vha, 0x506d,
6216 "This board is not MCTP capable\n");
6217 return rval;
6218 }
6219
6220 if (!ha->mctp_dump) {
6221 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
6222 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
6223
6224 if (!ha->mctp_dump) {
6225 ql_log(ql_log_warn, vha, 0x506e,
6226 "Failed to allocate memory for mctp dump\n");
6227 return rval;
6228 }
6229 }
6230
6231#define MCTP_DUMP_STR_ADDR 0x00000000
6232 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
6233 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
6234 if (rval != QLA_SUCCESS) {
6235 ql_log(ql_log_warn, vha, 0x506f,
6236 "Failed to capture mctp dump\n");
6237 } else {
6238 ql_log(ql_log_info, vha, 0x5070,
6239 "Mctp dump capture for host (%ld/%p).\n",
6240 vha->host_no, ha->mctp_dump);
6241 ha->mctp_dumped = 1;
6242 }
6243
409ee0fe 6244 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
81178772
SK
6245 ha->flags.nic_core_reset_hdlr_active = 1;
6246 rval = qla83xx_restart_nic_firmware(vha);
6247 if (rval)
6248 /* NIC Core reset failed. */
6249 ql_log(ql_log_warn, vha, 0x5071,
6250 "Failed to restart nic firmware\n");
6251 else
6252 ql_dbg(ql_dbg_p3p, vha, 0xb084,
6253 "Restarted NIC firmware successfully.\n");
6254 ha->flags.nic_core_reset_hdlr_active = 0;
6255 }
6256
6257 return rval;
6258
6259}
6260
579d12b5 6261/*
8fcd6b8b 6262* qla2x00_quiesce_io
579d12b5
SK
6263* Description: This function blocks new I/Os.
6264* It does not abort any I/Os because the context
6265* is not destroyed during quiescence
6266* Arguments: scsi_qla_host_t
6267* return : void
6268*/
6269void
8fcd6b8b 6270qla2x00_quiesce_io(scsi_qla_host_t *vha)
579d12b5
SK
6271{
6272 struct qla_hw_data *ha = vha->hw;
6273 struct scsi_qla_host *vp;
6274
8fcd6b8b
CD
6275 ql_dbg(ql_dbg_dpc, vha, 0x401d,
6276 "Quiescing I/O - ha=%p.\n", ha);
579d12b5
SK
6277
6278 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
6279 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
6280 atomic_set(&vha->loop_state, LOOP_DOWN);
6281 qla2x00_mark_all_devices_lost(vha, 0);
6282 list_for_each_entry(vp, &ha->vp_list, list)
8fcd6b8b 6283 qla2x00_mark_all_devices_lost(vp, 0);
579d12b5
SK
6284 } else {
6285 if (!atomic_read(&vha->loop_down_timer))
6286 atomic_set(&vha->loop_down_timer,
6287 LOOP_DOWN_TIME);
6288 }
6289 /* Wait for pending cmds to complete */
6290 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
6291}
6292
a9083016
GM
6293void
6294qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
6295{
6296 struct qla_hw_data *ha = vha->hw;
579d12b5 6297 struct scsi_qla_host *vp;
feafb7b1 6298 unsigned long flags;
6aef87be 6299 fc_port_t *fcport;
7c3f8fd1 6300 u16 i;
a9083016 6301
e46ef004
SK
6302 /* For ISP82XX, the driver waits for outstanding commands to complete,
6303 * so the online flag should remain set.
6304 */
7ec0effd 6305 if (!(IS_P3P_TYPE(ha)))
e46ef004 6306 vha->flags.online = 0;
a9083016
GM
6307 ha->flags.chip_reset_done = 0;
6308 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2be21fa2 6309 vha->qla_stats.total_isp_aborts++;
a9083016 6310
7c3df132
SK
6311 ql_log(ql_log_info, vha, 0x00af,
6312 "Performing ISP error recovery - ha=%p.\n", ha);
a9083016 6313
b2000805 6314 ha->flags.purge_mbox = 1;
e46ef004
SK
6315 /* For ISP82XX, reset_chip would only disable interrupts.
6316 * Since the driver waits for outstanding commands to complete,
6317 * the interrupts need to stay enabled, so reset_chip is skipped.
6318 */
7ec0effd 6319 if (!(IS_P3P_TYPE(ha)))
a9083016
GM
6320 ha->isp_ops->reset_chip(vha);
6321
9cd883f0
QT
6322 SAVE_TOPO(ha);
6323 ha->flags.rida_fmt2 = 0;
ec7193e2
QT
6324 ha->flags.n2n_ae = 0;
6325 ha->flags.lip_ae = 0;
6326 ha->current_topology = 0;
6327 ha->flags.fw_started = 0;
6328 ha->flags.fw_init_done = 0;
b2000805
QT
6329 ha->chip_reset++;
6330 ha->base_qpair->chip_reset = ha->chip_reset;
7c3f8fd1
QT
6331 for (i = 0; i < ha->max_qpairs; i++) {
6332 if (ha->queue_pair_map[i])
6333 ha->queue_pair_map[i]->chip_reset =
6334 ha->base_qpair->chip_reset;
6335 }
726b8548 6336
b2000805
QT
6337 /* purge MBox commands */
6338 if (atomic_read(&ha->num_pend_mbx_stage3)) {
6339 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
6340 complete(&ha->mbx_intr_comp);
6341 }
6342
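 /* Poll until no mailbox command is pending in any stage, bounded to
  * roughly one second (50 iterations x 20 ms). */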
6343 i = 0;
6344 while (atomic_read(&ha->num_pend_mbx_stage3) ||
6345 atomic_read(&ha->num_pend_mbx_stage2) ||
6346 atomic_read(&ha->num_pend_mbx_stage1)) {
6347 msleep(20);
6348 i++;
6349 if (i > 50)
6350 break;
6351 }
6352 ha->flags.purge_mbox = 0;
6353
a9083016
GM
6354 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
6355 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
6356 atomic_set(&vha->loop_state, LOOP_DOWN);
6357 qla2x00_mark_all_devices_lost(vha, 0);
feafb7b1
AE
6358
6359 spin_lock_irqsave(&ha->vport_slock, flags);
579d12b5 6360 list_for_each_entry(vp, &ha->vp_list, list) {
feafb7b1
AE
6361 atomic_inc(&vp->vref_count);
6362 spin_unlock_irqrestore(&ha->vport_slock, flags);
6363
a9083016 6364 qla2x00_mark_all_devices_lost(vp, 0);
feafb7b1
AE
6365
6366 spin_lock_irqsave(&ha->vport_slock, flags);
6367 atomic_dec(&vp->vref_count);
6368 }
6369 spin_unlock_irqrestore(&ha->vport_slock, flags);
a9083016
GM
6370 } else {
6371 if (!atomic_read(&vha->loop_down_timer))
6372 atomic_set(&vha->loop_down_timer,
6373 LOOP_DOWN_TIME);
6374 }
6375
6aef87be
AV
6376 /* Clear all async request states across all VPs. */
6377 list_for_each_entry(fcport, &vha->vp_fcports, list)
6378 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
6379 spin_lock_irqsave(&ha->vport_slock, flags);
6380 list_for_each_entry(vp, &ha->vp_list, list) {
6381 atomic_inc(&vp->vref_count);
6382 spin_unlock_irqrestore(&ha->vport_slock, flags);
6383
6384 list_for_each_entry(fcport, &vp->vp_fcports, list)
6385 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
6386
6387 spin_lock_irqsave(&ha->vport_slock, flags);
6388 atomic_dec(&vp->vref_count);
6389 }
6390 spin_unlock_irqrestore(&ha->vport_slock, flags);
6391
bddd2d65
LC
6392 if (!ha->flags.eeh_busy) {
6393 /* Make sure for ISP 82XX IO DMA is complete */
7ec0effd 6394 if (IS_P3P_TYPE(ha)) {
7190575f 6395 qla82xx_chip_reset_cleanup(vha);
7c3df132
SK
6396 ql_log(ql_log_info, vha, 0x00b4,
6397 "Done chip reset cleanup.\n");
a9083016 6398
e46ef004
SK
6399 /* Done waiting for pending commands.
6400 * Reset the online flag.
6401 */
6402 vha->flags.online = 0;
4d78c973 6403 }
a9083016 6404
bddd2d65
LC
6405 /* Requeue all commands in outstanding command list. */
6406 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
6407 }
b6a029e1
AE
6408 /* memory barrier */
6409 wmb();
a9083016
GM
6410}
6411
1da177e4
LT
6412/*
6413* qla2x00_abort_isp
6414* Resets ISP and aborts all outstanding commands.
6415*
6416* Input:
6417* ha = adapter block pointer.
6418*
6419* Returns:
6420* 0 = success
6421*/
6422int
e315cd28 6423qla2x00_abort_isp(scsi_qla_host_t *vha)
1da177e4 6424{
476e8978 6425 int rval;
1da177e4 6426 uint8_t status = 0;
e315cd28
AC
6427 struct qla_hw_data *ha = vha->hw;
6428 struct scsi_qla_host *vp;
73208dfd 6429 struct req_que *req = ha->req_q_map[0];
feafb7b1 6430 unsigned long flags;
1da177e4 6431
e315cd28 6432 if (vha->flags.online) {
a9083016 6433 qla2x00_abort_isp_cleanup(vha);
1da177e4 6434
a6171297
SV
6435 if (IS_QLA8031(ha)) {
6436 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
6437 "Clearing fcoe driver presence.\n");
6438 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
6439 ql_dbg(ql_dbg_p3p, vha, 0xb073,
6440 "Error while clearing DRV-Presence.\n");
6441 }
6442
85880801
AV
6443 if (unlikely(pci_channel_offline(ha->pdev) &&
6444 ha->flags.pci_channel_io_perm_failure)) {
6445 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6446 status = 0;
6447 return status;
6448 }
6449
73208dfd 6450 ha->isp_ops->get_flash_version(vha, req->ring);
30c47662 6451
e315cd28 6452 ha->isp_ops->nvram_config(vha);
1da177e4 6453
e315cd28
AC
6454 if (!qla2x00_restart_isp(vha)) {
6455 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4 6456
e315cd28 6457 if (!atomic_read(&vha->loop_down_timer)) {
1da177e4
LT
6458 /*
6459 * Issue marker command only when we are going
6460 * to start the I/O .
6461 */
e315cd28 6462 vha->marker_needed = 1;
1da177e4
LT
6463 }
6464
e315cd28 6465 vha->flags.online = 1;
1da177e4 6466
fd34f556 6467 ha->isp_ops->enable_intrs(ha);
1da177e4 6468
fa2a1ce5 6469 ha->isp_abort_cnt = 0;
e315cd28 6470 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
476e8978 6471
6246b8a1
GM
6472 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
6473 qla2x00_get_fw_version(vha);
df613b96
AV
6474 if (ha->fce) {
6475 ha->flags.fce_enabled = 1;
6476 memset(ha->fce, 0,
6477 fce_calc_size(ha->fce_bufs));
e315cd28 6478 rval = qla2x00_enable_fce_trace(vha,
df613b96
AV
6479 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
6480 &ha->fce_bufs);
6481 if (rval) {
7c3df132 6482 ql_log(ql_log_warn, vha, 0x8033,
df613b96
AV
6483 "Unable to reinitialize FCE "
6484 "(%d).\n", rval);
6485 ha->flags.fce_enabled = 0;
6486 }
6487 }
436a7b11
AV
6488
6489 if (ha->eft) {
6490 memset(ha->eft, 0, EFT_SIZE);
e315cd28 6491 rval = qla2x00_enable_eft_trace(vha,
436a7b11
AV
6492 ha->eft_dma, EFT_NUM_BUFFERS);
6493 if (rval) {
7c3df132 6494 ql_log(ql_log_warn, vha, 0x8034,
436a7b11
AV
6495 "Unable to reinitialize EFT "
6496 "(%d).\n", rval);
6497 }
6498 }
1da177e4 6499 } else { /* failed the ISP abort */
e315cd28
AC
6500 vha->flags.online = 1;
6501 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1da177e4 6502 if (ha->isp_abort_cnt == 0) {
7c3df132
SK
6503 ql_log(ql_log_fatal, vha, 0x8035,
6504 "ISP error recover failed - "
6505 "board disabled.\n");
fa2a1ce5 6506 /*
1da177e4
LT
6507 * The next call disables the board
6508 * completely.
6509 */
e315cd28
AC
6510 ha->isp_ops->reset_adapter(vha);
6511 vha->flags.online = 0;
1da177e4 6512 clear_bit(ISP_ABORT_RETRY,
e315cd28 6513 &vha->dpc_flags);
1da177e4
LT
6514 status = 0;
6515 } else { /* schedule another ISP abort */
6516 ha->isp_abort_cnt--;
7c3df132
SK
6517 ql_dbg(ql_dbg_taskm, vha, 0x8020,
6518 "ISP abort - retry remaining %d.\n",
6519 ha->isp_abort_cnt);
1da177e4
LT
6520 status = 1;
6521 }
6522 } else {
6523 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
7c3df132
SK
6524 ql_dbg(ql_dbg_taskm, vha, 0x8021,
6525 "ISP error recovery - retrying (%d) "
6526 "more times.\n", ha->isp_abort_cnt);
e315cd28 6527 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
1da177e4
LT
6528 status = 1;
6529 }
6530 }
fa2a1ce5 6531
1da177e4
LT
6532 }
6533
e315cd28 6534 if (!status) {
7c3df132 6535 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
1608cc4a 6536 qla2x00_configure_hba(vha);
feafb7b1
AE
6537 spin_lock_irqsave(&ha->vport_slock, flags);
6538 list_for_each_entry(vp, &ha->vp_list, list) {
6539 if (vp->vp_idx) {
6540 atomic_inc(&vp->vref_count);
6541 spin_unlock_irqrestore(&ha->vport_slock, flags);
6542
e315cd28 6543 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
6544
6545 spin_lock_irqsave(&ha->vport_slock, flags);
6546 atomic_dec(&vp->vref_count);
6547 }
e315cd28 6548 }
feafb7b1
AE
6549 spin_unlock_irqrestore(&ha->vport_slock, flags);
6550
7d613ac6
SV
6551 if (IS_QLA8031(ha)) {
6552 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
6553 "Setting back fcoe driver presence.\n");
6554 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
6555 ql_dbg(ql_dbg_p3p, vha, 0xb074,
6556 "Error while setting DRV-Presence.\n");
6557 }
e315cd28 6558 } else {
d8424f68
JP
6559 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
6560 __func__);
1da177e4
LT
6561 }
6562
6563 return(status);
6564}
6565
6566/*
6567* qla2x00_restart_isp
6568* restarts the ISP after a reset
6569*
6570* Input:
6571* ha = adapter block pointer.
6572*
6573* Returns:
6574* 0 = success
6575*/
6576static int
e315cd28 6577qla2x00_restart_isp(scsi_qla_host_t *vha)
1da177e4 6578{
c6b2fca8 6579 int status = 0;
e315cd28 6580 struct qla_hw_data *ha = vha->hw;
73208dfd
AC
6581 struct req_que *req = ha->req_q_map[0];
6582 struct rsp_que *rsp = ha->rsp_q_map[0];
1da177e4
LT
6583
6584 /* If firmware needs to be loaded */
e315cd28
AC
6585 if (qla2x00_isp_firmware(vha)) {
6586 vha->flags.online = 0;
6587 status = ha->isp_ops->chip_diag(vha);
6588 if (!status)
6589 status = qla2x00_setup_chip(vha);
1da177e4
LT
6590 }
6591
e315cd28
AC
6592 if (!status && !(status = qla2x00_init_rings(vha))) {
6593 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
2533cf67 6594 ha->flags.chip_reset_done = 1;
7108b76e 6595
73208dfd
AC
6596 /* Initialize the queues in use */
6597 qla25xx_init_queues(ha);
6598
e315cd28
AC
6599 status = qla2x00_fw_ready(vha);
6600 if (!status) {
0107109e 6601 /* Issue a marker after FW becomes ready. */
73208dfd 6602 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
7108b76e 6603 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4
LT
6604 }
6605
6606 /* if no cable then assume it's good */
e315cd28 6607 if ((vha->device_flags & DFLG_NO_CABLE))
1da177e4 6608 status = 0;
1da177e4
LT
6609 }
6610 return (status);
6611}
6612
73208dfd
AC
6613static int
6614qla25xx_init_queues(struct qla_hw_data *ha)
6615{
6616 struct rsp_que *rsp = NULL;
6617 struct req_que *req = NULL;
6618 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6619 int ret = -1;
6620 int i;
6621
2afa19a9 6622 for (i = 1; i < ha->max_rsp_queues; i++) {
73208dfd 6623 rsp = ha->rsp_q_map[i];
cb43285f 6624 if (rsp && test_bit(i, ha->rsp_qid_map)) {
73208dfd 6625 rsp->options &= ~BIT_0;
618a7523 6626 ret = qla25xx_init_rsp_que(base_vha, rsp);
73208dfd 6627 if (ret != QLA_SUCCESS)
7c3df132
SK
6628 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
6629 "%s Rsp que: %d init failed.\n",
6630 __func__, rsp->id);
73208dfd 6631 else
7c3df132
SK
6632 ql_dbg(ql_dbg_init, base_vha, 0x0100,
6633 "%s Rsp que: %d inited.\n",
6634 __func__, rsp->id);
73208dfd 6635 }
2afa19a9
AC
6636 }
6637 for (i = 1; i < ha->max_req_queues; i++) {
73208dfd 6638 req = ha->req_q_map[i];
cb43285f
QT
6639 if (req && test_bit(i, ha->req_qid_map)) {
6640 /* Clear outstanding commands array. */
73208dfd 6641 req->options &= ~BIT_0;
618a7523 6642 ret = qla25xx_init_req_que(base_vha, req);
73208dfd 6643 if (ret != QLA_SUCCESS)
7c3df132
SK
6644 ql_dbg(ql_dbg_init, base_vha, 0x0101,
6645 "%s Req que: %d init failed.\n",
6646 __func__, req->id);
73208dfd 6647 else
7c3df132
SK
6648 ql_dbg(ql_dbg_init, base_vha, 0x0102,
6649 "%s Req que: %d inited.\n",
6650 __func__, req->id);
73208dfd
AC
6651 }
6652 }
6653 return ret;
6654}
6655
1da177e4
LT
6656/*
6657* qla2x00_reset_adapter
6658* Reset adapter.
6659*
6660* Input:
6661* ha = adapter block pointer.
6662*/
abbd8870 6663void
e315cd28 6664qla2x00_reset_adapter(scsi_qla_host_t *vha)
1da177e4
LT
6665{
6666 unsigned long flags = 0;
e315cd28 6667 struct qla_hw_data *ha = vha->hw;
3d71644c 6668 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 6669
e315cd28 6670 vha->flags.online = 0;
fd34f556 6671 ha->isp_ops->disable_intrs(ha);
1da177e4 6672
1da177e4
LT
6673 spin_lock_irqsave(&ha->hardware_lock, flags);
6674 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
6675 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
6676 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
6677 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
6678 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6679}
0107109e
AV
6680
6681void
e315cd28 6682qla24xx_reset_adapter(scsi_qla_host_t *vha)
0107109e
AV
6683{
6684 unsigned long flags = 0;
e315cd28 6685 struct qla_hw_data *ha = vha->hw;
0107109e
AV
6686 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
6687
7ec0effd 6688 if (IS_P3P_TYPE(ha))
a9083016
GM
6689 return;
6690
e315cd28 6691 vha->flags.online = 0;
fd34f556 6692 ha->isp_ops->disable_intrs(ha);
0107109e
AV
6693
6694 spin_lock_irqsave(&ha->hardware_lock, flags);
6695 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
6696 RD_REG_DWORD(&reg->hccr);
6697 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
6698 RD_REG_DWORD(&reg->hccr);
6699 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09ff36d3
AV
6700
6701 if (IS_NOPOLLING_TYPE(ha))
6702 ha->isp_ops->enable_intrs(ha);
0107109e
AV
6703}
6704
4e08df3f
DM
6705/* On sparc systems, obtain port and node WWN from firmware
6706 * properties.
6707 */
e315cd28
AC
6708static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
6709 struct nvram_24xx *nv)
4e08df3f
DM
6710{
6711#ifdef CONFIG_SPARC
e315cd28 6712 struct qla_hw_data *ha = vha->hw;
4e08df3f 6713 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
6714 struct device_node *dp = pci_device_to_OF_node(pdev);
6715 const u8 *val;
4e08df3f
DM
6716 int len;
6717
6718 val = of_get_property(dp, "port-wwn", &len);
6719 if (val && len >= WWN_SIZE)
6720 memcpy(nv->port_name, val, WWN_SIZE);
6721
6722 val = of_get_property(dp, "node-wwn", &len);
6723 if (val && len >= WWN_SIZE)
6724 memcpy(nv->node_name, val, WWN_SIZE);
6725#endif
6726}
6727
0107109e 6728int
e315cd28 6729qla24xx_nvram_config(scsi_qla_host_t *vha)
0107109e 6730{
4e08df3f 6731 int rval;
0107109e
AV
6732 struct init_cb_24xx *icb;
6733 struct nvram_24xx *nv;
6734 uint32_t *dptr;
6735 uint8_t *dptr1, *dptr2;
6736 uint32_t chksum;
6737 uint16_t cnt;
e315cd28 6738 struct qla_hw_data *ha = vha->hw;
0107109e 6739
4e08df3f 6740 rval = QLA_SUCCESS;
0107109e 6741 icb = (struct init_cb_24xx *)ha->init_cb;
281afe19 6742 nv = ha->nvram;
0107109e
AV
6743
6744 /* Determine NVRAM starting address. */
f73cb695 6745 if (ha->port_no == 0) {
e5b68a61
AC
6746 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
6747 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
6748 } else {
0107109e 6749 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
6f641790 6750 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
6751 }
f73cb695 6752
e5b68a61
AC
6753 ha->nvram_size = sizeof(struct nvram_24xx);
6754 ha->vpd_size = FA_NVRAM_VPD_SIZE;
0107109e 6755
281afe19
SJ
6756 /* Get VPD data into cache */
6757 ha->vpd = ha->nvram + VPD_OFFSET;
e315cd28 6758 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
281afe19
SJ
6759 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
6760
6761 /* Get NVRAM data into cache and calculate checksum. */
0107109e 6762 dptr = (uint32_t *)nv;
e315cd28 6763 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
0107109e 6764 ha->nvram_size);
da08ef5c
JC
6765 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
6766 chksum += le32_to_cpu(*dptr);
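 /* A valid NVRAM image sums to zero over its little-endian 32-bit words;
  * a nonzero chksum (or a bad "ISP " id / version below) selects the
  * default-parameter path. */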
0107109e 6767
7c3df132
SK
6768 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
6769 "Contents of NVRAM\n");
6770 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
6771 (uint8_t *)nv, ha->nvram_size);
0107109e
AV
6772
6773 /* Bad NVRAM data, set defaults parameters. */
6774 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
6775 || nv->id[3] != ' ' ||
ad950360 6776 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
0107109e 6777 /* Reset NVRAM data. */
7c3df132 6778 ql_log(ql_log_warn, vha, 0x006b,
9e336520 6779 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7c3df132
SK
6780 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
6781 ql_log(ql_log_warn, vha, 0x006c,
6782 "Falling back to functioning (yet invalid -- WWPN) "
6783 "defaults.\n");
4e08df3f
DM
6784
6785 /*
6786 * Set default initialization control block.
6787 */
6788 memset(nv, 0, ha->nvram_size);
ad950360
BVA
6789 nv->nvram_version = cpu_to_le16(ICB_VERSION);
6790 nv->version = cpu_to_le16(ICB_VERSION);
98aee70d 6791 nv->frame_payload_size = 2048;
ad950360
BVA
6792 nv->execution_throttle = cpu_to_le16(0xFFFF);
6793 nv->exchange_count = cpu_to_le16(0);
6794 nv->hard_address = cpu_to_le16(124);
4e08df3f 6795 nv->port_name[0] = 0x21;
f73cb695 6796 nv->port_name[1] = 0x00 + ha->port_no + 1;
4e08df3f
DM
6797 nv->port_name[2] = 0x00;
6798 nv->port_name[3] = 0xe0;
6799 nv->port_name[4] = 0x8b;
6800 nv->port_name[5] = 0x1c;
6801 nv->port_name[6] = 0x55;
6802 nv->port_name[7] = 0x86;
6803 nv->node_name[0] = 0x20;
6804 nv->node_name[1] = 0x00;
6805 nv->node_name[2] = 0x00;
6806 nv->node_name[3] = 0xe0;
6807 nv->node_name[4] = 0x8b;
6808 nv->node_name[5] = 0x1c;
6809 nv->node_name[6] = 0x55;
6810 nv->node_name[7] = 0x86;
e315cd28 6811 qla24xx_nvram_wwn_from_ofw(vha, nv);
ad950360
BVA
6812 nv->login_retry_count = cpu_to_le16(8);
6813 nv->interrupt_delay_timer = cpu_to_le16(0);
6814 nv->login_timeout = cpu_to_le16(0);
4e08df3f 6815 nv->firmware_options_1 =
ad950360
BVA
6816 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
6817 nv->firmware_options_2 = cpu_to_le32(2 << 4);
6818 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6819 nv->firmware_options_3 = cpu_to_le32(2 << 13);
6820 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
6821 nv->efi_parameters = cpu_to_le32(0);
4e08df3f 6822 nv->reset_delay = 5;
ad950360
BVA
6823 nv->max_luns_per_target = cpu_to_le16(128);
6824 nv->port_down_retry_count = cpu_to_le16(30);
6825 nv->link_down_timeout = cpu_to_le16(30);
4e08df3f
DM
6826
6827 rval = 1;
0107109e
AV
6828 }
6829
726b8548 6830 if (qla_tgt_mode_enabled(vha)) {
2d70c103 6831 /* Don't enable full login after initial LIP */
ad950360 6832 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
2d70c103 6833 /* Don't enable LIP full login for initiator */
ad950360 6834 nv->host_p &= cpu_to_le32(~BIT_10);
2d70c103
NB
6835 }
6836
6837 qlt_24xx_config_nvram_stage1(vha, nv);
6838
0107109e 6839 /* Reset Initialization control block */
e315cd28 6840 memset(icb, 0, ha->init_cb_size);
0107109e
AV
6841
6842 /* Copy 1st segment. */
6843 dptr1 = (uint8_t *)icb;
6844 dptr2 = (uint8_t *)&nv->version;
6845 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
6846 while (cnt--)
6847 *dptr1++ = *dptr2++;
6848
6849 icb->login_retry_count = nv->login_retry_count;
3ea66e28 6850 icb->link_down_on_nos = nv->link_down_on_nos;
0107109e
AV
6851
6852 /* Copy 2nd segment. */
6853 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
6854 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
6855 cnt = (uint8_t *)&icb->reserved_3 -
6856 (uint8_t *)&icb->interrupt_delay_timer;
6857 while (cnt--)
6858 *dptr1++ = *dptr2++;
6859
6860 /*
6861 * Setup driver NVRAM options.
6862 */
e315cd28 6863 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
9bb9fcf2 6864 "QLA2462");
0107109e 6865
2d70c103
NB
6866 qlt_24xx_config_nvram_stage2(vha, icb);
6867
ad950360 6868 if (nv->host_p & cpu_to_le32(BIT_15)) {
2d70c103 6869 /* Use alternate WWN? */
5341e868
AV
6870 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
6871 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
6872 }
6873
0107109e 6874 /* Prepare nodename */
ad950360 6875 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
0107109e
AV
6876 /*
6877 * Firmware will apply the following mask if the nodename was
6878 * not provided.
6879 */
6880 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
6881 icb->node_name[0] &= 0xF0;
6882 }
6883
6884 /* Set host adapter parameters. */
6885 ha->flags.disable_risc_code_load = 0;
0c8c39af
AV
6886 ha->flags.enable_lip_reset = 0;
6887 ha->flags.enable_lip_full_login =
6888 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
6889 ha->flags.enable_target_reset =
6890 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
0107109e 6891 ha->flags.enable_led_scheme = 0;
d4c760c2 6892 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
0107109e 6893
fd0e7e4d
AV
6894 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
6895 (BIT_6 | BIT_5 | BIT_4)) >> 4;
0107109e
AV
6896
6897 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
6898 sizeof(ha->fw_seriallink_options24));
6899
6900 /* save HBA serial number */
6901 ha->serial0 = icb->port_name[5];
6902 ha->serial1 = icb->port_name[6];
6903 ha->serial2 = icb->port_name[7];
e315cd28
AC
6904 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
6905 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
0107109e 6906
ad950360 6907 icb->execution_throttle = cpu_to_le16(0xFFFF);
bc8fb3cb 6908
0107109e
AV
6909 ha->retry_count = le16_to_cpu(nv->login_retry_count);
6910
6911 /* Set minimum login_timeout to 4 seconds. */
6912 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
6913 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
6914 if (le16_to_cpu(nv->login_timeout) < 4)
ad950360 6915 nv->login_timeout = cpu_to_le16(4);
0107109e 6916 ha->login_timeout = le16_to_cpu(nv->login_timeout);
0107109e 6917
00a537b8
AV
6918 /* Set minimum RATOV to 100 tenths of a second. */
6919 ha->r_a_tov = 100;
0107109e
AV
6920
6921 ha->loop_reset_delay = nv->reset_delay;
6922
6923 /* Link Down Timeout = 0:
6924 *
6925 * When Port Down timer expires we will start returning
6926 * I/O's to OS with "DID_NO_CONNECT".
6927 *
6928 * Link Down Timeout != 0:
6929 *
6930 * The driver waits for the link to come up after link down
6931 * before returning I/Os to OS with "DID_NO_CONNECT".
6932 */
6933 if (le16_to_cpu(nv->link_down_timeout) == 0) {
6934 ha->loop_down_abort_time =
6935 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
6936 } else {
6937 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
6938 ha->loop_down_abort_time =
6939 (LOOP_DOWN_TIME - ha->link_down_timeout);
6940 }
6941
6942 /* Need enough time to try and get the port back. */
6943 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
6944 if (qlport_down_retry)
6945 ha->port_down_retry_count = qlport_down_retry;
6946
6947 /* Set login_retry_count */
6948 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
6949 if (ha->port_down_retry_count ==
6950 le16_to_cpu(nv->port_down_retry_count) &&
6951 ha->port_down_retry_count > 3)
6952 ha->login_retry_count = ha->port_down_retry_count;
6953 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
6954 ha->login_retry_count = ha->port_down_retry_count;
6955 if (ql2xloginretrycount)
6956 ha->login_retry_count = ql2xloginretrycount;
6957
4fdfefe5 6958 /* Enable ZIO. */
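 /* ZIO (Zero Interrupt Operation) lets the firmware coalesce completions;
  * the delay timer below appears to be programmed in 100 us units, matching
  * the "timer delay" log message. */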
e315cd28 6959 if (!vha->flags.init_done) {
4fdfefe5
AV
6960 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
6961 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
6962 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
6963 le16_to_cpu(icb->interrupt_delay_timer): 2;
6964 }
ad950360 6965 icb->firmware_options_2 &= cpu_to_le32(
4fdfefe5 6966 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
e315cd28 6967 vha->flags.process_response_queue = 0;
4fdfefe5 6968 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d 6969 ha->zio_mode = QLA_ZIO_MODE_6;
6970
7c3df132 6971 ql_log(ql_log_info, vha, 0x006f,
4fdfefe5
AV
6972 "ZIO mode %d enabled; timer delay (%d us).\n",
6973 ha->zio_mode, ha->zio_timer * 100);
6974
6975 icb->firmware_options_2 |= cpu_to_le32(
6976 (uint32_t)ha->zio_mode);
6977 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
e315cd28 6978 vha->flags.process_response_queue = 1;
4fdfefe5
AV
6979 }
6980
4e08df3f 6981 if (rval) {
7c3df132
SK
6982 ql_log(ql_log_warn, vha, 0x0070,
6983 "NVRAM configuration failed.\n");
4e08df3f
DM
6984 }
6985 return (rval);
0107109e
AV
6986}
6987
4243c115
SC
6988uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
6989{
6990 struct qla27xx_image_status pri_image_status, sec_image_status;
6991 uint8_t valid_pri_image, valid_sec_image;
6992 uint32_t *wptr;
6993 uint32_t cnt, chksum, size;
6994 struct qla_hw_data *ha = vha->hw;
6995
6996 valid_pri_image = valid_sec_image = 1;
6997 ha->active_image = 0;
6998 size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
6999
7000 if (!ha->flt_region_img_status_pri) {
7001 valid_pri_image = 0;
7002 goto check_sec_image;
7003 }
7004
7005 qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
7006 ha->flt_region_img_status_pri, size);
7007
7008 if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
7009 ql_dbg(ql_dbg_init, vha, 0x018b,
7010 "Primary image signature (0x%x) not valid\n",
7011 pri_image_status.signature);
7012 valid_pri_image = 0;
7013 goto check_sec_image;
7014 }
7015
7016 wptr = (uint32_t *)(&pri_image_status);
7017 cnt = size;
7018
da08ef5c
JC
7019 for (chksum = 0; cnt--; wptr++)
7020 chksum += le32_to_cpu(*wptr);
41dc529a 7021
4243c115
SC
7022 if (chksum) {
7023 ql_dbg(ql_dbg_init, vha, 0x018c,
7024 "Checksum validation failed for primary image (0x%x)\n",
7025 chksum);
7026 valid_pri_image = 0;
7027 }
7028
7029check_sec_image:
7030 if (!ha->flt_region_img_status_sec) {
7031 valid_sec_image = 0;
7032 goto check_valid_image;
7033 }
7034
7035 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
7036 ha->flt_region_img_status_sec, size);
7037
7038 if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
7039 ql_dbg(ql_dbg_init, vha, 0x018d,
7040 "Secondary image signature(0x%x) not valid\n",
7041 sec_image_status.signature);
7042 valid_sec_image = 0;
7043 goto check_valid_image;
7044 }
7045
7046 wptr = (uint32_t *)(&sec_image_status);
7047 cnt = size;
da08ef5c
JC
7048 for (chksum = 0; cnt--; wptr++)
7049 chksum += le32_to_cpu(*wptr);
4243c115
SC
7050 if (chksum) {
7051 ql_dbg(ql_dbg_init, vha, 0x018e,
7052 "Checksum validation failed for secondary image (0x%x)\n",
7053 chksum);
7054 valid_sec_image = 0;
7055 }
7056
7057check_valid_image:
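/*
 * Prefer the primary image; the secondary wins only when the primary is
 * unusable or the secondary carries a newer generation number.
 */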
7058 if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
7059 ha->active_image = QLA27XX_PRIMARY_IMAGE;
7060 if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
7061 if (!ha->active_image ||
7062 pri_image_status.generation_number <
7063 sec_image_status.generation_number)
7064 ha->active_image = QLA27XX_SECONDARY_IMAGE;
7065 }
7066
22ebde16 7067 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n",
4243c115
SC
7068 ha->active_image == 0 ? "default bootld and fw" :
7069 ha->active_image == 1 ? "primary" :
7070 ha->active_image == 2 ? "secondary" :
7071 "Invalid");
7072
7073 return ha->active_image;
7074}
7075
413975a0 7076static int
cbc8eb67
AV
7077qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
7078 uint32_t faddr)
d1c61909 7079{
73208dfd 7080 int rval = QLA_SUCCESS;
d1c61909 7081 int segments, fragment;
d1c61909
AV
7082 uint32_t *dcode, dlen;
7083 uint32_t risc_addr;
7084 uint32_t risc_size;
7085 uint32_t i;
e315cd28 7086 struct qla_hw_data *ha = vha->hw;
73208dfd 7087 struct req_que *req = ha->req_q_map[0];
eaac30be 7088
7c3df132 7089 ql_dbg(ql_dbg_init, vha, 0x008b,
cfb0919c 7090 "FW: Loading firmware from flash (%x).\n", faddr);
eaac30be 7091
d1c61909
AV
7092 rval = QLA_SUCCESS;
7093
7094 segments = FA_RISC_CODE_SEGMENTS;
73208dfd 7095 dcode = (uint32_t *)req->ring;
d1c61909
AV
7096 *srisc_addr = 0;
7097
4243c115
SC
7098 if (IS_QLA27XX(ha) &&
7099 qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
7100 faddr = ha->flt_region_fw_sec;
7101
d1c61909 7102 /* Validate firmware image by checking version. */
e315cd28 7103 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
d1c61909
AV
7104 for (i = 0; i < 4; i++)
7105 dcode[i] = be32_to_cpu(dcode[i]);
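/* All-ones usually indicates erased flash; all-zeros a blank image. */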
7106 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
7107 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
7108 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
7109 dcode[3] == 0)) {
7c3df132
SK
7110 ql_log(ql_log_fatal, vha, 0x008c,
7111 "Unable to verify the integrity of flash firmware "
7112 "image.\n");
7113 ql_log(ql_log_fatal, vha, 0x008d,
7114 "Firmware data: %08x %08x %08x %08x.\n",
7115 dcode[0], dcode[1], dcode[2], dcode[3]);
d1c61909
AV
7116
7117 return QLA_FUNCTION_FAILED;
7118 }
7119
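/*
 * Each segment's leading dwords describe it: dword 2 holds the RISC
 * load address and dword 3 the segment length in dwords. The segment is
 * then handed to the RISC in fw_transfer_size sized chunks via
 * qla2x00_load_ram().
 */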
7120 while (segments && rval == QLA_SUCCESS) {
7121 /* Read segment's load information. */
e315cd28 7122 qla24xx_read_flash_data(vha, dcode, faddr, 4);
d1c61909
AV
7123
7124 risc_addr = be32_to_cpu(dcode[2]);
7125 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
7126 risc_size = be32_to_cpu(dcode[3]);
7127
7128 fragment = 0;
7129 while (risc_size > 0 && rval == QLA_SUCCESS) {
7130 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
7131 if (dlen > risc_size)
7132 dlen = risc_size;
7133
7c3df132
SK
7134 ql_dbg(ql_dbg_init, vha, 0x008e,
7135 "Loading risc segment@ risc addr %x "
7136 "number of dwords 0x%x offset 0x%x.\n",
7137 risc_addr, dlen, faddr);
d1c61909 7138
e315cd28 7139 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
d1c61909
AV
7140 for (i = 0; i < dlen; i++)
7141 dcode[i] = swab32(dcode[i]);
7142
73208dfd 7143 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
d1c61909
AV
7144 dlen);
7145 if (rval) {
7c3df132
SK
7146 ql_log(ql_log_fatal, vha, 0x008f,
7147 "Failed to load segment %d of firmware.\n",
7148 fragment);
f261f7af 7149 return QLA_FUNCTION_FAILED;
d1c61909
AV
7150 }
7151
7152 faddr += dlen;
7153 risc_addr += dlen;
7154 risc_size -= dlen;
7155 fragment++;
7156 }
7157
7158 /* Next segment. */
7159 segments--;
7160 }
7161
f73cb695
CD
7162 if (!IS_QLA27XX(ha))
7163 return rval;
7164
7165 if (ha->fw_dump_template)
7166 vfree(ha->fw_dump_template);
7167 ha->fw_dump_template = NULL;
7168 ha->fw_dump_template_len = 0;
7169
7170 ql_dbg(ql_dbg_init, vha, 0x0161,
7171 "Loading fwdump template from %x\n", faddr);
7172 qla24xx_read_flash_data(vha, dcode, faddr, 7);
7173 risc_size = be32_to_cpu(dcode[2]);
7174 ql_dbg(ql_dbg_init, vha, 0x0162,
7175 "-> array size %x dwords\n", risc_size);
7176 if (risc_size == 0 || risc_size == ~0)
7177 goto default_template;
7178
7179 dlen = (risc_size - 8) * sizeof(*dcode);
7180 ql_dbg(ql_dbg_init, vha, 0x0163,
7181 "-> template allocating %x bytes...\n", dlen);
7182 ha->fw_dump_template = vmalloc(dlen);
7183 if (!ha->fw_dump_template) {
7184 ql_log(ql_log_warn, vha, 0x0164,
7185 "Failed fwdump template allocate %x bytes.\n", risc_size);
7186 goto default_template;
7187 }
7188
7189 faddr += 7;
7190 risc_size -= 8;
7191 dcode = ha->fw_dump_template;
7192 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
7193 for (i = 0; i < risc_size; i++)
7194 dcode[i] = le32_to_cpu(dcode[i]);
7195
7196 if (!qla27xx_fwdt_template_valid(dcode)) {
7197 ql_log(ql_log_warn, vha, 0x0165,
7198 "Failed fwdump template validate\n");
7199 goto default_template;
7200 }
7201
7202 dlen = qla27xx_fwdt_template_size(dcode);
7203 ql_dbg(ql_dbg_init, vha, 0x0166,
7204 "-> template size %x bytes\n", dlen);
7205 if (dlen > risc_size * sizeof(*dcode)) {
7206 ql_log(ql_log_warn, vha, 0x0167,
4fae52b5 7207 "Failed fwdump template exceeds array by %zx bytes\n",
383a298b 7208 (size_t)(dlen - risc_size * sizeof(*dcode)));
f73cb695
CD
7209 goto default_template;
7210 }
7211 ha->fw_dump_template_len = dlen;
7212 return rval;
7213
7214default_template:
7215 ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
7216 if (ha->fw_dump_template)
7217 vfree(ha->fw_dump_template);
7218 ha->fw_dump_template = NULL;
7219 ha->fw_dump_template_len = 0;
7220
7221 dlen = qla27xx_fwdt_template_default_size();
7222 ql_dbg(ql_dbg_init, vha, 0x0169,
7223 "-> template allocating %x bytes...\n", dlen);
7224 ha->fw_dump_template = vmalloc(dlen);
7225 if (!ha->fw_dump_template) {
7226 ql_log(ql_log_warn, vha, 0x016a,
7227 "Failed fwdump template allocate %x bytes.\n", risc_size);
7228 goto failed_template;
7229 }
7230
7231 dcode = ha->fw_dump_template;
7232 risc_size = dlen / sizeof(*dcode);
7233 memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
7234 for (i = 0; i < risc_size; i++)
7235 dcode[i] = be32_to_cpu(dcode[i]);
7236
7237 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
7238 ql_log(ql_log_warn, vha, 0x016b,
7239 "Failed fwdump template validate\n");
7240 goto failed_template;
7241 }
7242
7243 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
7244 ql_dbg(ql_dbg_init, vha, 0x016c,
7245 "-> template size %x bytes\n", dlen);
7246 ha->fw_dump_template_len = dlen;
7247 return rval;
7248
7249failed_template:
7250 ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
7251 if (ha->fw_dump_template)
7252 vfree(ha->fw_dump_template);
7253 ha->fw_dump_template = NULL;
7254 ha->fw_dump_template_len = 0;
d1c61909
AV
7255 return rval;
7256}
7257
e9454a88 7258#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
d1c61909 7259
0107109e 7260int
e315cd28 7261qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5433383e
AV
7262{
7263 int rval;
7264 int i, fragment;
7265 uint16_t *wcode, *fwcode;
7266 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
7267 struct fw_blob *blob;
e315cd28 7268 struct qla_hw_data *ha = vha->hw;
73208dfd 7269 struct req_que *req = ha->req_q_map[0];
5433383e
AV
7270
7271 /* Load firmware blob. */
e315cd28 7272 blob = qla2x00_request_firmware(vha);
5433383e 7273 if (!blob) {
7c3df132 7274 ql_log(ql_log_info, vha, 0x0083,
94bcf830 7275 "Firmware image unavailable.\n");
7c3df132
SK
7276 ql_log(ql_log_info, vha, 0x0084,
7277 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
5433383e
AV
7278 return QLA_FUNCTION_FAILED;
7279 }
7280
7281 rval = QLA_SUCCESS;
7282
73208dfd 7283 wcode = (uint16_t *)req->ring;
5433383e
AV
7284 *srisc_addr = 0;
7285 fwcode = (uint16_t *)blob->fw->data;
7286 fwclen = 0;
7287
7288 /* Validate firmware image by checking version. */
7289 if (blob->fw->size < 8 * sizeof(uint16_t)) {
7c3df132 7290 ql_log(ql_log_fatal, vha, 0x0085,
5b5e0928 7291 "Unable to verify integrity of firmware image (%zd).\n",
5433383e
AV
7292 blob->fw->size);
7293 goto fail_fw_integrity;
7294 }
7295 for (i = 0; i < 4; i++)
7296 wcode[i] = be16_to_cpu(fwcode[i + 4]);
7297 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
7298 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
7299 wcode[2] == 0 && wcode[3] == 0)) {
7c3df132
SK
7300 ql_log(ql_log_fatal, vha, 0x0086,
7301 "Unable to verify integrity of firmware image.\n");
7302 ql_log(ql_log_fatal, vha, 0x0087,
7303 "Firmware data: %04x %04x %04x %04x.\n",
7304 wcode[0], wcode[1], wcode[2], wcode[3]);
5433383e
AV
7305 goto fail_fw_integrity;
7306 }
7307
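/*
 * For these older ISPs the per-segment load addresses come from the
 * blob's segs[] table, while each segment's length (in 16-bit words) is
 * read from the image itself.
 */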
7308 seg = blob->segs;
7309 while (*seg && rval == QLA_SUCCESS) {
7310 risc_addr = *seg;
7311 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
7312 risc_size = be16_to_cpu(fwcode[3]);
7313
7314 /* Validate firmware image size. */
7315 fwclen += risc_size * sizeof(uint16_t);
7316 if (blob->fw->size < fwclen) {
7c3df132 7317 ql_log(ql_log_fatal, vha, 0x0088,
5433383e 7318 "Unable to verify integrity of firmware image "
5b5e0928 7319 "(%zd).\n", blob->fw->size);
5433383e
AV
7320 goto fail_fw_integrity;
7321 }
7322
7323 fragment = 0;
7324 while (risc_size > 0 && rval == QLA_SUCCESS) {
7325 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
7326 if (wlen > risc_size)
7327 wlen = risc_size;
7c3df132
SK
7328 ql_dbg(ql_dbg_init, vha, 0x0089,
7329 "Loading risc segment@ risc addr %x number of "
7330 "words 0x%x.\n", risc_addr, wlen);
5433383e
AV
7331
7332 for (i = 0; i < wlen; i++)
7333 wcode[i] = swab16(fwcode[i]);
7334
73208dfd 7335 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5433383e
AV
7336 wlen);
7337 if (rval) {
7c3df132
SK
7338 ql_log(ql_log_fatal, vha, 0x008a,
7339 "Failed to load segment %d of firmware.\n",
7340 fragment);
5433383e
AV
7341 break;
7342 }
7343
7344 fwcode += wlen;
7345 risc_addr += wlen;
7346 risc_size -= wlen;
7347 fragment++;
7348 }
7349
7350 /* Next segment. */
7351 seg++;
7352 }
7353 return rval;
7354
7355fail_fw_integrity:
7356 return QLA_FUNCTION_FAILED;
7357}
7358
eaac30be
AV
7359static int
7360qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
0107109e
AV
7361{
7362 int rval;
7363 int segments, fragment;
7364 uint32_t *dcode, dlen;
7365 uint32_t risc_addr;
7366 uint32_t risc_size;
7367 uint32_t i;
5433383e 7368 struct fw_blob *blob;
f73cb695
CD
7369 const uint32_t *fwcode;
7370 uint32_t fwclen;
e315cd28 7371 struct qla_hw_data *ha = vha->hw;
73208dfd 7372 struct req_que *req = ha->req_q_map[0];
0107109e 7373
5433383e 7374 /* Load firmware blob. */
e315cd28 7375 blob = qla2x00_request_firmware(vha);
5433383e 7376 if (!blob) {
7c3df132 7377 ql_log(ql_log_warn, vha, 0x0090,
94bcf830 7378 "Firmware image unavailable.\n");
7c3df132
SK
7379 ql_log(ql_log_warn, vha, 0x0091,
7380 "Firmware images can be retrieved from: "
7381 QLA_FW_URL ".\n");
d1c61909 7382
eaac30be 7383 return QLA_FUNCTION_FAILED;
0107109e
AV
7384 }
7385
cfb0919c
CD
7386 ql_dbg(ql_dbg_init, vha, 0x0092,
7387 "FW: Loading via request-firmware.\n");
eaac30be 7388
0107109e
AV
7389 rval = QLA_SUCCESS;
7390
7391 segments = FA_RISC_CODE_SEGMENTS;
73208dfd 7392 dcode = (uint32_t *)req->ring;
0107109e 7393 *srisc_addr = 0;
5433383e 7394 fwcode = (uint32_t *)blob->fw->data;
0107109e
AV
7395 fwclen = 0;
7396
7397 /* Validate firmware image by checking version. */
5433383e 7398 if (blob->fw->size < 8 * sizeof(uint32_t)) {
7c3df132 7399 ql_log(ql_log_fatal, vha, 0x0093,
5b5e0928 7400 "Unable to verify integrity of firmware image (%zd).\n",
5433383e 7401 blob->fw->size);
f73cb695 7402 return QLA_FUNCTION_FAILED;
0107109e
AV
7403 }
7404 for (i = 0; i < 4; i++)
7405 dcode[i] = be32_to_cpu(fwcode[i + 4]);
7406 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
7407 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
7408 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
7409 dcode[3] == 0)) {
7c3df132 7410 ql_log(ql_log_fatal, vha, 0x0094,
5b5e0928 7411 "Unable to verify integrity of firmware image (%zd).\n",
7c3df132
SK
7412 blob->fw->size);
7413 ql_log(ql_log_fatal, vha, 0x0095,
7414 "Firmware data: %08x %08x %08x %08x.\n",
7415 dcode[0], dcode[1], dcode[2], dcode[3]);
f73cb695 7416 return QLA_FUNCTION_FAILED;
0107109e
AV
7417 }
7418
7419 while (segments && rval == QLA_SUCCESS) {
7420 risc_addr = be32_to_cpu(fwcode[2]);
7421 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
7422 risc_size = be32_to_cpu(fwcode[3]);
7423
7424 /* Validate firmware image size. */
7425 fwclen += risc_size * sizeof(uint32_t);
5433383e 7426 if (blob->fw->size < fwclen) {
7c3df132 7427 ql_log(ql_log_fatal, vha, 0x0096,
5433383e 7428 "Unable to verify integrity of firmware image "
5b5e0928 7429 "(%zd).\n", blob->fw->size);
f73cb695 7430 return QLA_FUNCTION_FAILED;
0107109e
AV
7431 }
7432
7433 fragment = 0;
7434 while (risc_size > 0 && rval == QLA_SUCCESS) {
7435 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
7436 if (dlen > risc_size)
7437 dlen = risc_size;
7438
7c3df132
SK
7439 ql_dbg(ql_dbg_init, vha, 0x0097,
7440 "Loading risc segment@ risc addr %x "
7441 "number of dwords 0x%x.\n", risc_addr, dlen);
0107109e
AV
7442
7443 for (i = 0; i < dlen; i++)
7444 dcode[i] = swab32(fwcode[i]);
7445
73208dfd 7446 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
590f98e5 7447 dlen);
0107109e 7448 if (rval) {
7c3df132
SK
7449 ql_log(ql_log_fatal, vha, 0x0098,
7450 "Failed to load segment %d of firmware.\n",
7451 fragment);
f261f7af 7452 return QLA_FUNCTION_FAILED;
0107109e
AV
7453 }
7454
7455 fwcode += dlen;
7456 risc_addr += dlen;
7457 risc_size -= dlen;
7458 fragment++;
7459 }
7460
7461 /* Next segment. */
7462 segments--;
7463 }
f73cb695
CD
7464
7465 if (!IS_QLA27XX(ha))
7466 return rval;
7467
7468 if (ha->fw_dump_template)
7469 vfree(ha->fw_dump_template);
7470 ha->fw_dump_template = NULL;
7471 ha->fw_dump_template_len = 0;
7472
7473 ql_dbg(ql_dbg_init, vha, 0x171,
97ea702b
CD
7474 "Loading fwdump template from %x\n",
7475 (uint32_t)((void *)fwcode - (void *)blob->fw->data));
f73cb695
CD
7476 risc_size = be32_to_cpu(fwcode[2]);
7477 ql_dbg(ql_dbg_init, vha, 0x172,
7478 "-> array size %x dwords\n", risc_size);
7479 if (risc_size == 0 || risc_size == ~0)
7480 goto default_template;
7481
7482 dlen = (risc_size - 8) * sizeof(*fwcode);
7483 ql_dbg(ql_dbg_init, vha, 0x0173,
7484 "-> template allocating %x bytes...\n", dlen);
7485 ha->fw_dump_template = vmalloc(dlen);
7486 if (!ha->fw_dump_template) {
7487 ql_log(ql_log_warn, vha, 0x0174,
7488 "Failed fwdump template allocate %x bytes.\n", risc_size);
7489 goto default_template;
7490 }
7491
7492 fwcode += 7;
7493 risc_size -= 8;
7494 dcode = ha->fw_dump_template;
7495 for (i = 0; i < risc_size; i++)
7496 dcode[i] = le32_to_cpu(fwcode[i]);
7497
7498 if (!qla27xx_fwdt_template_valid(dcode)) {
7499 ql_log(ql_log_warn, vha, 0x0175,
7500 "Failed fwdump template validate\n");
7501 goto default_template;
7502 }
7503
7504 dlen = qla27xx_fwdt_template_size(dcode);
7505 ql_dbg(ql_dbg_init, vha, 0x0176,
7506 "-> template size %x bytes\n", dlen);
7507 if (dlen > risc_size * sizeof(*fwcode)) {
7508 ql_log(ql_log_warn, vha, 0x0177,
4fae52b5 7509 "Failed fwdump template exceeds array by %zx bytes\n",
383a298b 7510 (size_t)(dlen - risc_size * sizeof(*fwcode)));
f73cb695
CD
7511 goto default_template;
7512 }
7513 ha->fw_dump_template_len = dlen;
0107109e
AV
7514 return rval;
7515
f73cb695
CD
7516default_template:
7517 ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
7518 if (ha->fw_dump_template)
7519 vfree(ha->fw_dump_template);
7520 ha->fw_dump_template = NULL;
7521 ha->fw_dump_template_len = 0;
7522
7523 dlen = qla27xx_fwdt_template_default_size();
7524 ql_dbg(ql_dbg_init, vha, 0x0179,
7525 "-> template allocating %x bytes...\n", dlen);
7526 ha->fw_dump_template = vmalloc(dlen);
7527 if (!ha->fw_dump_template) {
7528 ql_log(ql_log_warn, vha, 0x017a,
7529 "Failed fwdump template allocate %x bytes.\n", risc_size);
7530 goto failed_template;
7531 }
7532
7533 dcode = ha->fw_dump_template;
7534 risc_size = dlen / sizeof(*fwcode);
7535 fwcode = qla27xx_fwdt_template_default();
7536 for (i = 0; i < risc_size; i++)
7537 dcode[i] = be32_to_cpu(fwcode[i]);
7538
7539 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
7540 ql_log(ql_log_warn, vha, 0x017b,
7541 "Failed fwdump template validate\n");
7542 goto failed_template;
7543 }
7544
7545 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
7546 ql_dbg(ql_dbg_init, vha, 0x017c,
7547 "-> template size %x bytes\n", dlen);
7548 ha->fw_dump_template_len = dlen;
7549 return rval;
7550
7551failed_template:
7552 ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
7553 if (ha->fw_dump_template)
7554 vfree(ha->fw_dump_template);
7555 ha->fw_dump_template = NULL;
7556 ha->fw_dump_template_len = 0;
7557 return rval;
0107109e 7558}
18c6c127 7559
eaac30be
AV
7560int
7561qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7562{
7563 int rval;
7564
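/*
 * ql2xfwloadbin selects the firmware source (see the module parameter
 * description in qla_os.c): 1 prefers the copy in flash, 2 prefers the
 * request-firmware blob, 0 keeps the default order below.
 */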
e337d907
AV
7565 if (ql2xfwloadbin == 1)
7566 return qla81xx_load_risc(vha, srisc_addr);
7567
eaac30be
AV
7568 /*
7569 * FW Load priority:
7570 * 1) Firmware via request-firmware interface (.bin file).
7571 * 2) Firmware residing in flash.
7572 */
7573 rval = qla24xx_load_risc_blob(vha, srisc_addr);
7574 if (rval == QLA_SUCCESS)
7575 return rval;
7576
cbc8eb67
AV
7577 return qla24xx_load_risc_flash(vha, srisc_addr,
7578 vha->hw->flt_region_fw);
eaac30be
AV
7579}
7580
7581int
7582qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7583{
7584 int rval;
cbc8eb67 7585 struct qla_hw_data *ha = vha->hw;
eaac30be 7586
e337d907 7587 if (ql2xfwloadbin == 2)
cbc8eb67 7588 goto try_blob_fw;
e337d907 7589
eaac30be
AV
7590 /*
7591 * FW Load priority:
7592 * 1) Firmware residing in flash.
7593 * 2) Firmware via request-firmware interface (.bin file).
cbc8eb67 7594 * 3) Golden-Firmware residing in flash -- limited operation.
eaac30be 7595 */
cbc8eb67 7596 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
eaac30be
AV
7597 if (rval == QLA_SUCCESS)
7598 return rval;
7599
cbc8eb67
AV
7600try_blob_fw:
7601 rval = qla24xx_load_risc_blob(vha, srisc_addr);
7602 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
7603 return rval;
7604
7c3df132
SK
7605 ql_log(ql_log_info, vha, 0x0099,
7606 "Attempting to fallback to golden firmware.\n");
cbc8eb67
AV
7607 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
7608 if (rval != QLA_SUCCESS)
7609 return rval;
7610
7c3df132 7611 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
cbc8eb67 7612 ha->flags.running_gold_fw = 1;
cbc8eb67 7613 return rval;
eaac30be
AV
7614}
7615
18c6c127 7616void
e315cd28 7617qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
18c6c127
AV
7618{
7619 int ret, retries;
e315cd28 7620 struct qla_hw_data *ha = vha->hw;
18c6c127 7621
85880801
AV
7622 if (ha->flags.pci_channel_io_perm_failure)
7623 return;
e428924c 7624 if (!IS_FWI2_CAPABLE(ha))
18c6c127 7625 return;
75edf81d
AV
7626 if (!ha->fw_major_version)
7627 return;
ec7193e2
QT
7628 if (!ha->flags.fw_started)
7629 return;
18c6c127 7630
e315cd28 7631 ret = qla2x00_stop_firmware(vha);
7c7f1f29 7632 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
b469a7cb 7633 ret != QLA_INVALID_COMMAND && retries ; retries--) {
e315cd28
AC
7634 ha->isp_ops->reset_chip(vha);
7635 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
18c6c127 7636 continue;
e315cd28 7637 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
18c6c127 7638 continue;
7c3df132
SK
7639 ql_log(ql_log_info, vha, 0x8015,
7640 "Attempting retry of stop-firmware command.\n");
e315cd28 7641 ret = qla2x00_stop_firmware(vha);
18c6c127 7642 }
ec7193e2 7643
4b60c827 7644 QLA_FW_STOPPED(ha);
ec7193e2 7645 ha->flags.fw_init_done = 0;
18c6c127 7646}
2c3dfe3f
SJ
7647
7648int
e315cd28 7649qla24xx_configure_vhba(scsi_qla_host_t *vha)
2c3dfe3f
SJ
7650{
7651 int rval = QLA_SUCCESS;
0b91d116 7652 int rval2;
2c3dfe3f 7653 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28
AC
7654 struct qla_hw_data *ha = vha->hw;
7655 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
67c2e93a
AC
7656 struct req_que *req;
7657 struct rsp_que *rsp;
2c3dfe3f 7658
e315cd28 7659 if (!vha->vp_idx)
2c3dfe3f
SJ
7660 return -EINVAL;
7661
e315cd28 7662 rval = qla2x00_fw_ready(base_vha);
d7459527
MH
7663 if (vha->qpair)
7664 req = vha->qpair->req;
67c2e93a 7665 else
d7459527 7666 req = ha->req_q_map[0];
67c2e93a
AC
7667 rsp = req->rsp;
7668
2c3dfe3f 7669 if (rval == QLA_SUCCESS) {
e315cd28 7670 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
73208dfd 7671 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
2c3dfe3f
SJ
7672 }
7673
e315cd28 7674 vha->flags.management_server_logged_in = 0;
2c3dfe3f
SJ
7675
7676 /* Login to SNS first */
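/* The SNS (fabric name server) lives at well-known address FFFFFCh. */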
0b91d116
CD
7677 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
7678 BIT_1);
7679 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
7680 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
7681 ql_dbg(ql_dbg_init, vha, 0x0120,
7682 "Failed SNS login: loop_id=%x, rval2=%d\n",
7683 NPH_SNS, rval2);
7684 else
7685 ql_dbg(ql_dbg_init, vha, 0x0103,
7686 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
7687 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
7688 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
2c3dfe3f
SJ
7689 return (QLA_FUNCTION_FAILED);
7690 }
7691
e315cd28
AC
7692 atomic_set(&vha->loop_down_timer, 0);
7693 atomic_set(&vha->loop_state, LOOP_UP);
7694 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
7695 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
7696 rval = qla2x00_loop_resync(base_vha);
2c3dfe3f
SJ
7697
7698 return rval;
7699}
4d4df193
HK
7700
7701/* 84XX Support **************************************************************/
7702
7703static LIST_HEAD(qla_cs84xx_list);
7704static DEFINE_MUTEX(qla_cs84xx_mutex);
7705
7706static struct qla_chip_state_84xx *
e315cd28 7707qla84xx_get_chip(struct scsi_qla_host *vha)
4d4df193
HK
7708{
7709 struct qla_chip_state_84xx *cs84xx;
e315cd28 7710 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
7711
7712 mutex_lock(&qla_cs84xx_mutex);
7713
7714 /* Find any shared 84xx chip. */
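/*
 * ISP84xx functions on the same PCI bus share one chip-state object, so
 * take a reference on an existing entry before allocating a new one.
 */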
7715 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
7716 if (cs84xx->bus == ha->pdev->bus) {
7717 kref_get(&cs84xx->kref);
7718 goto done;
7719 }
7720 }
7721
7722 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
7723 if (!cs84xx)
7724 goto done;
7725
7726 kref_init(&cs84xx->kref);
7727 spin_lock_init(&cs84xx->access_lock);
7728 mutex_init(&cs84xx->fw_update_mutex);
7729 cs84xx->bus = ha->pdev->bus;
7730
7731 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
7732done:
7733 mutex_unlock(&qla_cs84xx_mutex);
7734 return cs84xx;
7735}
7736
7737static void
7738__qla84xx_chip_release(struct kref *kref)
7739{
7740 struct qla_chip_state_84xx *cs84xx =
7741 container_of(kref, struct qla_chip_state_84xx, kref);
7742
7743 mutex_lock(&qla_cs84xx_mutex);
7744 list_del(&cs84xx->list);
7745 mutex_unlock(&qla_cs84xx_mutex);
7746 kfree(cs84xx);
7747}
7748
7749void
e315cd28 7750qla84xx_put_chip(struct scsi_qla_host *vha)
4d4df193 7751{
e315cd28 7752 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
7753 if (ha->cs84xx)
7754 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
7755}
7756
7757static int
e315cd28 7758qla84xx_init_chip(scsi_qla_host_t *vha)
4d4df193
HK
7759{
7760 int rval;
7761 uint16_t status[2];
e315cd28 7762 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
7763
7764 mutex_lock(&ha->cs84xx->fw_update_mutex);
7765
e315cd28 7766 rval = qla84xx_verify_chip(vha, status);
4d4df193
HK
7767
7768 mutex_unlock(&ha->cs84xx->fw_update_mutex);
7769
7770 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
7771 QLA_SUCCESS;
7772}
3a03eb79
AV
7773
7774/* 81XX Support **************************************************************/
7775
7776int
7777qla81xx_nvram_config(scsi_qla_host_t *vha)
7778{
7779 int rval;
7780 struct init_cb_81xx *icb;
7781 struct nvram_81xx *nv;
7782 uint32_t *dptr;
7783 uint8_t *dptr1, *dptr2;
7784 uint32_t chksum;
7785 uint16_t cnt;
7786 struct qla_hw_data *ha = vha->hw;
7787
7788 rval = QLA_SUCCESS;
7789 icb = (struct init_cb_81xx *)ha->init_cb;
7790 nv = ha->nvram;
7791
7792 /* Determine NVRAM starting address. */
7793 ha->nvram_size = sizeof(struct nvram_81xx);
3a03eb79 7794 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7ec0effd
AD
7795 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
7796 ha->vpd_size = FA_VPD_SIZE_82XX;
3a03eb79
AV
7797
7798 /* Get VPD data into cache */
7799 ha->vpd = ha->nvram + VPD_OFFSET;
3d79038f
AV
7800 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
7801 ha->vpd_size);
3a03eb79
AV
7802
7803 /* Get NVRAM data into cache and calculate checksum. */
3d79038f 7804 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
3a03eb79 7805 ha->nvram_size);
3d79038f 7806 dptr = (uint32_t *)nv;
da08ef5c
JC
7807 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7808 chksum += le32_to_cpu(*dptr);
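/*
 * A valid NVRAM image sums to zero over 32-bit words; a non-zero result
 * is treated as corruption and triggers the defaults below.
 */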
3a03eb79 7809
7c3df132
SK
7810 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
7811 "Contents of NVRAM:\n");
7812 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
7813 (uint8_t *)nv, ha->nvram_size);
3a03eb79
AV
7814
7815 /* Bad NVRAM data, set defaults parameters. */
7816 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
7817 || nv->id[3] != ' ' ||
ad950360 7818 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
3a03eb79 7819 /* Reset NVRAM data. */
7c3df132 7820 ql_log(ql_log_info, vha, 0x0073,
9e336520 7821 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7c3df132 7822 "version=0x%x.\n", chksum, nv->id[0],
3a03eb79 7823 le16_to_cpu(nv->nvram_version));
7c3df132
SK
7824 ql_log(ql_log_info, vha, 0x0074,
7825 "Falling back to functioning (yet invalid -- WWPN) "
7826 "defaults.\n");
3a03eb79
AV
7827
7828 /*
7829 * Set default initialization control block.
7830 */
7831 memset(nv, 0, ha->nvram_size);
ad950360
BVA
7832 nv->nvram_version = cpu_to_le16(ICB_VERSION);
7833 nv->version = cpu_to_le16(ICB_VERSION);
98aee70d 7834 nv->frame_payload_size = 2048;
ad950360
BVA
7835 nv->execution_throttle = cpu_to_le16(0xFFFF);
7836 nv->exchange_count = cpu_to_le16(0);
3a03eb79 7837 nv->port_name[0] = 0x21;
f73cb695 7838 nv->port_name[1] = 0x00 + ha->port_no + 1;
3a03eb79
AV
7839 nv->port_name[2] = 0x00;
7840 nv->port_name[3] = 0xe0;
7841 nv->port_name[4] = 0x8b;
7842 nv->port_name[5] = 0x1c;
7843 nv->port_name[6] = 0x55;
7844 nv->port_name[7] = 0x86;
7845 nv->node_name[0] = 0x20;
7846 nv->node_name[1] = 0x00;
7847 nv->node_name[2] = 0x00;
7848 nv->node_name[3] = 0xe0;
7849 nv->node_name[4] = 0x8b;
7850 nv->node_name[5] = 0x1c;
7851 nv->node_name[6] = 0x55;
7852 nv->node_name[7] = 0x86;
ad950360
BVA
7853 nv->login_retry_count = cpu_to_le16(8);
7854 nv->interrupt_delay_timer = cpu_to_le16(0);
7855 nv->login_timeout = cpu_to_le16(0);
3a03eb79 7856 nv->firmware_options_1 =
ad950360
BVA
7857 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7858 nv->firmware_options_2 = cpu_to_le32(2 << 4);
7859 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7860 nv->firmware_options_3 = cpu_to_le32(2 << 13);
7861 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7862 nv->efi_parameters = cpu_to_le32(0);
3a03eb79 7863 nv->reset_delay = 5;
ad950360
BVA
7864 nv->max_luns_per_target = cpu_to_le16(128);
7865 nv->port_down_retry_count = cpu_to_le16(30);
7866 nv->link_down_timeout = cpu_to_le16(180);
eeebcc92 7867 nv->enode_mac[0] = 0x00;
6246b8a1
GM
7868 nv->enode_mac[1] = 0xC0;
7869 nv->enode_mac[2] = 0xDD;
3a03eb79
AV
7870 nv->enode_mac[3] = 0x04;
7871 nv->enode_mac[4] = 0x05;
f73cb695 7872 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
3a03eb79
AV
7873
7874 rval = 1;
7875 }
7876
9e522cd8
AE
7877 if (IS_T10_PI_CAPABLE(ha))
7878 nv->frame_payload_size &= ~7;
7879
aa230bc5
AE
7880 qlt_81xx_config_nvram_stage1(vha, nv);
7881
3a03eb79 7882 /* Reset Initialization control block */
773120e4 7883 memset(icb, 0, ha->init_cb_size);
3a03eb79
AV
7884
7885 /* Copy 1st segment. */
7886 dptr1 = (uint8_t *)icb;
7887 dptr2 = (uint8_t *)&nv->version;
7888 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7889 while (cnt--)
7890 *dptr1++ = *dptr2++;
7891
7892 icb->login_retry_count = nv->login_retry_count;
7893
7894 /* Copy 2nd segment. */
7895 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7896 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7897 cnt = (uint8_t *)&icb->reserved_5 -
7898 (uint8_t *)&icb->interrupt_delay_timer;
7899 while (cnt--)
7900 *dptr1++ = *dptr2++;
7901
7902 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
7903 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
7904 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
69e5f1ea
AV
7905 icb->enode_mac[0] = 0x00;
7906 icb->enode_mac[1] = 0xC0;
7907 icb->enode_mac[2] = 0xDD;
3a03eb79
AV
7908 icb->enode_mac[3] = 0x04;
7909 icb->enode_mac[4] = 0x05;
f73cb695 7910 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
3a03eb79
AV
7911 }
7912
b64b0e8f
AV
7913 /* Use extended-initialization control block. */
7914 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
7915
3a03eb79
AV
7916 /*
7917 * Setup driver NVRAM options.
7918 */
7919 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
a9083016 7920 "QLE8XXX");
3a03eb79 7921
aa230bc5
AE
7922 qlt_81xx_config_nvram_stage2(vha, icb);
7923
3a03eb79 7924 /* Use alternate WWN? */
ad950360 7925 if (nv->host_p & cpu_to_le32(BIT_15)) {
3a03eb79
AV
7926 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7927 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7928 }
7929
7930 /* Prepare nodename */
ad950360 7931 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
3a03eb79
AV
7932 /*
7933 * Firmware will apply the following mask if the nodename was
7934 * not provided.
7935 */
7936 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7937 icb->node_name[0] &= 0xF0;
7938 }
7939
7940 /* Set host adapter parameters. */
7941 ha->flags.disable_risc_code_load = 0;
7942 ha->flags.enable_lip_reset = 0;
7943 ha->flags.enable_lip_full_login =
7944 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
7945 ha->flags.enable_target_reset =
7946 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
7947 ha->flags.enable_led_scheme = 0;
7948 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
7949
7950 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7951 (BIT_6 | BIT_5 | BIT_4)) >> 4;
7952
7953 /* save HBA serial number */
7954 ha->serial0 = icb->port_name[5];
7955 ha->serial1 = icb->port_name[6];
7956 ha->serial2 = icb->port_name[7];
7957 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7958 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7959
ad950360 7960 icb->execution_throttle = cpu_to_le16(0xFFFF);
3a03eb79
AV
7961
7962 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7963
7964 /* Set minimum login_timeout to 4 seconds. */
7965 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7966 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7967 if (le16_to_cpu(nv->login_timeout) < 4)
ad950360 7968 nv->login_timeout = cpu_to_le16(4);
3a03eb79 7969 ha->login_timeout = le16_to_cpu(nv->login_timeout);
3a03eb79
AV
7970
7971 /* Set minimum RATOV to 100 tenths of a second. */
7972 ha->r_a_tov = 100;
7973
7974 ha->loop_reset_delay = nv->reset_delay;
7975
7976 /* Link Down Timeout = 0:
7977 *
7ec0effd 7978 * When Port Down timer expires we will start returning
3a03eb79
AV
7979 * I/O's to OS with "DID_NO_CONNECT".
7980 *
7981 * Link Down Timeout != 0:
7982 *
7983 * The driver waits for the link to come up after link down
7984 * before returning I/Os to OS with "DID_NO_CONNECT".
7985 */
7986 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7987 ha->loop_down_abort_time =
7988 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7989 } else {
7990 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7991 ha->loop_down_abort_time =
7992 (LOOP_DOWN_TIME - ha->link_down_timeout);
7993 }
7994
7995 /* Need enough time to try and get the port back. */
7996 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7997 if (qlport_down_retry)
7998 ha->port_down_retry_count = qlport_down_retry;
7999
8000 /* Set login_retry_count */
8001 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
8002 if (ha->port_down_retry_count ==
8003 le16_to_cpu(nv->port_down_retry_count) &&
8004 ha->port_down_retry_count > 3)
8005 ha->login_retry_count = ha->port_down_retry_count;
8006 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
8007 ha->login_retry_count = ha->port_down_retry_count;
8008 if (ql2xloginretrycount)
8009 ha->login_retry_count = ql2xloginretrycount;
8010
6246b8a1 8011 /* if not running MSI-X we need handshaking on interrupts */
f73cb695 8012 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
ad950360 8013 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
6246b8a1 8014
3a03eb79
AV
8015 /* Enable ZIO. */
8016 if (!vha->flags.init_done) {
8017 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
8018 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
8019 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
8020 le16_to_cpu(icb->interrupt_delay_timer): 2;
8021 }
ad950360 8022 icb->firmware_options_2 &= cpu_to_le32(
3a03eb79
AV
8023 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
8024 vha->flags.process_response_queue = 0;
8025 if (ha->zio_mode != QLA_ZIO_DISABLED) {
8026 ha->zio_mode = QLA_ZIO_MODE_6;
8027
7c3df132 8028 ql_log(ql_log_info, vha, 0x0075,
3a03eb79 8029 "ZIO mode %d enabled; timer delay (%d us).\n",
7c3df132
SK
8030 ha->zio_mode,
8031 ha->zio_timer * 100);
3a03eb79
AV
8032
8033 icb->firmware_options_2 |= cpu_to_le32(
8034 (uint32_t)ha->zio_mode);
8035 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
8036 vha->flags.process_response_queue = 1;
8037 }
8038
41dc529a 8039 /* enable RIDA Format2 */
48acad09 8040 icb->firmware_options_3 |= BIT_0;
41dc529a 8041
edd05de1
DG
8042 if (IS_QLA27XX(ha)) {
8043 icb->firmware_options_3 |= BIT_8;
8044 ql_dbg(ql_log_info, vha, 0x0075,
8045 "Enabling direct connection.\n");
8046 }
8047
3a03eb79 8048 if (rval) {
7c3df132
SK
8049 ql_log(ql_log_warn, vha, 0x0076,
8050 "NVRAM configuration failed.\n");
3a03eb79
AV
8051 }
8052 return (rval);
8053}
8054
a9083016
GM
8055int
8056qla82xx_restart_isp(scsi_qla_host_t *vha)
8057{
8058 int status, rval;
a9083016
GM
8059 struct qla_hw_data *ha = vha->hw;
8060 struct req_que *req = ha->req_q_map[0];
8061 struct rsp_que *rsp = ha->rsp_q_map[0];
8062 struct scsi_qla_host *vp;
feafb7b1 8063 unsigned long flags;
a9083016
GM
8064
8065 status = qla2x00_init_rings(vha);
8066 if (!status) {
8067 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
8068 ha->flags.chip_reset_done = 1;
8069
8070 status = qla2x00_fw_ready(vha);
8071 if (!status) {
a9083016
GM
8072 /* Issue a marker after FW becomes ready. */
8073 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
a9083016 8074 vha->flags.online = 1;
7108b76e 8075 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
a9083016
GM
8076 }
8077
8078 /* if no cable then assume it's good */
8079 if ((vha->device_flags & DFLG_NO_CABLE))
8080 status = 0;
a9083016
GM
8081 }
8082
8083 if (!status) {
8084 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
8085
8086 if (!atomic_read(&vha->loop_down_timer)) {
8087 /*
8088 * Issue marker command only when we are going
8089 * to start the I/O.
8090 */
8091 vha->marker_needed = 1;
8092 }
8093
a9083016
GM
8094 ha->isp_ops->enable_intrs(ha);
8095
8096 ha->isp_abort_cnt = 0;
8097 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
8098
53296788 8099 /* Update the firmware version */
3173167f 8100 status = qla82xx_check_md_needed(vha);
53296788 8101
a9083016
GM
8102 if (ha->fce) {
8103 ha->flags.fce_enabled = 1;
8104 memset(ha->fce, 0,
8105 fce_calc_size(ha->fce_bufs));
8106 rval = qla2x00_enable_fce_trace(vha,
8107 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
8108 &ha->fce_bufs);
8109 if (rval) {
cfb0919c 8110 ql_log(ql_log_warn, vha, 0x8001,
7c3df132
SK
8111 "Unable to reinitialize FCE (%d).\n",
8112 rval);
a9083016
GM
8113 ha->flags.fce_enabled = 0;
8114 }
8115 }
8116
8117 if (ha->eft) {
8118 memset(ha->eft, 0, EFT_SIZE);
8119 rval = qla2x00_enable_eft_trace(vha,
8120 ha->eft_dma, EFT_NUM_BUFFERS);
8121 if (rval) {
cfb0919c 8122 ql_log(ql_log_warn, vha, 0x8010,
7c3df132
SK
8123 "Unable to reinitialize EFT (%d).\n",
8124 rval);
a9083016
GM
8125 }
8126 }
a9083016
GM
8127 }
8128
8129 if (!status) {
cfb0919c 8130 ql_dbg(ql_dbg_taskm, vha, 0x8011,
7c3df132 8131 "qla82xx_restart_isp succeeded.\n");
feafb7b1
AE
8132
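/*
 * Each vport's vref_count is raised before vport_slock is dropped so
 * the vport cannot go away while qla2x00_vp_abort_isp() runs without
 * the lock held.
 */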
8133 spin_lock_irqsave(&ha->vport_slock, flags);
8134 list_for_each_entry(vp, &ha->vp_list, list) {
8135 if (vp->vp_idx) {
8136 atomic_inc(&vp->vref_count);
8137 spin_unlock_irqrestore(&ha->vport_slock, flags);
8138
a9083016 8139 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
8140
8141 spin_lock_irqsave(&ha->vport_slock, flags);
8142 atomic_dec(&vp->vref_count);
8143 }
a9083016 8144 }
feafb7b1
AE
8145 spin_unlock_irqrestore(&ha->vport_slock, flags);
8146
a9083016 8147 } else {
cfb0919c 8148 ql_log(ql_log_warn, vha, 0x8016,
7c3df132 8149 "qla82xx_restart_isp **** FAILED ****.\n");
a9083016
GM
8150 }
8151
8152 return status;
8153}
8154
3a03eb79 8155void
ae97c91e 8156qla81xx_update_fw_options(scsi_qla_host_t *vha)
3a03eb79 8157{
ae97c91e
AV
8158 struct qla_hw_data *ha = vha->hw;
8159
f198cafa
HM
8160 /* Hold status IOCBs until ABTS response received. */
8161 if (ql2xfwholdabts)
8162 ha->fw_options[3] |= BIT_12;
8163
088d09d4
GM
8164 /* Set Retry FLOGI in case of P2P connection */
8165 if (ha->operating_mode == P2P) {
8166 ha->fw_options[2] |= BIT_3;
8167 ql_dbg(ql_dbg_disc, vha, 0x2103,
8168 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
8169 __func__, ha->fw_options[2]);
8170 }
8171
41dc529a
QT
8172 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
8173 if (ql2xmvasynctoatio) {
8174 if (qla_tgt_mode_enabled(vha) ||
8175 qla_dual_mode_enabled(vha))
8176 ha->fw_options[2] |= BIT_11;
8177 else
8178 ha->fw_options[2] &= ~BIT_11;
8179 }
8180
f7e761f5 8181 if (qla_tgt_mode_enabled(vha) ||
2da52737
QT
8182 qla_dual_mode_enabled(vha)) {
8183 /* FW auto send SCSI status during */
8184 ha->fw_options[1] |= BIT_8;
8185 ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
8186
8187 /* FW perform Exchange validation */
f7e761f5 8188 ha->fw_options[2] |= BIT_4;
2da52737
QT
8189 } else {
8190 ha->fw_options[1] &= ~BIT_8;
8191 ha->fw_options[10] &= 0x00ff;
8192
f7e761f5 8193 ha->fw_options[2] &= ~BIT_4;
2da52737 8194 }
f7e761f5 8195
41dc529a
QT
8196 if (ql2xetsenable) {
8197 /* Enable ETS Burst. */
8198 memset(ha->fw_options, 0, sizeof(ha->fw_options));
8199 ha->fw_options[2] |= BIT_9;
8200 }
8201
83548fe2
QT
8202 ql_dbg(ql_dbg_init, vha, 0x00e9,
8203 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
8204 __func__, ha->fw_options[1], ha->fw_options[2],
8205 ha->fw_options[3], vha->host->active_mode);
ae97c91e 8206
ae97c91e 8207 qla2x00_set_fw_options(vha, ha->fw_options);
3a03eb79 8208}
09ff701a
SR
8209
8210/*
8211 * qla24xx_get_fcp_prio
8212 * Gets the fcp cmd priority value for the logged in port.
8213 * Looks for a match of the port descriptors within
8214 * each of the fcp prio config entries. If a match is found,
8215 * the tag (priority) value is returned.
8216 *
8217 * Input:
21090cbe 8218 * vha = scsi host structure pointer.
09ff701a
SR
8219 * fcport = port structure pointer.
8220 *
8221 * Return:
6c452a45 8222 * non-zero (if found)
f28a0a96 8223 * -1 (if not found)
09ff701a
SR
8224 *
8225 * Context:
8226 * Kernel context
8227 */
f28a0a96 8228static int
09ff701a
SR
8229qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
8230{
8231 int i, entries;
8232 uint8_t pid_match, wwn_match;
f28a0a96 8233 int priority;
09ff701a
SR
8234 uint32_t pid1, pid2;
8235 uint64_t wwn1, wwn2;
8236 struct qla_fcp_prio_entry *pri_entry;
8237 struct qla_hw_data *ha = vha->hw;
8238
8239 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
f28a0a96 8240 return -1;
09ff701a 8241
f28a0a96 8242 priority = -1;
09ff701a
SR
8243 entries = ha->fcp_prio_cfg->num_entries;
8244 pri_entry = &ha->fcp_prio_cfg->entry[0];
8245
8246 for (i = 0; i < entries; i++) {
8247 pid_match = wwn_match = 0;
8248
8249 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
8250 pri_entry++;
8251 continue;
8252 }
8253
8254 /* check source pid for a match */
8255 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
8256 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
8257 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
8258 if (pid1 == INVALID_PORT_ID)
8259 pid_match++;
8260 else if (pid1 == pid2)
8261 pid_match++;
8262 }
8263
8264 /* check destination pid for a match */
8265 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
8266 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
8267 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
8268 if (pid1 == INVALID_PORT_ID)
8269 pid_match++;
8270 else if (pid1 == pid2)
8271 pid_match++;
8272 }
8273
8274 /* check source WWN for a match */
8275 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
8276 wwn1 = wwn_to_u64(vha->port_name);
8277 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
8278 if (wwn2 == (uint64_t)-1)
8279 wwn_match++;
8280 else if (wwn1 == wwn2)
8281 wwn_match++;
8282 }
8283
8284 /* check destination WWN for a match */
8285 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
8286 wwn1 = wwn_to_u64(fcport->port_name);
8287 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
8288 if (wwn2 == (uint64_t)-1)
8289 wwn_match++;
8290 else if (wwn1 == wwn2)
8291 wwn_match++;
8292 }
8293
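/*
 * A count of 2 means both the source and destination criteria of that
 * type matched (wildcard entries count as matches).
 */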
8294 if (pid_match == 2 || wwn_match == 2) {
8295 /* Found a matching entry */
8296 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
8297 priority = pri_entry->tag;
8298 break;
8299 }
8300
8301 pri_entry++;
8302 }
8303
8304 return priority;
8305}
8306
8307/*
8308 * qla24xx_update_fcport_fcp_prio
8309 * Activates fcp priority for the logged in fc port
8310 *
8311 * Input:
21090cbe 8312 * vha = scsi host structure pointer.
09ff701a
SR
8313 * fcp = port structure pointer.
8314 *
8315 * Return:
8316 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8317 *
8318 * Context:
8319 * Kernel context.
8320 */
8321int
21090cbe 8322qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
09ff701a
SR
8323{
8324 int ret;
f28a0a96 8325 int priority;
09ff701a
SR
8326 uint16_t mb[5];
8327
21090cbe
MI
8328 if (fcport->port_type != FCT_TARGET ||
8329 fcport->loop_id == FC_NO_LOOP_ID)
09ff701a
SR
8330 return QLA_FUNCTION_FAILED;
8331
21090cbe 8332 priority = qla24xx_get_fcp_prio(vha, fcport);
f28a0a96
AV
8333 if (priority < 0)
8334 return QLA_FUNCTION_FAILED;
8335
7ec0effd 8336 if (IS_P3P_TYPE(vha->hw)) {
a00f6296
SK
8337 fcport->fcp_prio = priority & 0xf;
8338 return QLA_SUCCESS;
8339 }
8340
21090cbe 8341 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
cfb0919c
CD
8342 if (ret == QLA_SUCCESS) {
8343 if (fcport->fcp_prio != priority)
8344 ql_dbg(ql_dbg_user, vha, 0x709e,
8345 "Updated FCP_CMND priority - value=%d loop_id=%d "
8346 "port_id=%02x%02x%02x.\n", priority,
8347 fcport->loop_id, fcport->d_id.b.domain,
8348 fcport->d_id.b.area, fcport->d_id.b.al_pa);
a00f6296 8349 fcport->fcp_prio = priority & 0xf;
cfb0919c 8350 } else
7c3df132 8351 ql_dbg(ql_dbg_user, vha, 0x704f,
cfb0919c
CD
8352 "Unable to update FCP_CMND priority - ret=0x%x for "
8353 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
8354 fcport->d_id.b.domain, fcport->d_id.b.area,
8355 fcport->d_id.b.al_pa);
09ff701a
SR
8356 return ret;
8357}
8358
8359/*
8360 * qla24xx_update_all_fcp_prio
8361 * Activates fcp priority for all the logged in ports
8362 *
8363 * Input:
8364 * ha = adapter block pointer.
8365 *
8366 * Return:
8367 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8368 *
8369 * Context:
8370 * Kernel context.
8371 */
8372int
8373qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
8374{
8375 int ret;
8376 fc_port_t *fcport;
8377
8378 ret = QLA_FUNCTION_FAILED;
8379 /* We need to set priority for all logged in ports */
8380 list_for_each_entry(fcport, &vha->vp_fcports, list)
8381 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
8382
8383 return ret;
8384}
d7459527 8385
82de802a
QT
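/*
 * qla2xxx_create_qpair - allocate an extra request/response queue pair.
 *
 * Each queue pair gets its own queue id, MSI-X vector, lock and srb
 * mempool, so I/O issued on different qpairs does not contend on the
 * base queues. Rough usage sketch (qos value illustrative only):
 *
 *	struct qla_qpair *qpair;
 *
 *	qpair = qla2xxx_create_qpair(vha, 0, vha->vp_idx, true);
 *	if (qpair) {
 *		... submit on qpair->req, reap on qpair->rsp ...
 *		qla2xxx_delete_qpair(vha, qpair);
 *	}
 */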
8386struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
8387 int vp_idx, bool startqp)
d7459527
MH
8388{
8389 int rsp_id = 0;
8390 int req_id = 0;
8391 int i;
8392 struct qla_hw_data *ha = vha->hw;
8393 uint16_t qpair_id = 0;
8394 struct qla_qpair *qpair = NULL;
8395 struct qla_msix_entry *msix;
8396
8397 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
8398 ql_log(ql_log_warn, vha, 0x00181,
8399 "FW/Driver is not multi-queue capable.\n");
8400 return NULL;
8401 }
8402
c38d1baf 8403 if (ql2xmqsupport || ql2xnvmeenable) {
d7459527
MH
8404 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
8405 if (qpair == NULL) {
8406 ql_log(ql_log_warn, vha, 0x0182,
8407 "Failed to allocate memory for queue pair.\n");
8408 return NULL;
8409 }
8410 memset(qpair, 0, sizeof(struct qla_qpair));
8411
8412 qpair->hw = vha->hw;
25ff6af1 8413 qpair->vha = vha;
82de802a
QT
8414 qpair->qp_lock_ptr = &qpair->qp_lock;
8415 spin_lock_init(&qpair->qp_lock);
af7bb382 8416 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
d7459527
MH
8417
8418 /* Assign available que pair id */
8419 mutex_lock(&ha->mq_lock);
8420 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
b95b9452 8421 if (ha->num_qpairs >= ha->max_qpairs) {
d7459527
MH
8422 mutex_unlock(&ha->mq_lock);
8423 ql_log(ql_log_warn, vha, 0x0183,
8424 "No resources to create additional q pair.\n");
8425 goto fail_qid_map;
8426 }
b95b9452 8427 ha->num_qpairs++;
d7459527
MH
8428 set_bit(qpair_id, ha->qpair_qid_map);
8429 ha->queue_pair_map[qpair_id] = qpair;
8430 qpair->id = qpair_id;
8431 qpair->vp_idx = vp_idx;
e6373f33 8432 qpair->fw_started = ha->flags.fw_started;
e326d22a 8433 INIT_LIST_HEAD(&qpair->hints_list);
7c3f8fd1
QT
8434 qpair->chip_reset = ha->base_qpair->chip_reset;
8435 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
8436 qpair->enable_explicit_conf =
8437 ha->base_qpair->enable_explicit_conf;
d7459527
MH
8438
8439 for (i = 0; i < ha->msix_count; i++) {
093df737 8440 msix = &ha->msix_entries[i];
d7459527
MH
8441 if (msix->in_use)
8442 continue;
8443 qpair->msix = msix;
83548fe2 8444 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
d7459527
MH
8445 "Vector %x selected for qpair\n", msix->vector);
8446 break;
8447 }
8448 if (!qpair->msix) {
8449 ql_log(ql_log_warn, vha, 0x0184,
8450 "Out of MSI-X vectors!.\n");
8451 goto fail_msix;
8452 }
8453
8454 qpair->msix->in_use = 1;
8455 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
8abfa9e2
QT
8456 qpair->pdev = ha->pdev;
8457 if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
8458 qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
d7459527
MH
8459
8460 mutex_unlock(&ha->mq_lock);
8461
8462 /* Create response queue first */
82de802a 8463 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
d7459527
MH
8464 if (!rsp_id) {
8465 ql_log(ql_log_warn, vha, 0x0185,
8466 "Failed to create response queue.\n");
8467 goto fail_rsp;
8468 }
8469
8470 qpair->rsp = ha->rsp_q_map[rsp_id];
8471
8472 /* Create request queue */
82de802a
QT
8473 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
8474 startqp);
d7459527
MH
8475 if (!req_id) {
8476 ql_log(ql_log_warn, vha, 0x0186,
8477 "Failed to create request queue.\n");
8478 goto fail_req;
8479 }
8480
8481 qpair->req = ha->req_q_map[req_id];
8482 qpair->rsp->req = qpair->req;
82de802a 8483 qpair->rsp->qpair = qpair;
e326d22a
QT
8484 /* init qpair to this cpu. Will adjust at run time. */
8485 qla_cpu_update(qpair, smp_processor_id());
d7459527
MH
8486
8487 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
8488 if (ha->fw_attributes & BIT_4)
8489 qpair->difdix_supported = 1;
8490 }
8491
8492 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
8493 if (!qpair->srb_mempool) {
83548fe2 8494 ql_log(ql_log_warn, vha, 0xd036,
d7459527
MH
8495 "Failed to create srb mempool for qpair %d\n",
8496 qpair->id);
8497 goto fail_mempool;
8498 }
8499
8500 /* Mark as online */
8501 qpair->online = 1;
8502
8503 if (!vha->flags.qpairs_available)
8504 vha->flags.qpairs_available = 1;
8505
8506 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
8507 "Request/Response queue pair created, id %d\n",
8508 qpair->id);
8509 ql_dbg(ql_dbg_init, vha, 0x0187,
8510 "Request/Response queue pair created, id %d\n",
8511 qpair->id);
8512 }
8513 return qpair;
8514
8515fail_mempool:
8516fail_req:
8517 qla25xx_delete_rsp_que(vha, qpair->rsp);
8518fail_rsp:
8519 mutex_lock(&ha->mq_lock);
8520 qpair->msix->in_use = 0;
8521 list_del(&qpair->qp_list_elem);
8522 if (list_empty(&vha->qp_list))
8523 vha->flags.qpairs_available = 0;
8524fail_msix:
8525 ha->queue_pair_map[qpair_id] = NULL;
8526 clear_bit(qpair_id, ha->qpair_qid_map);
b95b9452 8527 ha->num_qpairs--;
d7459527
MH
8528 mutex_unlock(&ha->mq_lock);
8529fail_qid_map:
8530 kfree(qpair);
8531 return NULL;
8532}
8533
8534int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
8535{
d65237c7 8536 int ret = QLA_FUNCTION_FAILED;
d7459527
MH
8537 struct qla_hw_data *ha = qpair->hw;
8538
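/*
 * Flag the qpair as going away, then wait for outstanding references to
 * drain before deleting its request and response queues.
 */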
8539 qpair->delete_in_progress = 1;
8540 while (atomic_read(&qpair->ref_count))
8541 msleep(500);
8542
8543 ret = qla25xx_delete_req_que(vha, qpair->req);
8544 if (ret != QLA_SUCCESS)
8545 goto fail;
7867b98d 8546
d7459527
MH
8547 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
8548 if (ret != QLA_SUCCESS)
8549 goto fail;
8550
8551 mutex_lock(&ha->mq_lock);
8552 ha->queue_pair_map[qpair->id] = NULL;
8553 clear_bit(qpair->id, ha->qpair_qid_map);
b95b9452 8554 ha->num_qpairs--;
d7459527 8555 list_del(&qpair->qp_list_elem);
d65237c7 8556 if (list_empty(&vha->qp_list)) {
d7459527 8557 vha->flags.qpairs_available = 0;
d65237c7
SC
8558 vha->flags.qpairs_req_created = 0;
8559 vha->flags.qpairs_rsp_created = 0;
8560 }
d7459527
MH
8561 mempool_destroy(qpair->srb_mempool);
8562 kfree(qpair);
8563 mutex_unlock(&ha->mq_lock);
8564
8565 return QLA_SUCCESS;
8566fail:
8567 return ret;
8568}