scsi: qla2xxx: move fields from qla_hw_data to qla_qpair
drivers/scsi/qla2xxx/qla_init.c  [linux-2.6-block.git]
1da177e4   1 /*
fa90c54f   2  * QLogic Fibre Channel HBA Driver
bd21eaf9   3  * Copyright (c) 2003-2014 QLogic Corporation
1da177e4   4  *
fa90c54f   5  * See LICENSE.qla2xxx for copyright and licensing details.
1da177e4   6  */
1da177e4   7 #include "qla_def.h"
73208dfd   8 #include "qla_gbl.h"
1da177e4   9
1da177e4  10 #include <linux/delay.h>
5a0e3ad6  11 #include <linux/slab.h>
0107109e  12 #include <linux/vmalloc.h>
1da177e4  13
1da177e4  14 #include "qla_devtbl.h"
1da177e4  15
4e08df3f  16 #ifdef CONFIG_SPARC
4e08df3f  17 #include <asm/prom.h>
4e08df3f  18 #endif
4e08df3f  19
2d70c103  20 #include <target/target_core_base.h>
2d70c103  21 #include "qla_target.h"
2d70c103  22
1da177e4  23 /*
1da177e4  24  * QLogic ISP2x00 Hardware Support Function Prototypes.
1da177e4  25  */
1da177e4 26static int qla2x00_isp_firmware(scsi_qla_host_t *);
1da177e4 27static int qla2x00_setup_chip(scsi_qla_host_t *);
1da177e4
LT
28static int qla2x00_fw_ready(scsi_qla_host_t *);
29static int qla2x00_configure_hba(scsi_qla_host_t *);
1da177e4
LT
30static int qla2x00_configure_loop(scsi_qla_host_t *);
31static int qla2x00_configure_local_loop(scsi_qla_host_t *);
1da177e4 32static int qla2x00_configure_fabric(scsi_qla_host_t *);
726b8548 33static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
1da177e4 34static int qla2x00_restart_isp(scsi_qla_host_t *);
1da177e4 35
4d4df193
HK
36static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
37static int qla84xx_init_chip(scsi_qla_host_t *);
73208dfd 38static int qla25xx_init_queues(struct qla_hw_data *);
726b8548
QT
39static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
40static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
41 struct event_arg *);
4d4df193 42
ac280b67
AV
43/* SRB Extensions ---------------------------------------------------------- */
44
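/*
 * qla2x00_sp_timeout() - Generic SRB timer callback.
 *
 * Under the hardware lock: clear the SRB's outstanding-command slot,
 * invoke the IOCB-specific timeout handler and release the SRB.
 */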
9ba56b95
GM
45void
46qla2x00_sp_timeout(unsigned long __data)
ac280b67
AV
47{
48 srb_t *sp = (srb_t *)__data;
4916392b 49 struct srb_iocb *iocb;
25ff6af1 50 scsi_qla_host_t *vha = sp->vha;
ac280b67
AV
51 struct req_que *req;
52 unsigned long flags;
53
25ff6af1
JC
54 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
55 req = vha->hw->req_q_map[0];
ac280b67 56 req->outstanding_cmds[sp->handle] = NULL;
9ba56b95 57 iocb = &sp->u.iocb_cmd;
4916392b 58 iocb->timeout(sp);
25ff6af1
JC
59 sp->free(sp);
60 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
ac280b67
AV
61}
62
9ba56b95 63void
25ff6af1 64qla2x00_sp_free(void *ptr)
ac280b67 65{
25ff6af1 66 srb_t *sp = ptr;
9ba56b95 67 struct srb_iocb *iocb = &sp->u.iocb_cmd;
ac280b67 68
4d97cc53 69 del_timer(&iocb->timer);
25ff6af1 70 qla2x00_rel_sp(sp);
ac280b67
AV
71}
72
ac280b67
AV
73/* Asynchronous Login/Logout Routines -------------------------------------- */
74
a9b6f722 75unsigned long
5b91490e
AV
76qla2x00_get_async_timeout(struct scsi_qla_host *vha)
77{
78 unsigned long tmo;
79 struct qla_hw_data *ha = vha->hw;
80
81 /* Firmware should use switch negotiated r_a_tov for timeout. */
82 tmo = ha->r_a_tov / 10 * 2;
8ae6d9c7
GM
83 if (IS_QLAFX00(ha)) {
84 tmo = FX00_DEF_RATOV * 2;
85 } else if (!IS_FWI2_CAPABLE(ha)) {
5b91490e
AV
86 /*
87 * Except for earlier ISPs where the timeout is seeded from the
88 * initialization control block.
89 */
90 tmo = ha->login_timeout;
91 }
92 return tmo;
93}
ac280b67 94
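/*
 * qla2x00_async_iocb_timeout() - Timeout handler for asynchronous
 * login/logout, CT passthrough, mailbox and notify-ack SRBs.
 *
 * Clears FCF_ASYNC_SENT and completes the SRB according to its type: a
 * timed-out PLOGI is fed back to the PLOGI-done state machine as a
 * command error, a timed-out LOGO goes to the target-mode logo
 * completion handler, and the remaining types are completed with
 * QLA_FUNCTION_TIMEOUT.
 */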
726b8548 95void
9ba56b95 96qla2x00_async_iocb_timeout(void *data)
ac280b67 97{
25ff6af1 98 srb_t *sp = data;
ac280b67 99 fc_port_t *fcport = sp->fcport;
726b8548
QT
100 struct srb_iocb *lio = &sp->u.iocb_cmd;
101 struct event_arg ea;
ac280b67 102
7c3df132 103 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
726b8548
QT
104 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
105 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
ac280b67 106
5ff1d584 107 fcport->flags &= ~FCF_ASYNC_SENT;
726b8548
QT
108
109 switch (sp->type) {
110 case SRB_LOGIN_CMD:
6ac52608
AV
111 /* Retry as needed. */
112 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
113 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
114 QLA_LOGIO_LOGIN_RETRIED : 0;
726b8548
QT
115 memset(&ea, 0, sizeof(ea));
116 ea.event = FCME_PLOGI_DONE;
117 ea.fcport = sp->fcport;
118 ea.data[0] = lio->u.logio.data[0];
119 ea.data[1] = lio->u.logio.data[1];
120 ea.sp = sp;
121 qla24xx_handle_plogi_done_event(fcport->vha, &ea);
122 break;
123 case SRB_LOGOUT_CMD:
a6ca8878 124 qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
726b8548
QT
125 break;
126 case SRB_CT_PTHRU_CMD:
127 case SRB_MB_IOCB:
128 case SRB_NACK_PLOGI:
129 case SRB_NACK_PRLI:
130 case SRB_NACK_LOGO:
25ff6af1 131 sp->done(sp, QLA_FUNCTION_TIMEOUT);
726b8548 132 break;
6ac52608 133 }
ac280b67
AV
134}
135
99b0bec7 136static void
25ff6af1 137qla2x00_async_login_sp_done(void *ptr, int res)
99b0bec7 138{
25ff6af1
JC
139 srb_t *sp = ptr;
140 struct scsi_qla_host *vha = sp->vha;
9ba56b95 141 struct srb_iocb *lio = &sp->u.iocb_cmd;
726b8548 142 struct event_arg ea;
9ba56b95 143
83548fe2 144 ql_dbg(ql_dbg_disc, vha, 0x20dd,
25ff6af1 145 "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
726b8548
QT
146
147 sp->fcport->flags &= ~FCF_ASYNC_SENT;
148 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
149 memset(&ea, 0, sizeof(ea));
150 ea.event = FCME_PLOGI_DONE;
151 ea.fcport = sp->fcport;
152 ea.data[0] = lio->u.logio.data[0];
153 ea.data[1] = lio->u.logio.data[1];
154 ea.iop[0] = lio->u.logio.iop[0];
155 ea.iop[1] = lio->u.logio.iop[1];
156 ea.sp = sp;
157 qla2x00_fcport_event_handler(vha, &ea);
158 }
9ba56b95 159
25ff6af1 160 sp->free(sp);
99b0bec7
AV
161}
162
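/*
 * qla2x00_async_login() - Issue an asynchronous PLOGI to @fcport.
 *
 * Ports whose firmware login state is already PLOGI pending/complete or
 * PRLI pending are skipped.  If the SRB cannot be started, the port is
 * marked FCF_LOGIN_NEEDED and RELOGIN_NEEDED is set so the login is
 * retried later.
 */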
ac280b67
AV
163int
164qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
165 uint16_t *data)
166{
ac280b67 167 srb_t *sp;
4916392b 168 struct srb_iocb *lio;
726b8548
QT
169 int rval = QLA_FUNCTION_FAILED;
170
171 if (!vha->flags.online)
172 goto done;
173
174 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
175 (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
176 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
177 goto done;
ac280b67 178
9ba56b95 179 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
ac280b67
AV
180 if (!sp)
181 goto done;
182
726b8548
QT
183 fcport->flags |= FCF_ASYNC_SENT;
184 fcport->logout_completed = 0;
185
9ba56b95
GM
186 sp->type = SRB_LOGIN_CMD;
187 sp->name = "login";
188 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
189
190 lio = &sp->u.iocb_cmd;
3822263e 191 lio->timeout = qla2x00_async_iocb_timeout;
9ba56b95 192 sp->done = qla2x00_async_login_sp_done;
4916392b 193 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
ac280b67 194 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
4916392b 195 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
ac280b67 196 rval = qla2x00_start_sp(sp);
080c9517
CD
197 if (rval != QLA_SUCCESS) {
198 fcport->flags &= ~FCF_ASYNC_SENT;
199 fcport->flags |= FCF_LOGIN_NEEDED;
200 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
ac280b67 201 goto done_free_sp;
080c9517 202 }
ac280b67 203
7c3df132 204 ql_dbg(ql_dbg_disc, vha, 0x2072,
726b8548
QT
205 "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
206 "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
cfb0919c
CD
207 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
208 fcport->login_retry);
ac280b67
AV
209 return rval;
210
211done_free_sp:
25ff6af1 212 sp->free(sp);
ac280b67 213done:
726b8548 214 fcport->flags &= ~FCF_ASYNC_SENT;
ac280b67
AV
215 return rval;
216}
217
99b0bec7 218static void
25ff6af1 219qla2x00_async_logout_sp_done(void *ptr, int res)
99b0bec7 220{
25ff6af1 221 srb_t *sp = ptr;
9ba56b95 222 struct srb_iocb *lio = &sp->u.iocb_cmd;
9ba56b95 223
726b8548 224 sp->fcport->flags &= ~FCF_ASYNC_SENT;
25ff6af1
JC
225 if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
226 qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
9ba56b95 227 lio->u.logio.data);
25ff6af1 228 sp->free(sp);
99b0bec7
AV
229}
230
ac280b67
AV
231int
232qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
233{
ac280b67 234 srb_t *sp;
4916392b 235 struct srb_iocb *lio;
ac280b67
AV
236 int rval;
237
238 rval = QLA_FUNCTION_FAILED;
726b8548 239 fcport->flags |= FCF_ASYNC_SENT;
9ba56b95 240 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
ac280b67
AV
241 if (!sp)
242 goto done;
243
9ba56b95
GM
244 sp->type = SRB_LOGOUT_CMD;
245 sp->name = "logout";
246 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
247
248 lio = &sp->u.iocb_cmd;
3822263e 249 lio->timeout = qla2x00_async_iocb_timeout;
9ba56b95 250 sp->done = qla2x00_async_logout_sp_done;
ac280b67
AV
251 rval = qla2x00_start_sp(sp);
252 if (rval != QLA_SUCCESS)
253 goto done_free_sp;
254
7c3df132 255 ql_dbg(ql_dbg_disc, vha, 0x2070,
726b8548 256 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
cfb0919c 257 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
726b8548
QT
258 fcport->d_id.b.area, fcport->d_id.b.al_pa,
259 fcport->port_name);
ac280b67
AV
260 return rval;
261
262done_free_sp:
25ff6af1 263 sp->free(sp);
ac280b67 264done:
726b8548 265 fcport->flags &= ~FCF_ASYNC_SENT;
ac280b67
AV
266 return rval;
267}
268
5ff1d584 269static void
25ff6af1 270qla2x00_async_adisc_sp_done(void *ptr, int res)
5ff1d584 271{
25ff6af1
JC
272 srb_t *sp = ptr;
273 struct scsi_qla_host *vha = sp->vha;
9ba56b95 274 struct srb_iocb *lio = &sp->u.iocb_cmd;
9ba56b95
GM
275
276 if (!test_bit(UNLOADING, &vha->dpc_flags))
25ff6af1 277 qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
9ba56b95 278 lio->u.logio.data);
25ff6af1 279 sp->free(sp);
5ff1d584
AV
280}
281
282int
283qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
284 uint16_t *data)
285{
5ff1d584 286 srb_t *sp;
4916392b 287 struct srb_iocb *lio;
5ff1d584
AV
288 int rval;
289
290 rval = QLA_FUNCTION_FAILED;
726b8548 291 fcport->flags |= FCF_ASYNC_SENT;
9ba56b95 292 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
5ff1d584
AV
293 if (!sp)
294 goto done;
295
9ba56b95
GM
296 sp->type = SRB_ADISC_CMD;
297 sp->name = "adisc";
298 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
299
300 lio = &sp->u.iocb_cmd;
3822263e 301 lio->timeout = qla2x00_async_iocb_timeout;
9ba56b95 302 sp->done = qla2x00_async_adisc_sp_done;
5ff1d584 303 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
4916392b 304 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
5ff1d584
AV
305 rval = qla2x00_start_sp(sp);
306 if (rval != QLA_SUCCESS)
307 goto done_free_sp;
308
7c3df132 309 ql_dbg(ql_dbg_disc, vha, 0x206f,
cfb0919c
CD
310 "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
311 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
312 fcport->d_id.b.area, fcport->d_id.b.al_pa);
5ff1d584
AV
313 return rval;
314
315done_free_sp:
25ff6af1 316 sp->free(sp);
5ff1d584 317done:
726b8548
QT
318 fcport->flags &= ~FCF_ASYNC_SENT;
319 return rval;
320}
321
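/*
 * qla24xx_handle_gnl_done_event() - Process a completed Get Name List
 * for @ea->fcport.
 *
 * Re-queues discovery if the RSCN generation changed; otherwise matches
 * the port against the returned entries: adopt the firmware-assigned
 * loop ID, schedule session deletion on N_Port ID/loop ID conflicts,
 * and drive the next login step (GPDB for PRLI-complete entries, or the
 * generic login handler).
 */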
322static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
323 struct event_arg *ea)
324{
325 fc_port_t *fcport, *conflict_fcport;
326 struct get_name_list_extended *e;
327 u16 i, n, found = 0, loop_id;
328 port_id_t id;
329 u64 wwn;
330 u8 opt = 0;
331
332 fcport = ea->fcport;
333
334 if (ea->rc) { /* rval */
335 if (fcport->login_retry == 0) {
336 fcport->login_retry = vha->hw->login_retry_count;
83548fe2
QT
337 ql_dbg(ql_dbg_disc, vha, 0x20de,
338 "GNL failed Port login retry %8phN, retry cnt=%d.\n",
339 fcport->port_name, fcport->login_retry);
726b8548
QT
340 }
341 return;
342 }
343
344 if (fcport->last_rscn_gen != fcport->rscn_gen) {
83548fe2 345 ql_dbg(ql_dbg_disc, vha, 0x20df,
726b8548
QT
346 "%s %8phC rscn gen changed rscn %d|%d \n",
347 __func__, fcport->port_name,
348 fcport->last_rscn_gen, fcport->rscn_gen);
349 qla24xx_post_gidpn_work(vha, fcport);
350 return;
351 } else if (fcport->last_login_gen != fcport->login_gen) {
83548fe2
QT
352 ql_dbg(ql_dbg_disc, vha, 0x20e0,
353 "%s %8phC login gen changed login %d|%d\n",
354 __func__, fcport->port_name,
355 fcport->last_login_gen, fcport->login_gen);
726b8548
QT
356 return;
357 }
358
359 n = ea->data[0] / sizeof(struct get_name_list_extended);
360
83548fe2 361 ql_dbg(ql_dbg_disc, vha, 0x20e1,
726b8548
QT
362 "%s %d %8phC n %d %02x%02x%02x lid %d \n",
363 __func__, __LINE__, fcport->port_name, n,
364 fcport->d_id.b.domain, fcport->d_id.b.area,
365 fcport->d_id.b.al_pa, fcport->loop_id);
366
367 for (i = 0; i < n; i++) {
368 e = &vha->gnl.l[i];
369 wwn = wwn_to_u64(e->port_name);
370
371 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
372 continue;
373
374 found = 1;
375 id.b.domain = e->port_id[2];
376 id.b.area = e->port_id[1];
377 id.b.al_pa = e->port_id[0];
378 id.b.rsvd_1 = 0;
379
380 loop_id = le16_to_cpu(e->nport_handle);
381 loop_id = (loop_id & 0x7fff);
382
83548fe2
QT
383 ql_dbg(ql_dbg_disc, vha, 0x20e2,
384 "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
385 __func__, fcport->port_name,
386 e->current_login_state, fcport->fw_login_state,
387 id.b.domain, id.b.area, id.b.al_pa,
388 fcport->d_id.b.domain, fcport->d_id.b.area,
389 fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
726b8548
QT
390
391 if ((id.b24 != fcport->d_id.b24) ||
392 ((fcport->loop_id != FC_NO_LOOP_ID) &&
393 (fcport->loop_id != loop_id))) {
83548fe2
QT
394 ql_dbg(ql_dbg_disc, vha, 0x20e3,
395 "%s %d %8phC post del sess\n",
396 __func__, __LINE__, fcport->port_name);
726b8548
QT
397 qlt_schedule_sess_for_deletion(fcport, 1);
398 return;
399 }
400
401 fcport->loop_id = loop_id;
402
403 wwn = wwn_to_u64(fcport->port_name);
404 qlt_find_sess_invalidate_other(vha, wwn,
405 id, loop_id, &conflict_fcport);
406
407 if (conflict_fcport) {
408 /*
 409 * Another fcport shares the same loop_id and
 410 * nport id. The conflicting fcport needs to finish
411 * cleanup before this fcport can proceed to login.
412 */
413 conflict_fcport->conflict = fcport;
414 fcport->login_pause = 1;
415 }
416
417 switch (e->current_login_state) {
418 case DSC_LS_PRLI_COMP:
83548fe2
QT
419 ql_dbg(ql_dbg_disc, vha, 0x20e4,
420 "%s %d %8phC post gpdb\n",
421 __func__, __LINE__, fcport->port_name);
726b8548
QT
422 opt = PDO_FORCE_ADISC;
423 qla24xx_post_gpdb_work(vha, fcport, opt);
424 break;
425
426 case DSC_LS_PORT_UNAVAIL:
427 default:
428 if (fcport->loop_id == FC_NO_LOOP_ID) {
429 qla2x00_find_new_loop_id(vha, fcport);
430 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
431 }
83548fe2
QT
432 ql_dbg(ql_dbg_disc, vha, 0x20e5,
433 "%s %d %8phC\n",
434 __func__, __LINE__, fcport->port_name);
726b8548
QT
435 qla24xx_fcport_handle_login(vha, fcport);
436 break;
437 }
438 }
439
440 if (!found) {
441 /* fw has no record of this port */
442 if (fcport->loop_id == FC_NO_LOOP_ID) {
443 qla2x00_find_new_loop_id(vha, fcport);
444 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
445 } else {
446 for (i = 0; i < n; i++) {
447 e = &vha->gnl.l[i];
448 id.b.domain = e->port_id[0];
449 id.b.area = e->port_id[1];
450 id.b.al_pa = e->port_id[2];
451 id.b.rsvd_1 = 0;
452 loop_id = le16_to_cpu(e->nport_handle);
453
454 if (fcport->d_id.b24 == id.b24) {
455 conflict_fcport =
456 qla2x00_find_fcport_by_wwpn(vha,
457 e->port_name, 0);
458
83548fe2 459 ql_dbg(ql_dbg_disc, vha, 0x20e6,
726b8548
QT
460 "%s %d %8phC post del sess\n",
461 __func__, __LINE__,
462 conflict_fcport->port_name);
463 qlt_schedule_sess_for_deletion
464 (conflict_fcport, 1);
465 }
466
467 if (fcport->loop_id == loop_id) {
468 /* FW already picked this loop id for another fcport */
469 qla2x00_find_new_loop_id(vha, fcport);
470 }
471 }
472 }
473 qla24xx_fcport_handle_login(vha, fcport);
474 }
475} /* gnl_event */
476
477static void
25ff6af1 478qla24xx_async_gnl_sp_done(void *s, int res)
726b8548 479{
25ff6af1
JC
480 struct srb *sp = s;
481 struct scsi_qla_host *vha = sp->vha;
726b8548
QT
482 unsigned long flags;
483 struct fc_port *fcport = NULL, *tf;
484 u16 i, n = 0, loop_id;
485 struct event_arg ea;
486 struct get_name_list_extended *e;
487 u64 wwn;
488 struct list_head h;
489
83548fe2 490 ql_dbg(ql_dbg_disc, vha, 0x20e7,
726b8548
QT
491 "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
492 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
493 sp->u.iocb_cmd.u.mbx.in_mb[2]);
494
495 memset(&ea, 0, sizeof(ea));
496 ea.sp = sp;
497 ea.rc = res;
498 ea.event = FCME_GNL_DONE;
499
500 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
501 sizeof(struct get_name_list_extended)) {
502 n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
503 sizeof(struct get_name_list_extended);
 504 ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amount transferred */
505 }
506
507 for (i = 0; i < n; i++) {
508 e = &vha->gnl.l[i];
509 loop_id = le16_to_cpu(e->nport_handle);
 510 /* mask out reserved bit */
511 loop_id = (loop_id & 0x7fff);
512 set_bit(loop_id, vha->hw->loop_id_map);
513 wwn = wwn_to_u64(e->port_name);
514
83548fe2 515 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
726b8548
QT
516 "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
517 __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
518 e->port_id[0], e->current_login_state, e->last_login_state,
519 (loop_id & 0x7fff));
520 }
521
522 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
523 vha->gnl.sent = 0;
524
525 INIT_LIST_HEAD(&h);
526 fcport = tf = NULL;
527 if (!list_empty(&vha->gnl.fcports))
528 list_splice_init(&vha->gnl.fcports, &h);
529
530 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
531 list_del_init(&fcport->gnl_entry);
532 fcport->flags &= ~FCF_ASYNC_SENT;
533 ea.fcport = fcport;
534
535 qla2x00_fcport_event_handler(vha, &ea);
536 }
537
538 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
539
25ff6af1 540 sp->free(sp);
726b8548
QT
541}
542
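/*
 * qla24xx_async_gnl() - Issue an asynchronous Get Name List
 * (MBC_PORT_NODE_NAME_LIST) mailbox IOCB into the vha->gnl DMA buffer.
 *
 * Requests are coalesced: if a GNL is already in flight, the fcport is
 * only queued on vha->gnl.fcports and picked up by the completion
 * handler.
 */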
543int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
544{
545 srb_t *sp;
546 struct srb_iocb *mbx;
547 int rval = QLA_FUNCTION_FAILED;
548 unsigned long flags;
549 u16 *mb;
550
551 if (!vha->flags.online)
552 goto done;
553
83548fe2 554 ql_dbg(ql_dbg_disc, vha, 0x20d9,
726b8548
QT
555 "Async-gnlist WWPN %8phC \n", fcport->port_name);
556
557 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
558 fcport->flags |= FCF_ASYNC_SENT;
559 fcport->disc_state = DSC_GNL;
560 fcport->last_rscn_gen = fcport->rscn_gen;
561 fcport->last_login_gen = fcport->login_gen;
562
563 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
564 if (vha->gnl.sent) {
565 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
566 rval = QLA_SUCCESS;
567 goto done;
568 }
569 vha->gnl.sent = 1;
570 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
571
572 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
573 if (!sp)
574 goto done;
575 sp->type = SRB_MB_IOCB;
576 sp->name = "gnlist";
577 sp->gen1 = fcport->rscn_gen;
578 sp->gen2 = fcport->login_gen;
579
580 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
581
582 mb = sp->u.iocb_cmd.u.mbx.out_mb;
583 mb[0] = MBC_PORT_NODE_NAME_LIST;
584 mb[1] = BIT_2 | BIT_3;
585 mb[2] = MSW(vha->gnl.ldma);
586 mb[3] = LSW(vha->gnl.ldma);
587 mb[6] = MSW(MSD(vha->gnl.ldma));
588 mb[7] = LSW(MSD(vha->gnl.ldma));
589 mb[8] = vha->gnl.size;
590 mb[9] = vha->vp_idx;
591
592 mbx = &sp->u.iocb_cmd;
593 mbx->timeout = qla2x00_async_iocb_timeout;
594
595 sp->done = qla24xx_async_gnl_sp_done;
596
597 rval = qla2x00_start_sp(sp);
598 if (rval != QLA_SUCCESS)
599 goto done_free_sp;
600
83548fe2
QT
601 ql_dbg(ql_dbg_disc, vha, 0x20da,
602 "Async-%s - OUT WWPN %8phC hndl %x\n",
603 sp->name, fcport->port_name, sp->handle);
726b8548
QT
604
605 return rval;
606
607done_free_sp:
25ff6af1 608 sp->free(sp);
726b8548
QT
609done:
610 fcport->flags &= ~FCF_ASYNC_SENT;
611 return rval;
612}
613
614int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
615{
616 struct qla_work_evt *e;
617
618 e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
619 if (!e)
620 return QLA_FUNCTION_FAILED;
621
622 e->u.fcport.fcport = fcport;
623 return qla2x00_post_work(vha, e);
624}
625
626static
25ff6af1 627void qla24xx_async_gpdb_sp_done(void *s, int res)
726b8548 628{
25ff6af1
JC
629 struct srb *sp = s;
630 struct scsi_qla_host *vha = sp->vha;
726b8548 631 struct qla_hw_data *ha = vha->hw;
726b8548
QT
632 struct port_database_24xx *pd;
633 fc_port_t *fcport = sp->fcport;
634 u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
635 int rval = QLA_SUCCESS;
636 struct event_arg ea;
637
83548fe2 638 ql_dbg(ql_dbg_disc, vha, 0x20db,
726b8548
QT
639 "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
640 sp->name, res, fcport->port_name, mb[1], mb[2]);
641
642 fcport->flags &= ~FCF_ASYNC_SENT;
643
644 if (res) {
645 rval = res;
646 goto gpd_error_out;
647 }
648
649 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
650
15f30a57 651 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
726b8548
QT
652
653gpd_error_out:
654 memset(&ea, 0, sizeof(ea));
655 ea.event = FCME_GPDB_DONE;
656 ea.rc = rval;
657 ea.fcport = fcport;
658 ea.sp = sp;
659
660 qla2x00_fcport_event_handler(vha, &ea);
661
662 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
663 sp->u.iocb_cmd.u.mbx.in_dma);
664
25ff6af1 665 sp->free(sp);
726b8548
QT
666}
667
668static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport,
669 u8 opt)
670{
671 struct qla_work_evt *e;
672
673 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
674 if (!e)
675 return QLA_FUNCTION_FAILED;
676
677 e->u.fcport.fcport = fcport;
678 e->u.fcport.opt = opt;
679 return qla2x00_post_work(vha, e);
680}
681
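/*
 * qla24xx_async_gpdb() - Issue an asynchronous Get Port Database
 * mailbox IOCB for @fcport, using a buffer from the s_dma_pool.
 *
 * If the request cannot be issued, it is re-posted as deferred GPDB
 * work.
 */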
682int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
683{
684 srb_t *sp;
685 struct srb_iocb *mbx;
686 int rval = QLA_FUNCTION_FAILED;
687 u16 *mb;
688 dma_addr_t pd_dma;
689 struct port_database_24xx *pd;
690 struct qla_hw_data *ha = vha->hw;
691
692 if (!vha->flags.online)
693 goto done;
694
695 fcport->flags |= FCF_ASYNC_SENT;
696 fcport->disc_state = DSC_GPDB;
697
698 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
699 if (!sp)
700 goto done;
701
702 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
703 if (pd == NULL) {
83548fe2
QT
704 ql_log(ql_log_warn, vha, 0xd043,
705 "Failed to allocate port database structure.\n");
726b8548
QT
706 goto done_free_sp;
707 }
708 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
709
710 sp->type = SRB_MB_IOCB;
711 sp->name = "gpdb";
712 sp->gen1 = fcport->rscn_gen;
713 sp->gen2 = fcport->login_gen;
714 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
715
716 mb = sp->u.iocb_cmd.u.mbx.out_mb;
717 mb[0] = MBC_GET_PORT_DATABASE;
718 mb[1] = fcport->loop_id;
719 mb[2] = MSW(pd_dma);
720 mb[3] = LSW(pd_dma);
721 mb[6] = MSW(MSD(pd_dma));
722 mb[7] = LSW(MSD(pd_dma));
723 mb[9] = vha->vp_idx;
724 mb[10] = opt;
725
726 mbx = &sp->u.iocb_cmd;
727 mbx->timeout = qla2x00_async_iocb_timeout;
728 mbx->u.mbx.in = (void *)pd;
729 mbx->u.mbx.in_dma = pd_dma;
730
731 sp->done = qla24xx_async_gpdb_sp_done;
732
733 rval = qla2x00_start_sp(sp);
734 if (rval != QLA_SUCCESS)
735 goto done_free_sp;
736
83548fe2
QT
737 ql_dbg(ql_dbg_disc, vha, 0x20dc,
738 "Async-%s %8phC hndl %x opt %x\n",
739 sp->name, fcport->port_name, sp->handle, opt);
726b8548
QT
740
741 return rval;
742
743done_free_sp:
744 if (pd)
745 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
746
25ff6af1 747 sp->free(sp);
726b8548
QT
748done:
749 fcport->flags &= ~FCF_ASYNC_SENT;
750 qla24xx_post_gpdb_work(vha, fcport, opt);
5ff1d584
AV
751 return rval;
752}
753
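/*
 * qla24xx_handle_gpdb_event() - Consume a completed Get Port Database.
 *
 * Revalidates the RSCN/login generations, schedules the session for
 * deletion on failure, and otherwise marks the login successful and
 * posts upd_fcport or GPSC work.
 */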
726b8548
QT
754static
755void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
756{
757 int rval = ea->rc;
758 fc_port_t *fcport = ea->fcport;
759 unsigned long flags;
760
761 fcport->flags &= ~FCF_ASYNC_SENT;
762
83548fe2 763 ql_dbg(ql_dbg_disc, vha, 0x20d2,
726b8548
QT
764 "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
765 fcport->disc_state, fcport->fw_login_state, rval);
766
767 if (ea->sp->gen2 != fcport->login_gen) {
768 /* target side must have changed it. */
83548fe2 769 ql_dbg(ql_dbg_disc, vha, 0x20d3,
726b8548
QT
770 "%s %8phC generation changed rscn %d|%d login %d|%d \n",
771 __func__, fcport->port_name, fcport->last_rscn_gen,
772 fcport->rscn_gen, fcport->last_login_gen,
773 fcport->login_gen);
774 return;
775 } else if (ea->sp->gen1 != fcport->rscn_gen) {
83548fe2 776 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
726b8548
QT
777 __func__, __LINE__, fcport->port_name);
778 qla24xx_post_gidpn_work(vha, fcport);
779 return;
780 }
781
782 if (rval != QLA_SUCCESS) {
83548fe2 783 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
726b8548
QT
784 __func__, __LINE__, fcport->port_name);
785 qlt_schedule_sess_for_deletion_lock(fcport);
786 return;
787 }
788
789 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
790 ea->fcport->login_gen++;
791 ea->fcport->deleted = 0;
792 ea->fcport->logout_on_delete = 1;
793
794 if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
795 vha->fcport_count++;
796 ea->fcport->login_succ = 1;
797
798 if (!IS_IIDMA_CAPABLE(vha->hw) ||
799 !vha->hw->flags.gpsc_supported) {
83548fe2 800 ql_dbg(ql_dbg_disc, vha, 0x20d6,
726b8548
QT
801 "%s %d %8phC post upd_fcport fcp_cnt %d\n",
802 __func__, __LINE__, fcport->port_name,
803 vha->fcport_count);
804
805 qla24xx_post_upd_fcport_work(vha, fcport);
806 } else {
83548fe2 807 ql_dbg(ql_dbg_disc, vha, 0x20d7,
726b8548
QT
808 "%s %d %8phC post gpsc fcp_cnt %d\n",
809 __func__, __LINE__, fcport->port_name,
810 vha->fcport_count);
811
812 qla24xx_post_gpsc_work(vha, fcport);
813 }
814 }
815 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
816} /* gpdb event */
817
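/*
 * qla24xx_fcport_handle_login() - Advance the discovery state machine
 * for @fcport by one step.
 *
 * Depending on disc_state this posts GNL, GPDB, GIDPN or PLOGI work.
 * Nothing is done in pure target mode, while a firmware login is
 * pending, or when no login retries remain.
 */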
818int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
819{
820 if (fcport->login_retry == 0)
821 return 0;
822
823 if (fcport->scan_state != QLA_FCPORT_FOUND)
824 return 0;
825
83548fe2 826 ql_dbg(ql_dbg_disc, vha, 0x20d8,
726b8548
QT
827 "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
828 __func__, fcport->port_name, fcport->disc_state,
829 fcport->fw_login_state, fcport->login_pause, fcport->flags,
830 fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
831 fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
832 fcport->loop_id);
833
834 fcport->login_retry--;
835
836 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
726b8548
QT
837 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
838 return 0;
839
5b33469a
QT
840 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
841 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
842 return 0;
843 }
844
726b8548
QT
845 /* for pure Target Mode. Login will not be initiated */
846 if (vha->host->active_mode == MODE_TARGET)
847 return 0;
848
849 if (fcport->flags & FCF_ASYNC_SENT) {
850 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
851 return 0;
852 }
853
854 switch (fcport->disc_state) {
855 case DSC_DELETED:
856 if (fcport->loop_id == FC_NO_LOOP_ID) {
83548fe2
QT
857 ql_dbg(ql_dbg_disc, vha, 0x20bd,
858 "%s %d %8phC post gnl\n",
859 __func__, __LINE__, fcport->port_name);
726b8548
QT
860 qla24xx_async_gnl(vha, fcport);
861 } else {
83548fe2
QT
862 ql_dbg(ql_dbg_disc, vha, 0x20bf,
863 "%s %d %8phC post login\n",
864 __func__, __LINE__, fcport->port_name);
726b8548
QT
865 fcport->disc_state = DSC_LOGIN_PEND;
866 qla2x00_post_async_login_work(vha, fcport, NULL);
867 }
868 break;
869
870 case DSC_GNL:
871 if (fcport->login_pause) {
872 fcport->last_rscn_gen = fcport->rscn_gen;
873 fcport->last_login_gen = fcport->login_gen;
874 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
875 break;
876 }
877
878 if (fcport->flags & FCF_FCP2_DEVICE) {
879 u8 opt = PDO_FORCE_ADISC;
880
83548fe2
QT
881 ql_dbg(ql_dbg_disc, vha, 0x20c9,
882 "%s %d %8phC post gpdb\n",
883 __func__, __LINE__, fcport->port_name);
726b8548
QT
884
885 fcport->disc_state = DSC_GPDB;
886 qla24xx_post_gpdb_work(vha, fcport, opt);
887 } else {
83548fe2
QT
888 ql_dbg(ql_dbg_disc, vha, 0x20cf,
889 "%s %d %8phC post login\n",
890 __func__, __LINE__, fcport->port_name);
726b8548
QT
891 fcport->disc_state = DSC_LOGIN_PEND;
892 qla2x00_post_async_login_work(vha, fcport, NULL);
893 }
894
895 break;
896
897 case DSC_LOGIN_FAILED:
83548fe2
QT
898 ql_dbg(ql_dbg_disc, vha, 0x20d0,
899 "%s %d %8phC post gidpn\n",
900 __func__, __LINE__, fcport->port_name);
726b8548
QT
901
902 qla24xx_post_gidpn_work(vha, fcport);
903 break;
904
905 case DSC_LOGIN_COMPLETE:
906 /* recheck login state */
83548fe2
QT
907 ql_dbg(ql_dbg_disc, vha, 0x20d1,
908 "%s %d %8phC post gpdb\n",
909 __func__, __LINE__, fcport->port_name);
726b8548
QT
910
911 qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
912 break;
913
914 default:
915 break;
916 }
917
918 return 0;
919}
920
921static
922void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
923{
924 fcport->rscn_gen++;
925
83548fe2
QT
926 ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
927 "%s %8phC DS %d LS %d\n",
928 __func__, fcport->port_name, fcport->disc_state,
929 fcport->fw_login_state);
726b8548
QT
930
931 if (fcport->flags & FCF_ASYNC_SENT)
932 return;
933
934 switch (fcport->disc_state) {
935 case DSC_DELETED:
936 case DSC_LOGIN_COMPLETE:
937 qla24xx_post_gidpn_work(fcport->vha, fcport);
938 break;
939
940 default:
941 break;
942 }
943}
944
945int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
946 u8 *port_name, void *pla)
947{
948 struct qla_work_evt *e;
949 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
950 if (!e)
951 return QLA_FUNCTION_FAILED;
952
953 e->u.new_sess.id = *id;
954 e->u.new_sess.pla = pla;
955 memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
956
957 return qla2x00_post_work(vha, e);
958}
959
960static
961int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
962 struct event_arg *ea)
963{
964 fc_port_t *fcport = ea->fcport;
965
966 if (test_bit(UNLOADING, &vha->dpc_flags))
967 return 0;
968
969 switch (vha->host->active_mode) {
970 case MODE_INITIATOR:
971 case MODE_DUAL:
972 if (fcport->scan_state == QLA_FCPORT_FOUND)
973 qla24xx_fcport_handle_login(vha, fcport);
974 break;
975
976 case MODE_TARGET:
977 default:
978 /* no-op */
979 break;
980 }
981
982 return 0;
983}
984
985static
986void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
987 struct event_arg *ea)
988{
989 fc_port_t *fcport = ea->fcport;
990
991 if (fcport->scan_state != QLA_FCPORT_FOUND) {
992 fcport->login_retry++;
993 return;
994 }
995
83548fe2
QT
996 ql_dbg(ql_dbg_disc, vha, 0x2102,
997 "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
998 __func__, fcport->port_name, fcport->disc_state,
999 fcport->fw_login_state, fcport->login_pause,
1000 fcport->deleted, fcport->conflict,
1001 fcport->last_rscn_gen, fcport->rscn_gen,
1002 fcport->last_login_gen, fcport->login_gen,
1003 fcport->flags);
726b8548
QT
1004
1005 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
726b8548
QT
1006 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
1007 return;
1008
5b33469a
QT
1009 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
1010 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
1011 return;
1012 }
1013
726b8548
QT
1014 if (fcport->flags & FCF_ASYNC_SENT) {
1015 fcport->login_retry++;
1016 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1017 return;
1018 }
1019
1020 if (fcport->disc_state == DSC_DELETE_PEND) {
1021 fcport->login_retry++;
1022 return;
1023 }
1024
1025 if (fcport->last_rscn_gen != fcport->rscn_gen) {
83548fe2 1026 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
726b8548
QT
1027 __func__, __LINE__, fcport->port_name);
1028
1029 qla24xx_async_gidpn(vha, fcport);
1030 return;
1031 }
1032
1033 qla24xx_fcport_handle_login(vha, fcport);
1034}
1035
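/*
 * qla2x00_fcport_event_handler() - Central dispatcher for fcport
 * discovery events (FCME_*).
 *
 * RSCNs are decoded by address format (port, area, domain, fabric);
 * all other events are routed to their dedicated completion handlers.
 */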
41dc529a 1036void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
726b8548 1037{
41dc529a
QT
1038 fc_port_t *fcport, *f, *tf;
1039 uint32_t id = 0, mask, rid;
726b8548
QT
1040 int rc;
1041
b98ae0d7
QT
1042 switch (ea->event) {
1043 case FCME_RELOGIN:
1044 case FCME_RSCN:
1045 case FCME_GIDPN_DONE:
1046 case FCME_GPSC_DONE:
1047 case FCME_GPNID_DONE:
1048 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
1049 test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
1050 return;
1051 break;
1052 default:
1053 break;
1054 }
1055
726b8548
QT
1056 switch (ea->event) {
1057 case FCME_RELOGIN:
1058 if (test_bit(UNLOADING, &vha->dpc_flags))
1059 return;
5ff1d584 1060
726b8548
QT
1061 qla24xx_handle_relogin_event(vha, ea);
1062 break;
1063 case FCME_RSCN:
1064 if (test_bit(UNLOADING, &vha->dpc_flags))
1065 return;
41dc529a
QT
1066 switch (ea->id.b.rsvd_1) {
1067 case RSCN_PORT_ADDR:
1068 fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
1069 if (!fcport) {
1070 /* cable moved */
1071 rc = qla24xx_post_gpnid_work(vha, &ea->id);
1072 if (rc) {
83548fe2
QT
1073 ql_log(ql_log_warn, vha, 0xd044,
1074 "RSCN GPNID work failed %02x%02x%02x\n",
1075 ea->id.b.domain, ea->id.b.area,
1076 ea->id.b.al_pa);
41dc529a
QT
1077 }
1078 } else {
1079 ea->fcport = fcport;
1080 qla24xx_handle_rscn_event(fcport, ea);
1081 }
1082 break;
1083 case RSCN_AREA_ADDR:
1084 case RSCN_DOM_ADDR:
1085 if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
1086 mask = 0xffff00;
83548fe2
QT
1087 ql_dbg(ql_dbg_async, vha, 0x5044,
1088 "RSCN: Area 0x%06x was affected\n",
1089 ea->id.b24);
41dc529a
QT
1090 } else {
1091 mask = 0xff0000;
83548fe2
QT
1092 ql_dbg(ql_dbg_async, vha, 0x507a,
1093 "RSCN: Domain 0x%06x was affected\n",
1094 ea->id.b24);
41dc529a 1095 }
726b8548 1096
41dc529a
QT
1097 rid = ea->id.b24 & mask;
1098 list_for_each_entry_safe(f, tf, &vha->vp_fcports,
1099 list) {
1100 id = f->d_id.b24 & mask;
1101 if (rid == id) {
1102 ea->fcport = f;
1103 qla24xx_handle_rscn_event(f, ea);
1104 }
726b8548 1105 }
41dc529a
QT
1106 break;
1107 case RSCN_FAB_ADDR:
1108 default:
83548fe2
QT
1109 ql_log(ql_log_warn, vha, 0xd045,
1110 "RSCN: Fabric was affected. Addr format %d\n",
1111 ea->id.b.rsvd_1);
41dc529a
QT
1112 qla2x00_mark_all_devices_lost(vha, 1);
1113 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1114 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
726b8548
QT
1115 }
1116 break;
1117 case FCME_GIDPN_DONE:
1118 qla24xx_handle_gidpn_event(vha, ea);
1119 break;
1120 case FCME_GNL_DONE:
1121 qla24xx_handle_gnl_done_event(vha, ea);
1122 break;
1123 case FCME_GPSC_DONE:
1124 qla24xx_post_upd_fcport_work(vha, ea->fcport);
1125 break;
1126 case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
1127 qla24xx_handle_plogi_done_event(vha, ea);
1128 break;
1129 case FCME_GPDB_DONE:
1130 qla24xx_handle_gpdb_event(vha, ea);
1131 break;
1132 case FCME_GPNID_DONE:
1133 qla24xx_handle_gpnid_event(vha, ea);
1134 break;
1135 case FCME_DELETE_DONE:
1136 qla24xx_handle_delete_done_event(vha, ea);
1137 break;
1138 default:
1139 BUG_ON(1);
1140 break;
1141 }
5ff1d584
AV
1142}
1143
3822263e 1144static void
faef62d1 1145qla2x00_tmf_iocb_timeout(void *data)
3822263e 1146{
25ff6af1 1147 srb_t *sp = data;
faef62d1 1148 struct srb_iocb *tmf = &sp->u.iocb_cmd;
3822263e 1149
faef62d1
AB
1150 tmf->u.tmf.comp_status = CS_TIMEOUT;
1151 complete(&tmf->u.tmf.comp);
1152}
9ba56b95 1153
faef62d1 1154static void
25ff6af1 1155qla2x00_tmf_sp_done(void *ptr, int res)
faef62d1 1156{
25ff6af1 1157 srb_t *sp = ptr;
faef62d1 1158 struct srb_iocb *tmf = &sp->u.iocb_cmd;
25ff6af1 1159
faef62d1 1160 complete(&tmf->u.tmf.comp);
3822263e
MI
1161}
1162
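/*
 * qla2x00_async_tm_cmd() - Issue a task-management IOCB and wait for
 * its completion.
 *
 * Unless the adapter is unloading or is an ISPFx00, a marker IOCB is
 * issued afterwards to synchronize the affected LUN or target.
 */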
1163int
faef62d1 1164qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
3822263e
MI
1165 uint32_t tag)
1166{
1167 struct scsi_qla_host *vha = fcport->vha;
faef62d1 1168 struct srb_iocb *tm_iocb;
3822263e 1169 srb_t *sp;
faef62d1 1170 int rval = QLA_FUNCTION_FAILED;
3822263e 1171
9ba56b95 1172 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3822263e
MI
1173 if (!sp)
1174 goto done;
1175
faef62d1 1176 tm_iocb = &sp->u.iocb_cmd;
9ba56b95
GM
1177 sp->type = SRB_TM_CMD;
1178 sp->name = "tmf";
faef62d1
AB
1179 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
1180 tm_iocb->u.tmf.flags = flags;
1181 tm_iocb->u.tmf.lun = lun;
1182 tm_iocb->u.tmf.data = tag;
1183 sp->done = qla2x00_tmf_sp_done;
1184 tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
1185 init_completion(&tm_iocb->u.tmf.comp);
3822263e
MI
1186
1187 rval = qla2x00_start_sp(sp);
1188 if (rval != QLA_SUCCESS)
1189 goto done_free_sp;
1190
7c3df132 1191 ql_dbg(ql_dbg_taskm, vha, 0x802f,
cfb0919c
CD
1192 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
1193 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
1194 fcport->d_id.b.area, fcport->d_id.b.al_pa);
faef62d1
AB
1195
1196 wait_for_completion(&tm_iocb->u.tmf.comp);
1197
1198 rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
1199 QLA_SUCCESS : QLA_FUNCTION_FAILED;
1200
1201 if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
1202 ql_dbg(ql_dbg_taskm, vha, 0x8030,
1203 "TM IOCB failed (%x).\n", rval);
1204 }
1205
1206 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
1207 flags = tm_iocb->u.tmf.flags;
1208 lun = (uint16_t)tm_iocb->u.tmf.lun;
1209
1210 /* Issue Marker IOCB */
1211 qla2x00_marker(vha, vha->hw->req_q_map[0],
1212 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
1213 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
1214 }
3822263e
MI
1215
1216done_free_sp:
25ff6af1 1217 sp->free(sp);
3822263e
MI
1218done:
1219 return rval;
1220}
1221
4440e46d
AB
1222static void
1223qla24xx_abort_iocb_timeout(void *data)
1224{
25ff6af1 1225 srb_t *sp = data;
4440e46d
AB
1226 struct srb_iocb *abt = &sp->u.iocb_cmd;
1227
1228 abt->u.abt.comp_status = CS_TIMEOUT;
1229 complete(&abt->u.abt.comp);
1230}
1231
1232static void
25ff6af1 1233qla24xx_abort_sp_done(void *ptr, int res)
4440e46d 1234{
25ff6af1 1235 srb_t *sp = ptr;
4440e46d
AB
1236 struct srb_iocb *abt = &sp->u.iocb_cmd;
1237
1238 complete(&abt->u.abt.comp);
1239}
1240
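/*
 * qla24xx_async_abort_cmd() - Issue an abort IOCB for @cmd_sp and wait
 * for its completion, mapping the completion status to
 * QLA_SUCCESS/QLA_FUNCTION_FAILED.
 */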
15f30a57 1241int
4440e46d
AB
1242qla24xx_async_abort_cmd(srb_t *cmd_sp)
1243{
25ff6af1 1244 scsi_qla_host_t *vha = cmd_sp->vha;
4440e46d
AB
1245 fc_port_t *fcport = cmd_sp->fcport;
1246 struct srb_iocb *abt_iocb;
1247 srb_t *sp;
1248 int rval = QLA_FUNCTION_FAILED;
1249
1250 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1251 if (!sp)
1252 goto done;
1253
1254 abt_iocb = &sp->u.iocb_cmd;
1255 sp->type = SRB_ABT_CMD;
1256 sp->name = "abort";
1257 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
1258 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
1259 sp->done = qla24xx_abort_sp_done;
1260 abt_iocb->timeout = qla24xx_abort_iocb_timeout;
1261 init_completion(&abt_iocb->u.abt.comp);
1262
1263 rval = qla2x00_start_sp(sp);
1264 if (rval != QLA_SUCCESS)
1265 goto done_free_sp;
1266
1267 ql_dbg(ql_dbg_async, vha, 0x507c,
1268 "Abort command issued - hdl=%x, target_id=%x\n",
1269 cmd_sp->handle, fcport->tgt_id);
1270
1271 wait_for_completion(&abt_iocb->u.abt.comp);
1272
1273 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
1274 QLA_SUCCESS : QLA_FUNCTION_FAILED;
1275
1276done_free_sp:
25ff6af1 1277 sp->free(sp);
4440e46d
AB
1278done:
1279 return rval;
1280}
1281
1282int
1283qla24xx_async_abort_command(srb_t *sp)
1284{
1285 unsigned long flags = 0;
1286
1287 uint32_t handle;
1288 fc_port_t *fcport = sp->fcport;
1289 struct scsi_qla_host *vha = fcport->vha;
1290 struct qla_hw_data *ha = vha->hw;
1291 struct req_que *req = vha->req;
1292
1293 spin_lock_irqsave(&ha->hardware_lock, flags);
1294 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1295 if (req->outstanding_cmds[handle] == sp)
1296 break;
1297 }
1298 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1299 if (handle == req->num_outstanding_cmds) {
1300 /* Command not found. */
1301 return QLA_FUNCTION_FAILED;
1302 }
1303 if (sp->type == SRB_FXIOCB_DCMD)
1304 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
1305 FXDISC_ABORT_IOCTL);
1306
1307 return qla24xx_async_abort_cmd(sp);
1308}
1309
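/*
 * qla24xx_handle_plogi_done_event() - Act on the mailbox status of a
 * completed asynchronous PLOGI: post GPDB work on success, retry or
 * mark the device lost on error, and recover from loop ID/port ID
 * conflicts via GNL or GIDPN.
 */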
726b8548
QT
1310static void
1311qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ac280b67 1312{
726b8548 1313 port_id_t cid; /* conflict Nport id */
ac280b67 1314
726b8548 1315 switch (ea->data[0]) {
ac280b67 1316 case MBS_COMMAND_COMPLETE:
a4f92a32
AV
1317 /*
1318 * Driver must validate login state - If PRLI not complete,
1319 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
1320 * requests.
1321 */
83548fe2
QT
1322 ql_dbg(ql_dbg_disc, vha, 0x20ea,
1323 "%s %d %8phC post gpdb\n",
1324 __func__, __LINE__, ea->fcport->port_name);
7c3f8fd1 1325 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
726b8548
QT
1326 ea->fcport->logout_on_delete = 1;
1327 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
ac280b67
AV
1328 break;
1329 case MBS_COMMAND_ERROR:
83548fe2 1330 ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
726b8548
QT
1331 __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
1332
1333 ea->fcport->flags &= ~FCF_ASYNC_SENT;
1334 ea->fcport->disc_state = DSC_LOGIN_FAILED;
1335 if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
ac280b67
AV
1336 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1337 else
726b8548 1338 qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
ac280b67
AV
1339 break;
1340 case MBS_LOOP_ID_USED:
726b8548
QT
1341 /* data[1] = IO PARAM 1 = nport ID */
1342 cid.b.domain = (ea->iop[1] >> 16) & 0xff;
1343 cid.b.area = (ea->iop[1] >> 8) & 0xff;
1344 cid.b.al_pa = ea->iop[1] & 0xff;
1345 cid.b.rsvd_1 = 0;
1346
83548fe2
QT
1347 ql_dbg(ql_dbg_disc, vha, 0x20ec,
1348 "%s %d %8phC LoopID 0x%x in use post gnl\n",
1349 __func__, __LINE__, ea->fcport->port_name,
1350 ea->fcport->loop_id);
726b8548
QT
1351
1352 if (IS_SW_RESV_ADDR(cid)) {
1353 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
1354 ea->fcport->loop_id = FC_NO_LOOP_ID;
1355 } else {
1356 qla2x00_clear_loop_id(ea->fcport);
ac280b67 1357 }
726b8548
QT
1358 qla24xx_post_gnl_work(vha, ea->fcport);
1359 break;
1360 case MBS_PORT_ID_USED:
83548fe2
QT
1361 ql_dbg(ql_dbg_disc, vha, 0x20ed,
1362 "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
1363 __func__, __LINE__, ea->fcport->port_name,
1364 ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
1365 ea->fcport->d_id.b.al_pa);
726b8548
QT
1366
1367 qla2x00_clear_loop_id(ea->fcport);
1368 qla24xx_post_gidpn_work(vha, ea->fcport);
ac280b67
AV
1369 break;
1370 }
4916392b 1371 return;
ac280b67
AV
1372}
1373
4916392b 1374void
ac280b67
AV
1375qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1376 uint16_t *data)
1377{
726b8548 1378 qla2x00_mark_device_lost(vha, fcport, 1, 0);
a6ca8878 1379 qlt_logo_completion_handler(fcport, data[0]);
726b8548 1380 fcport->login_gen++;
4916392b 1381 return;
ac280b67
AV
1382}
1383
4916392b 1384void
5ff1d584
AV
1385qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1386 uint16_t *data)
1387{
1388 if (data[0] == MBS_COMMAND_COMPLETE) {
1389 qla2x00_update_fcport(vha, fcport);
1390
4916392b 1391 return;
5ff1d584
AV
1392 }
1393
1394 /* Retry login. */
1395 fcport->flags &= ~FCF_ASYNC_SENT;
1396 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
1397 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1398 else
80d79440 1399 qla2x00_mark_device_lost(vha, fcport, 1, 0);
5ff1d584 1400
4916392b 1401 return;
5ff1d584
AV
1402}
1403
1da177e4
LT
1404/****************************************************************************/
1405/* QLogic ISP2x00 Hardware Support Functions. */
1406/****************************************************************************/
1407
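/*
 * qla83xx_nic_core_fw_load() - Participate in the ISP83xx inter-driver
 * communication (IDC) protocol under the IDC lock: set FCoE driver
 * presence, negotiate IDC major/minor versions, decide reset ownership
 * and run the IDC state handler.
 */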
fa492630 1408static int
7d613ac6
SV
1409qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
1410{
1411 int rval = QLA_SUCCESS;
1412 struct qla_hw_data *ha = vha->hw;
1413 uint32_t idc_major_ver, idc_minor_ver;
711aa7f7 1414 uint16_t config[4];
7d613ac6
SV
1415
1416 qla83xx_idc_lock(vha, 0);
1417
1418 /* SV: TODO: Assign initialization timeout from
1419 * flash-info / other param
1420 */
1421 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
1422 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
1423
1424 /* Set our fcoe function presence */
1425 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
1426 ql_dbg(ql_dbg_p3p, vha, 0xb077,
1427 "Error while setting DRV-Presence.\n");
1428 rval = QLA_FUNCTION_FAILED;
1429 goto exit;
1430 }
1431
1432 /* Decide the reset ownership */
1433 qla83xx_reset_ownership(vha);
1434
1435 /*
1436 * On first protocol driver load:
1437 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
1438 * register.
1439 * Others: Check compatibility with current IDC Major version.
1440 */
1441 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
1442 if (ha->flags.nic_core_reset_owner) {
1443 /* Set IDC Major version */
1444 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
1445 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
1446
1447 /* Clearing IDC-Lock-Recovery register */
1448 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
1449 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
1450 /*
1451 * Clear further IDC participation if we are not compatible with
1452 * the current IDC Major Version.
1453 */
1454 ql_log(ql_log_warn, vha, 0xb07d,
1455 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
1456 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
1457 __qla83xx_clear_drv_presence(vha);
1458 rval = QLA_FUNCTION_FAILED;
1459 goto exit;
1460 }
1461 /* Each function sets its supported Minor version. */
1462 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
1463 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
1464 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
1465
711aa7f7
SK
1466 if (ha->flags.nic_core_reset_owner) {
1467 memset(config, 0, sizeof(config));
1468 if (!qla81xx_get_port_config(vha, config))
1469 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
1470 QLA8XXX_DEV_READY);
1471 }
1472
7d613ac6
SV
1473 rval = qla83xx_idc_state_handler(vha);
1474
1475exit:
1476 qla83xx_idc_unlock(vha, 0);
1477
1478 return rval;
1479}
1480
1da177e4
LT
1481/*
1482* qla2x00_initialize_adapter
1483* Initialize board.
1484*
1485* Input:
1486* ha = adapter block pointer.
1487*
1488* Returns:
1489* 0 = success
1490*/
1491int
e315cd28 1492qla2x00_initialize_adapter(scsi_qla_host_t *vha)
1da177e4
LT
1493{
1494 int rval;
e315cd28 1495 struct qla_hw_data *ha = vha->hw;
73208dfd 1496 struct req_que *req = ha->req_q_map[0];
2533cf67 1497
fc90adaf
JC
1498 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
1499 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
1500
1da177e4 1501 /* Clear adapter flags. */
e315cd28 1502 vha->flags.online = 0;
2533cf67 1503 ha->flags.chip_reset_done = 0;
e315cd28 1504 vha->flags.reset_active = 0;
85880801
AV
1505 ha->flags.pci_channel_io_perm_failure = 0;
1506 ha->flags.eeh_busy = 0;
fabbb8df 1507 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
e315cd28
AC
1508 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1509 atomic_set(&vha->loop_state, LOOP_DOWN);
1510 vha->device_flags = DFLG_NO_CABLE;
1511 vha->dpc_flags = 0;
1512 vha->flags.management_server_logged_in = 0;
1513 vha->marker_needed = 0;
1da177e4
LT
1514 ha->isp_abort_cnt = 0;
1515 ha->beacon_blink_led = 0;
1516
73208dfd
AC
1517 set_bit(0, ha->req_qid_map);
1518 set_bit(0, ha->rsp_qid_map);
1519
cfb0919c 1520 ql_dbg(ql_dbg_init, vha, 0x0040,
7c3df132 1521 "Configuring PCI space...\n");
e315cd28 1522 rval = ha->isp_ops->pci_config(vha);
1da177e4 1523 if (rval) {
7c3df132
SK
1524 ql_log(ql_log_warn, vha, 0x0044,
1525 "Unable to configure PCI space.\n");
1da177e4
LT
1526 return (rval);
1527 }
1528
e315cd28 1529 ha->isp_ops->reset_chip(vha);
1da177e4 1530
e315cd28 1531 rval = qla2xxx_get_flash_info(vha);
c00d8994 1532 if (rval) {
7c3df132
SK
1533 ql_log(ql_log_fatal, vha, 0x004f,
1534 "Unable to validate FLASH data.\n");
7ec0effd
AD
1535 return rval;
1536 }
1537
1538 if (IS_QLA8044(ha)) {
1539 qla8044_read_reset_template(vha);
1540
1541 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
 1542 * If DONTRESET_BIT0 is set, drivers should not set dev_state
 1543 * to NEED_RESET. But if NEED_RESET is set, drivers
 1544 * should honor the reset. */
1545 if (ql2xdontresethba == 1)
1546 qla8044_set_idc_dontreset(vha);
c00d8994
AV
1547 }
1548
73208dfd 1549 ha->isp_ops->get_flash_version(vha, req->ring);
cfb0919c 1550 ql_dbg(ql_dbg_init, vha, 0x0061,
7c3df132 1551 "Configure NVRAM parameters...\n");
0107109e 1552
e315cd28 1553 ha->isp_ops->nvram_config(vha);
1da177e4 1554
d4c760c2
AV
1555 if (ha->flags.disable_serdes) {
1556 /* Mask HBA via NVRAM settings? */
7c3df132 1557 ql_log(ql_log_info, vha, 0x0077,
7b833558 1558 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
d4c760c2
AV
1559 return QLA_FUNCTION_FAILED;
1560 }
1561
cfb0919c 1562 ql_dbg(ql_dbg_init, vha, 0x0078,
7c3df132 1563 "Verifying loaded RISC code...\n");
1da177e4 1564
e315cd28
AC
1565 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
1566 rval = ha->isp_ops->chip_diag(vha);
d19044c3
AV
1567 if (rval)
1568 return (rval);
e315cd28 1569 rval = qla2x00_setup_chip(vha);
d19044c3
AV
1570 if (rval)
1571 return (rval);
1da177e4 1572 }
a9083016 1573
4d4df193 1574 if (IS_QLA84XX(ha)) {
e315cd28 1575 ha->cs84xx = qla84xx_get_chip(vha);
4d4df193 1576 if (!ha->cs84xx) {
7c3df132 1577 ql_log(ql_log_warn, vha, 0x00d0,
4d4df193
HK
1578 "Unable to configure ISP84XX.\n");
1579 return QLA_FUNCTION_FAILED;
1580 }
1581 }
2d70c103 1582
ead03855 1583 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2d70c103
NB
1584 rval = qla2x00_init_rings(vha);
1585
2533cf67 1586 ha->flags.chip_reset_done = 1;
1da177e4 1587
9a069e19 1588 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
6c452a45 1589 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
9a069e19
GM
1590 rval = qla84xx_init_chip(vha);
1591 if (rval != QLA_SUCCESS) {
7c3df132
SK
1592 ql_log(ql_log_warn, vha, 0x00d4,
1593 "Unable to initialize ISP84XX.\n");
8d2b21db 1594 qla84xx_put_chip(vha);
9a069e19
GM
1595 }
1596 }
1597
7d613ac6
SV
1598 /* Load the NIC Core f/w if we are the first protocol driver. */
1599 if (IS_QLA8031(ha)) {
1600 rval = qla83xx_nic_core_fw_load(vha);
1601 if (rval)
1602 ql_log(ql_log_warn, vha, 0x0124,
1603 "Error in initializing NIC Core f/w.\n");
1604 }
1605
2f0f3f4f
MI
1606 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
1607 qla24xx_read_fcp_prio_cfg(vha);
09ff701a 1608
c46e65c7
JC
1609 if (IS_P3P_TYPE(ha))
1610 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
1611 else
1612 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
1613
1da177e4
LT
1614 return (rval);
1615}
1616
1617/**
abbd8870 1618 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
1da177e4
LT
1619 * @ha: HA context
1620 *
1621 * Returns 0 on success.
1622 */
abbd8870 1623int
e315cd28 1624qla2100_pci_config(scsi_qla_host_t *vha)
1da177e4 1625{
a157b101 1626 uint16_t w;
abbd8870 1627 unsigned long flags;
e315cd28 1628 struct qla_hw_data *ha = vha->hw;
3d71644c 1629 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 1630
1da177e4 1631 pci_set_master(ha->pdev);
af6177d8 1632 pci_try_set_mwi(ha->pdev);
1da177e4 1633
1da177e4 1634 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 1635 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
abbd8870
AV
1636 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1637
737faece 1638 pci_disable_rom(ha->pdev);
1da177e4
LT
1639
1640 /* Get PCI bus information. */
1641 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 1642 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
1da177e4
LT
1643 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1644
abbd8870
AV
1645 return QLA_SUCCESS;
1646}
1da177e4 1647
abbd8870
AV
1648/**
1649 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
1650 * @ha: HA context
1651 *
1652 * Returns 0 on success.
1653 */
1654int
e315cd28 1655qla2300_pci_config(scsi_qla_host_t *vha)
abbd8870 1656{
a157b101 1657 uint16_t w;
abbd8870
AV
1658 unsigned long flags = 0;
1659 uint32_t cnt;
e315cd28 1660 struct qla_hw_data *ha = vha->hw;
3d71644c 1661 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 1662
abbd8870 1663 pci_set_master(ha->pdev);
af6177d8 1664 pci_try_set_mwi(ha->pdev);
1da177e4 1665
abbd8870 1666 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 1667 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1da177e4 1668
abbd8870
AV
1669 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1670 w &= ~PCI_COMMAND_INTX_DISABLE;
a157b101 1671 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1da177e4 1672
abbd8870
AV
1673 /*
1674 * If this is a 2300 card and not 2312, reset the
1675 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
1676 * the 2310 also reports itself as a 2300 so we need to get the
1677 * fb revision level -- a 6 indicates it really is a 2300 and
1678 * not a 2310.
1679 */
1680 if (IS_QLA2300(ha)) {
1681 spin_lock_irqsave(&ha->hardware_lock, flags);
1da177e4 1682
abbd8870 1683 /* Pause RISC. */
3d71644c 1684 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
abbd8870 1685 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 1686 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
abbd8870 1687 break;
1da177e4 1688
abbd8870
AV
1689 udelay(10);
1690 }
1da177e4 1691
abbd8870 1692 /* Select FPM registers. */
3d71644c
AV
1693 WRT_REG_WORD(&reg->ctrl_status, 0x20);
1694 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
1695
1696 /* Get the fb rev level */
3d71644c 1697 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
abbd8870
AV
1698
1699 if (ha->fb_rev == FPM_2300)
a157b101 1700 pci_clear_mwi(ha->pdev);
abbd8870
AV
1701
1702 /* Deselect FPM registers. */
3d71644c
AV
1703 WRT_REG_WORD(&reg->ctrl_status, 0x0);
1704 RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
1705
1706 /* Release RISC module. */
3d71644c 1707 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
abbd8870 1708 for (cnt = 0; cnt < 30000; cnt++) {
3d71644c 1709 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
abbd8870
AV
1710 break;
1711
1712 udelay(10);
1da177e4 1713 }
1da177e4 1714
abbd8870
AV
1715 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1716 }
1da177e4 1717
abbd8870
AV
1718 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
1719
737faece 1720 pci_disable_rom(ha->pdev);
1da177e4 1721
abbd8870
AV
1722 /* Get PCI bus information. */
1723 spin_lock_irqsave(&ha->hardware_lock, flags);
3d71644c 1724 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
abbd8870
AV
1725 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1726
1727 return QLA_SUCCESS;
1da177e4
LT
1728}
1729
0107109e
AV
1730/**
1731 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
1732 * @ha: HA context
1733 *
1734 * Returns 0 on success.
1735 */
1736int
e315cd28 1737qla24xx_pci_config(scsi_qla_host_t *vha)
0107109e 1738{
a157b101 1739 uint16_t w;
0107109e 1740 unsigned long flags = 0;
e315cd28 1741 struct qla_hw_data *ha = vha->hw;
0107109e 1742 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
0107109e
AV
1743
1744 pci_set_master(ha->pdev);
af6177d8 1745 pci_try_set_mwi(ha->pdev);
0107109e
AV
1746
1747 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
a157b101 1748 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
0107109e
AV
1749 w &= ~PCI_COMMAND_INTX_DISABLE;
1750 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1751
1752 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
1753
1754 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
f85ec187
AV
1755 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
1756 pcix_set_mmrbc(ha->pdev, 2048);
0107109e
AV
1757
1758 /* PCIe -- adjust Maximum Read Request Size (2048). */
e67f1321 1759 if (pci_is_pcie(ha->pdev))
5ffd3a52 1760 pcie_set_readrq(ha->pdev, 4096);
0107109e 1761
737faece 1762 pci_disable_rom(ha->pdev);
0107109e 1763
44c10138 1764 ha->chip_revision = ha->pdev->revision;
a8488abe 1765
0107109e
AV
1766 /* Get PCI bus information. */
1767 spin_lock_irqsave(&ha->hardware_lock, flags);
1768 ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
1769 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1770
1771 return QLA_SUCCESS;
1772}
1773
c3a2f0df
AV
1774/**
1775 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
1776 * @ha: HA context
1777 *
1778 * Returns 0 on success.
1779 */
1780int
e315cd28 1781qla25xx_pci_config(scsi_qla_host_t *vha)
c3a2f0df
AV
1782{
1783 uint16_t w;
e315cd28 1784 struct qla_hw_data *ha = vha->hw;
c3a2f0df
AV
1785
1786 pci_set_master(ha->pdev);
1787 pci_try_set_mwi(ha->pdev);
1788
1789 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
1790 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1791 w &= ~PCI_COMMAND_INTX_DISABLE;
1792 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1793
1794 /* PCIe -- adjust Maximum Read Request Size (2048). */
e67f1321 1795 if (pci_is_pcie(ha->pdev))
5ffd3a52 1796 pcie_set_readrq(ha->pdev, 4096);
c3a2f0df 1797
737faece 1798 pci_disable_rom(ha->pdev);
c3a2f0df
AV
1799
1800 ha->chip_revision = ha->pdev->revision;
1801
1802 return QLA_SUCCESS;
1803}
1804
1da177e4
LT
1805/**
1806 * qla2x00_isp_firmware() - Choose firmware image.
1807 * @ha: HA context
1808 *
1809 * Returns 0 on success.
1810 */
1811static int
e315cd28 1812qla2x00_isp_firmware(scsi_qla_host_t *vha)
1da177e4
LT
1813{
1814 int rval;
42e421b1
AV
1815 uint16_t loop_id, topo, sw_cap;
1816 uint8_t domain, area, al_pa;
e315cd28 1817 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
1818
1819 /* Assume loading risc code */
fa2a1ce5 1820 rval = QLA_FUNCTION_FAILED;
1da177e4
LT
1821
1822 if (ha->flags.disable_risc_code_load) {
7c3df132 1823 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
1da177e4
LT
1824
1825 /* Verify checksum of loaded RISC code. */
e315cd28 1826 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
42e421b1
AV
1827 if (rval == QLA_SUCCESS) {
1828 /* And, verify we are not in ROM code. */
e315cd28 1829 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
42e421b1
AV
1830 &area, &domain, &topo, &sw_cap);
1831 }
1da177e4
LT
1832 }
1833
7c3df132
SK
1834 if (rval)
1835 ql_dbg(ql_dbg_init, vha, 0x007a,
1836 "**** Load RISC code ****.\n");
1da177e4
LT
1837
1838 return (rval);
1839}
1840
1841/**
1842 * qla2x00_reset_chip() - Reset ISP chip.
1843 * @ha: HA context
1844 *
1845 * Returns 0 on success.
1846 */
abbd8870 1847void
e315cd28 1848qla2x00_reset_chip(scsi_qla_host_t *vha)
1da177e4
LT
1849{
1850 unsigned long flags = 0;
e315cd28 1851 struct qla_hw_data *ha = vha->hw;
3d71644c 1852 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 1853 uint32_t cnt;
1da177e4
LT
1854 uint16_t cmd;
1855
85880801
AV
1856 if (unlikely(pci_channel_offline(ha->pdev)))
1857 return;
1858
fd34f556 1859 ha->isp_ops->disable_intrs(ha);
1da177e4
LT
1860
1861 spin_lock_irqsave(&ha->hardware_lock, flags);
1862
1863 /* Turn off master enable */
1864 cmd = 0;
1865 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
1866 cmd &= ~PCI_COMMAND_MASTER;
1867 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
1868
1869 if (!IS_QLA2100(ha)) {
1870 /* Pause RISC. */
1871 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
1872 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
1873 for (cnt = 0; cnt < 30000; cnt++) {
1874 if ((RD_REG_WORD(&reg->hccr) &
1875 HCCR_RISC_PAUSE) != 0)
1876 break;
1877 udelay(100);
1878 }
1879 } else {
1880 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
1881 udelay(10);
1882 }
1883
1884 /* Select FPM registers. */
1885 WRT_REG_WORD(&reg->ctrl_status, 0x20);
1886 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1887
1888 /* FPM Soft Reset. */
1889 WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
1890 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
1891
1892 /* Toggle Fpm Reset. */
1893 if (!IS_QLA2200(ha)) {
1894 WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
1895 RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
1896 }
1897
1898 /* Select frame buffer registers. */
1899 WRT_REG_WORD(&reg->ctrl_status, 0x10);
1900 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1901
1902 /* Reset frame buffer FIFOs. */
1903 if (IS_QLA2200(ha)) {
1904 WRT_FB_CMD_REG(ha, reg, 0xa000);
1905 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
1906 } else {
1907 WRT_FB_CMD_REG(ha, reg, 0x00fc);
1908
 1909			/* Read back fb_cmd until zero or timeout (~300 ms: 3000 x 100 us) */
1910 for (cnt = 0; cnt < 3000; cnt++) {
1911 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
1912 break;
1913 udelay(100);
1914 }
1915 }
1916
1917 /* Select RISC module registers. */
1918 WRT_REG_WORD(&reg->ctrl_status, 0);
1919 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
1920
1921 /* Reset RISC processor. */
1922 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1923 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
1924
1925 /* Release RISC processor. */
1926 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1927 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
1928 }
1929
1930 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
1931 WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
1932
1933 /* Reset ISP chip. */
1934 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1935
1936 /* Wait for RISC to recover from reset. */
1937 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1938 /*
 1939		 * It is necessary to have a delay here since the card doesn't
1940 * respond to PCI reads during a reset. On some architectures
1941 * this will result in an MCA.
1942 */
1943 udelay(20);
1944 for (cnt = 30000; cnt; cnt--) {
1945 if ((RD_REG_WORD(&reg->ctrl_status) &
1946 CSR_ISP_SOFT_RESET) == 0)
1947 break;
1948 udelay(100);
1949 }
1950 } else
1951 udelay(10);
1952
1953 /* Reset RISC processor. */
1954 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
1955
1956 WRT_REG_WORD(&reg->semaphore, 0);
1957
1958 /* Release RISC processor. */
1959 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1960 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
1961
1962 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
1963 for (cnt = 0; cnt < 30000; cnt++) {
ffb39f03 1964 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
1da177e4 1965 break;
1da177e4
LT
1966
1967 udelay(100);
1968 }
1969 } else
1970 udelay(100);
1971
1972 /* Turn on master enable */
1973 cmd |= PCI_COMMAND_MASTER;
1974 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
1975
1976 /* Disable RISC pause on FPM parity error. */
1977 if (!IS_QLA2100(ha)) {
1978 WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
1979 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
1980 }
1981
1982 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1983}
1984
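/*
 * Editor's note: qla2x00_reset_chip() leans on one idiom throughout -- poll a
 * register until a bit reaches the expected state, giving up after a fixed
 * number of short delays. Below is a hedged, self-contained sketch of that
 * idiom; ex_read_status() and usleep() stand in for the driver's register
 * reads and udelay(), they are not qla2xxx interfaces.
 */
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

static bool ex_poll_until(uint16_t (*read_reg)(void), uint16_t mask,
			  uint16_t want, unsigned int attempts,
			  unsigned int delay_us)
{
	while (attempts--) {
		if ((read_reg() & mask) == want)
			return true;		/* condition met */
		usleep(delay_us);		/* the driver busy-waits with udelay() */
	}
	return false;				/* timed out; caller decides what next */
}

static uint16_t ex_read_status(void)
{
	return 0;				/* pretend the reset bit already cleared */
}

int main(void)
{
	/* e.g. wait for a soft-reset bit (0x0001 here) to drop, as above. */
	return ex_poll_until(ex_read_status, 0x0001, 0, 30000, 100) ? 0 : 1;
}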
b1d46989
MI
1985/**
 1986 * qla81xx_reset_mpi() - Resets MPI FW via Write MPI Register MBC.
1987 *
1988 * Returns 0 on success.
1989 */
fa492630 1990static int
b1d46989
MI
1991qla81xx_reset_mpi(scsi_qla_host_t *vha)
1992{
1993 uint16_t mb[4] = {0x1010, 0, 1, 0};
1994
6246b8a1
GM
1995 if (!IS_QLA81XX(vha->hw))
1996 return QLA_SUCCESS;
1997
b1d46989
MI
1998 return qla81xx_write_mpi_register(vha, mb);
1999}
2000
0107109e 2001/**
88c26663 2002 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
0107109e
AV
2003 * @ha: HA context
2004 *
2005 * Returns 0 on success.
2006 */
d14e72fb 2007static inline int
e315cd28 2008qla24xx_reset_risc(scsi_qla_host_t *vha)
0107109e
AV
2009{
2010 unsigned long flags = 0;
e315cd28 2011 struct qla_hw_data *ha = vha->hw;
0107109e 2012 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
52c82823 2013 uint32_t cnt;
335a1cc9 2014 uint16_t wd;
b1d46989 2015 static int abts_cnt; /* ISP abort retry counts */
d14e72fb 2016 int rval = QLA_SUCCESS;
0107109e 2017
0107109e
AV
2018 spin_lock_irqsave(&ha->hardware_lock, flags);
2019
2020 /* Reset RISC. */
2021 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
2022 for (cnt = 0; cnt < 30000; cnt++) {
2023 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
2024 break;
2025
2026 udelay(10);
2027 }
2028
d14e72fb
HM
2029 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
2030 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
2031
2032 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
2033 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
2034 RD_REG_DWORD(&reg->hccr),
2035 RD_REG_DWORD(&reg->ctrl_status),
2036 (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
2037
0107109e
AV
2038 WRT_REG_DWORD(&reg->ctrl_status,
2039 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
335a1cc9 2040 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
88c26663 2041
335a1cc9 2042 udelay(100);
d14e72fb 2043
88c26663 2044 /* Wait for firmware to complete NVRAM accesses. */
52c82823 2045 RD_REG_WORD(&reg->mailbox0);
d14e72fb
HM
2046 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
2047 rval == QLA_SUCCESS; cnt--) {
88c26663 2048 barrier();
d14e72fb
HM
2049 if (cnt)
2050 udelay(5);
2051 else
2052 rval = QLA_FUNCTION_TIMEOUT;
88c26663
AV
2053 }
2054
d14e72fb
HM
2055 if (rval == QLA_SUCCESS)
2056 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
2057
2058 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
2059 "HCCR: 0x%x, MailBox0 Status 0x%x\n",
2060 RD_REG_DWORD(&reg->hccr),
2061 RD_REG_DWORD(&reg->mailbox0));
2062
335a1cc9 2063 /* Wait for soft-reset to complete. */
52c82823 2064 RD_REG_DWORD(&reg->ctrl_status);
200ffb15 2065 for (cnt = 0; cnt < 60; cnt++) {
0107109e 2066 barrier();
d14e72fb
HM
2067 if ((RD_REG_DWORD(&reg->ctrl_status) &
2068 CSRX_ISP_SOFT_RESET) == 0)
2069 break;
2070
2071 udelay(5);
0107109e 2072 }
d14e72fb
HM
2073 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
2074 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
2075
2076 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
2077 "HCCR: 0x%x, Soft Reset status: 0x%x\n",
2078 RD_REG_DWORD(&reg->hccr),
2079 RD_REG_DWORD(&reg->ctrl_status));
0107109e 2080
b1d46989
MI
2081 /* If required, do an MPI FW reset now */
2082 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
2083 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
2084 if (++abts_cnt < 5) {
2085 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2086 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
2087 } else {
2088 /*
2089 * We exhausted the ISP abort retries. We have to
2090 * set the board offline.
2091 */
2092 abts_cnt = 0;
2093 vha->flags.online = 0;
2094 }
2095 }
2096 }
2097
0107109e
AV
2098 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2099 RD_REG_DWORD(&reg->hccr);
2100
2101 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2102 RD_REG_DWORD(&reg->hccr);
2103
2104 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2105 RD_REG_DWORD(&reg->hccr);
2106
52c82823 2107 RD_REG_WORD(&reg->mailbox0);
200ffb15 2108 for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
d14e72fb 2109 rval == QLA_SUCCESS; cnt--) {
0107109e 2110 barrier();
d14e72fb
HM
2111 if (cnt)
2112 udelay(5);
2113 else
2114 rval = QLA_FUNCTION_TIMEOUT;
0107109e 2115 }
d14e72fb
HM
2116 if (rval == QLA_SUCCESS)
2117 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2118
2119 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
2120 "Host Risc 0x%x, mailbox0 0x%x\n",
2121 RD_REG_DWORD(&reg->hccr),
2122 RD_REG_WORD(&reg->mailbox0));
0107109e
AV
2123
2124 spin_unlock_irqrestore(&ha->hardware_lock, flags);
124f85e6 2125
d14e72fb
HM
2126 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
2127 "Driver in %s mode\n",
2128 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
2129
124f85e6
AV
2130 if (IS_NOPOLLING_TYPE(ha))
2131 ha->isp_ops->enable_intrs(ha);
d14e72fb
HM
2132
2133 return rval;
0107109e
AV
2134}
2135
4ea2c9c7
JC
2136static void
2137qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
2138{
2139 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2140
2141 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2142 *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
2143
2144}
2145
2146static void
2147qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
2148{
2149 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2150
2151 WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2152 WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
2153}
2154
2155static void
2156qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
2157{
4ea2c9c7
JC
2158 uint32_t wd32 = 0;
2159 uint delta_msec = 100;
2160 uint elapsed_msec = 0;
2161 uint timeout_msec;
2162 ulong n;
2163
cc790764
JC
2164 if (vha->hw->pdev->subsystem_device != 0x0175 &&
2165 vha->hw->pdev->subsystem_device != 0x0240)
4ea2c9c7
JC
2166 return;
2167
8dd7e3a5
JC
2168 WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
2169 udelay(100);
2170
4ea2c9c7
JC
2171attempt:
2172 timeout_msec = TIMEOUT_SEMAPHORE;
2173 n = timeout_msec / delta_msec;
2174 while (n--) {
2175 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
2176 qla25xx_read_risc_sema_reg(vha, &wd32);
2177 if (wd32 & RISC_SEMAPHORE)
2178 break;
2179 msleep(delta_msec);
2180 elapsed_msec += delta_msec;
2181 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2182 goto force;
2183 }
2184
2185 if (!(wd32 & RISC_SEMAPHORE))
2186 goto force;
2187
2188 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2189 goto acquired;
2190
2191 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
2192 timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
2193 n = timeout_msec / delta_msec;
2194 while (n--) {
2195 qla25xx_read_risc_sema_reg(vha, &wd32);
2196 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2197 break;
2198 msleep(delta_msec);
2199 elapsed_msec += delta_msec;
2200 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2201 goto force;
2202 }
2203
2204 if (wd32 & RISC_SEMAPHORE_FORCE)
2205 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
2206
2207 goto attempt;
2208
2209force:
2210 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
2211
2212acquired:
2213 return;
2214}
2215
88c26663
AV
2216/**
2217 * qla24xx_reset_chip() - Reset ISP24xx chip.
2218 * @ha: HA context
2219 *
2220 * Returns 0 on success.
2221 */
2222void
e315cd28 2223qla24xx_reset_chip(scsi_qla_host_t *vha)
88c26663 2224{
e315cd28 2225 struct qla_hw_data *ha = vha->hw;
85880801
AV
2226
2227 if (pci_channel_offline(ha->pdev) &&
2228 ha->flags.pci_channel_io_perm_failure) {
2229 return;
2230 }
2231
fd34f556 2232 ha->isp_ops->disable_intrs(ha);
88c26663 2233
4ea2c9c7
JC
2234 qla25xx_manipulate_risc_semaphore(vha);
2235
88c26663 2236 /* Perform RISC reset. */
e315cd28 2237 qla24xx_reset_risc(vha);
88c26663
AV
2238}
2239
1da177e4
LT
2240/**
2241 * qla2x00_chip_diag() - Test chip for proper operation.
2242 * @ha: HA context
2243 *
2244 * Returns 0 on success.
2245 */
abbd8870 2246int
e315cd28 2247qla2x00_chip_diag(scsi_qla_host_t *vha)
1da177e4
LT
2248{
2249 int rval;
e315cd28 2250 struct qla_hw_data *ha = vha->hw;
3d71644c 2251 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
2252 unsigned long flags = 0;
2253 uint16_t data;
2254 uint32_t cnt;
2255 uint16_t mb[5];
73208dfd 2256 struct req_que *req = ha->req_q_map[0];
1da177e4
LT
2257
2258 /* Assume a failed state */
2259 rval = QLA_FUNCTION_FAILED;
2260
7c3df132
SK
2261 ql_dbg(ql_dbg_init, vha, 0x007b,
2262 "Testing device at %lx.\n", (u_long)&reg->flash_address);
1da177e4
LT
2263
2264 spin_lock_irqsave(&ha->hardware_lock, flags);
2265
2266 /* Reset ISP chip. */
2267 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
2268
2269 /*
2270 * We need to have a delay here since the card will not respond while
 2271	 * in reset, causing an MCA on some architectures.
2272 */
2273 udelay(20);
2274 data = qla2x00_debounce_register(&reg->ctrl_status);
2275 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
2276 udelay(5);
2277 data = RD_REG_WORD(&reg->ctrl_status);
2278 barrier();
2279 }
2280
2281 if (!cnt)
2282 goto chip_diag_failed;
2283
7c3df132
SK
2284 ql_dbg(ql_dbg_init, vha, 0x007c,
2285 "Reset register cleared by chip reset.\n");
1da177e4
LT
2286
2287 /* Reset RISC processor. */
2288 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
2289 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2290
2291 /* Workaround for QLA2312 PCI parity error */
2292 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2293 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
2294 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
2295 udelay(5);
2296 data = RD_MAILBOX_REG(ha, reg, 0);
fa2a1ce5 2297 barrier();
1da177e4
LT
2298 }
2299 } else
2300 udelay(10);
2301
2302 if (!cnt)
2303 goto chip_diag_failed;
2304
2305 /* Check product ID of chip */
5a68a1c2 2306 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
1da177e4
LT
2307
2308 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
2309 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
2310 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
2311 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
2312 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
2313 mb[3] != PROD_ID_3) {
7c3df132
SK
2314 ql_log(ql_log_warn, vha, 0x0062,
2315 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
2316 mb[1], mb[2], mb[3]);
1da177e4
LT
2317
2318 goto chip_diag_failed;
2319 }
2320 ha->product_id[0] = mb[1];
2321 ha->product_id[1] = mb[2];
2322 ha->product_id[2] = mb[3];
2323 ha->product_id[3] = mb[4];
2324
2325 /* Adjust fw RISC transfer size */
73208dfd 2326 if (req->length > 1024)
1da177e4
LT
2327 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
2328 else
2329 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
73208dfd 2330 req->length;
1da177e4
LT
2331
2332 if (IS_QLA2200(ha) &&
2333 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
2334 /* Limit firmware transfer size with a 2200A */
7c3df132 2335 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1da177e4 2336
ea5b6382 2337 ha->device_type |= DT_ISP2200A;
1da177e4
LT
2338 ha->fw_transfer_size = 128;
2339 }
2340
2341 /* Wrap Incoming Mailboxes Test. */
2342 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2343
7c3df132 2344 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
e315cd28 2345 rval = qla2x00_mbx_reg_test(vha);
7c3df132
SK
2346 if (rval)
2347 ql_log(ql_log_warn, vha, 0x0080,
2348 "Failed mailbox send register test.\n");
2349 else
1da177e4
LT
2350 /* Flag a successful rval */
2351 rval = QLA_SUCCESS;
1da177e4
LT
2352 spin_lock_irqsave(&ha->hardware_lock, flags);
2353
2354chip_diag_failed:
2355 if (rval)
7c3df132
SK
2356 ql_log(ql_log_info, vha, 0x0081,
2357 "Chip diagnostics **** FAILED ****.\n");
1da177e4
LT
2358
2359 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2360
2361 return (rval);
2362}
2363
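/*
 * Editor's note: qla2x00_chip_diag() reads mailbox registers through
 * qla2x00_debounce_register(), i.e. it re-reads until two consecutive reads
 * agree so a value caught mid-update is never trusted. This is a generic,
 * self-contained illustration of that debounce idea with a caller-supplied
 * read function; it is not the driver's helper.
 */
#include <stdint.h>

static uint16_t ex_debounce_read(uint16_t (*read_reg)(void))
{
	uint16_t first, second;

	do {
		first = read_reg();
		second = read_reg();
	} while (first != second);	/* loop until the value is stable */

	return first;
}

static uint16_t ex_read_mailbox0(void)
{
	return 0x4953;			/* hypothetical, already-stable value */
}

int main(void)
{
	return ex_debounce_read(ex_read_mailbox0) == 0x4953 ? 0 : 1;
}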
0107109e
AV
2364/**
2365 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
2366 * @ha: HA context
2367 *
2368 * Returns 0 on success.
2369 */
2370int
e315cd28 2371qla24xx_chip_diag(scsi_qla_host_t *vha)
0107109e
AV
2372{
2373 int rval;
e315cd28 2374 struct qla_hw_data *ha = vha->hw;
73208dfd 2375 struct req_que *req = ha->req_q_map[0];
0107109e 2376
7ec0effd 2377 if (IS_P3P_TYPE(ha))
a9083016
GM
2378 return QLA_SUCCESS;
2379
73208dfd 2380 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
0107109e 2381
e315cd28 2382 rval = qla2x00_mbx_reg_test(vha);
0107109e 2383 if (rval) {
7c3df132
SK
2384 ql_log(ql_log_warn, vha, 0x0082,
2385 "Failed mailbox send register test.\n");
0107109e
AV
2386 } else {
2387 /* Flag a successful rval */
2388 rval = QLA_SUCCESS;
2389 }
2390
2391 return rval;
2392}
2393
a7a167bf 2394void
e315cd28 2395qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
0107109e 2396{
a7a167bf
AV
2397 int rval;
2398 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
73208dfd 2399 eft_size, fce_size, mq_size;
df613b96
AV
2400 dma_addr_t tc_dma;
2401 void *tc;
e315cd28 2402 struct qla_hw_data *ha = vha->hw;
73208dfd
AC
2403 struct req_que *req = ha->req_q_map[0];
2404 struct rsp_que *rsp = ha->rsp_q_map[0];
a7a167bf
AV
2405
2406 if (ha->fw_dump) {
7c3df132
SK
2407 ql_dbg(ql_dbg_init, vha, 0x00bd,
2408 "Firmware dump already allocated.\n");
a7a167bf
AV
2409 return;
2410 }
d4e3e04d 2411
0107109e 2412 ha->fw_dumped = 0;
61f098dd 2413 ha->fw_dump_cap_flags = 0;
f73cb695
CD
2414 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
2415 req_q_size = rsp_q_size = 0;
2416
2417 if (IS_QLA27XX(ha))
2418 goto try_fce;
2419
d4e3e04d 2420 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
a7a167bf 2421 fixed_size = sizeof(struct qla2100_fw_dump);
d4e3e04d 2422 } else if (IS_QLA23XX(ha)) {
a7a167bf
AV
2423 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
2424 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
2425 sizeof(uint16_t);
e428924c 2426 } else if (IS_FWI2_CAPABLE(ha)) {
b20f02e1 2427 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
6246b8a1
GM
2428 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
2429 else if (IS_QLA81XX(ha))
3a03eb79
AV
2430 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
2431 else if (IS_QLA25XX(ha))
2432 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
2433 else
2434 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
f73cb695 2435
a7a167bf
AV
2436 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
2437 sizeof(uint32_t);
050c9bb1 2438 if (ha->mqenable) {
b20f02e1 2439 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
6246b8a1 2440 mq_size = sizeof(struct qla2xxx_mq_chain);
050c9bb1
GM
2441 /*
2442 * Allocate maximum buffer size for all queues.
2443 * Resizing must be done at end-of-dump processing.
2444 */
2445 mq_size += ha->max_req_queues *
2446 (req->length * sizeof(request_t));
2447 mq_size += ha->max_rsp_queues *
2448 (rsp->length * sizeof(response_t));
2449 }
00876ae8 2450 if (ha->tgt.atio_ring)
2d70c103 2451 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
df613b96 2452 /* Allocate memory for Fibre Channel Event Buffer. */
f73cb695
CD
2453 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
2454 !IS_QLA27XX(ha))
436a7b11 2455 goto try_eft;
df613b96 2456
f73cb695
CD
2457try_fce:
2458 if (ha->fce)
2459 dma_free_coherent(&ha->pdev->dev,
2460 FCE_SIZE, ha->fce, ha->fce_dma);
2461
2462 /* Allocate memory for Fibre Channel Event Buffer. */
0ea85b50
JP
2463 tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
2464 GFP_KERNEL);
df613b96 2465 if (!tc) {
7c3df132
SK
2466 ql_log(ql_log_warn, vha, 0x00be,
2467 "Unable to allocate (%d KB) for FCE.\n",
2468 FCE_SIZE / 1024);
17d98630 2469 goto try_eft;
df613b96
AV
2470 }
2471
e315cd28 2472 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
df613b96
AV
2473 ha->fce_mb, &ha->fce_bufs);
2474 if (rval) {
7c3df132
SK
2475 ql_log(ql_log_warn, vha, 0x00bf,
2476 "Unable to initialize FCE (%d).\n", rval);
df613b96
AV
2477 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
2478 tc_dma);
2479 ha->flags.fce_enabled = 0;
17d98630 2480 goto try_eft;
df613b96 2481 }
cfb0919c 2482 ql_dbg(ql_dbg_init, vha, 0x00c0,
7c3df132 2483 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
df613b96 2484
7d9dade3 2485 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
df613b96
AV
2486 ha->flags.fce_enabled = 1;
2487 ha->fce_dma = tc_dma;
2488 ha->fce = tc;
f73cb695 2489
436a7b11 2490try_eft:
f73cb695
CD
2491 if (ha->eft)
2492 dma_free_coherent(&ha->pdev->dev,
2493 EFT_SIZE, ha->eft, ha->eft_dma);
2494
436a7b11 2495 /* Allocate memory for Extended Trace Buffer. */
0ea85b50
JP
2496 tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
2497 GFP_KERNEL);
436a7b11 2498 if (!tc) {
7c3df132
SK
2499 ql_log(ql_log_warn, vha, 0x00c1,
2500 "Unable to allocate (%d KB) for EFT.\n",
2501 EFT_SIZE / 1024);
436a7b11
AV
2502 goto cont_alloc;
2503 }
2504
e315cd28 2505 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
436a7b11 2506 if (rval) {
7c3df132
SK
2507 ql_log(ql_log_warn, vha, 0x00c2,
2508 "Unable to initialize EFT (%d).\n", rval);
436a7b11
AV
2509 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
2510 tc_dma);
2511 goto cont_alloc;
2512 }
cfb0919c 2513 ql_dbg(ql_dbg_init, vha, 0x00c3,
7c3df132 2514 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
436a7b11
AV
2515
2516 eft_size = EFT_SIZE;
2517 ha->eft_dma = tc_dma;
2518 ha->eft = tc;
d4e3e04d 2519 }
f73cb695 2520
a7a167bf 2521cont_alloc:
f73cb695
CD
2522 if (IS_QLA27XX(ha)) {
2523 if (!ha->fw_dump_template) {
2524 ql_log(ql_log_warn, vha, 0x00ba,
2525 "Failed missing fwdump template\n");
2526 return;
2527 }
2528 dump_size = qla27xx_fwdt_calculate_dump_size(vha);
2529 ql_dbg(ql_dbg_init, vha, 0x00fa,
2530 "-> allocating fwdump (%x bytes)...\n", dump_size);
2531 goto allocate;
2532 }
2533
73208dfd
AC
2534 req_q_size = req->length * sizeof(request_t);
2535 rsp_q_size = rsp->length * sizeof(response_t);
a7a167bf 2536 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
2afa19a9 2537 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
bb99de67
AV
2538 ha->chain_offset = dump_size;
2539 dump_size += mq_size + fce_size;
d4e3e04d 2540
f73cb695 2541allocate:
d4e3e04d 2542 ha->fw_dump = vmalloc(dump_size);
a7a167bf 2543 if (!ha->fw_dump) {
7c3df132
SK
2544 ql_log(ql_log_warn, vha, 0x00c4,
2545 "Unable to allocate (%d KB) for firmware dump.\n",
2546 dump_size / 1024);
a7a167bf 2547
e30d1756
MI
2548 if (ha->fce) {
2549 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2550 ha->fce_dma);
2551 ha->fce = NULL;
2552 ha->fce_dma = 0;
2553 }
2554
a7a167bf
AV
2555 if (ha->eft) {
2556 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
2557 ha->eft_dma);
2558 ha->eft = NULL;
2559 ha->eft_dma = 0;
2560 }
2561 return;
2562 }
f73cb695 2563 ha->fw_dump_len = dump_size;
cfb0919c 2564 ql_dbg(ql_dbg_init, vha, 0x00c5,
7c3df132 2565 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
a7a167bf 2566
f73cb695
CD
2567 if (IS_QLA27XX(ha))
2568 return;
2569
a7a167bf
AV
2570 ha->fw_dump->signature[0] = 'Q';
2571 ha->fw_dump->signature[1] = 'L';
2572 ha->fw_dump->signature[2] = 'G';
2573 ha->fw_dump->signature[3] = 'C';
ad950360 2574 ha->fw_dump->version = htonl(1);
a7a167bf
AV
2575
2576 ha->fw_dump->fixed_size = htonl(fixed_size);
2577 ha->fw_dump->mem_size = htonl(mem_size);
2578 ha->fw_dump->req_q_size = htonl(req_q_size);
2579 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
2580
2581 ha->fw_dump->eft_size = htonl(eft_size);
2582 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
2583 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
2584
2585 ha->fw_dump->header_size =
2586 htonl(offsetof(struct qla2xxx_fw_dump, isp));
0107109e
AV
2587}
2588
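/*
 * Editor's note: qla2x00_alloc_fw_dump() sizes the dump as a fixed chip
 * section plus external memory plus the request/response rings (plus any
 * trace buffers), then records each component size big-endian (htonl) in the
 * dump header. The struct and numbers below are hypothetical placeholders
 * that only demonstrate that accounting; they do not match the real
 * qla2xxx_fw_dump layout.
 */
#include <arpa/inet.h>		/* htonl() */
#include <inttypes.h>
#include <stdio.h>

struct ex_dump_header {
	uint32_t fixed_size;	/* all fields stored big-endian */
	uint32_t mem_size;
	uint32_t req_q_size;
	uint32_t rsp_q_size;
};

int main(void)
{
	uint32_t fixed = 0x1000, mem = 0x20000;		/* made-up sizes */
	uint32_t req_q = 2048 * 64, rsp_q = 512 * 64;	/* entries * entry size */
	uint32_t total = fixed + mem + req_q + rsp_q;
	struct ex_dump_header hdr = {
		.fixed_size = htonl(fixed),
		.mem_size   = htonl(mem),
		.req_q_size = htonl(req_q),
		.rsp_q_size = htonl(rsp_q),
	};

	printf("would vmalloc %" PRIu32 " bytes (%" PRIu32 " KB) for the dump\n",
	    total, total / 1024);
	printf("fixed_size on the wire = 0x%08" PRIx32 "\n", hdr.fixed_size);
	return 0;
}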
18e7555a
AV
2589static int
2590qla81xx_mpi_sync(scsi_qla_host_t *vha)
2591{
2592#define MPS_MASK 0xe0
2593 int rval;
2594 uint16_t dc;
2595 uint32_t dw;
18e7555a
AV
2596
2597 if (!IS_QLA81XX(vha->hw))
2598 return QLA_SUCCESS;
2599
2600 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
2601 if (rval != QLA_SUCCESS) {
7c3df132
SK
2602 ql_log(ql_log_warn, vha, 0x0105,
2603 "Unable to acquire semaphore.\n");
18e7555a
AV
2604 goto done;
2605 }
2606
2607 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
2608 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
2609 if (rval != QLA_SUCCESS) {
7c3df132 2610 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
18e7555a
AV
2611 goto done_release;
2612 }
2613
2614 dc &= MPS_MASK;
2615 if (dc == (dw & MPS_MASK))
2616 goto done_release;
2617
2618 dw &= ~MPS_MASK;
2619 dw |= dc;
2620 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
2621 if (rval != QLA_SUCCESS) {
7c3df132 2622 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
18e7555a
AV
2623 }
2624
2625done_release:
2626 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
2627 if (rval != QLA_SUCCESS) {
7c3df132
SK
2628 ql_log(ql_log_warn, vha, 0x006d,
2629 "Unable to release semaphore.\n");
18e7555a
AV
2630 }
2631
2632done:
2633 return rval;
2634}
2635
8d93f550
CD
2636int
2637qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
2638{
2639 /* Don't try to reallocate the array */
2640 if (req->outstanding_cmds)
2641 return QLA_SUCCESS;
2642
d7459527 2643 if (!IS_FWI2_CAPABLE(ha))
8d93f550
CD
2644 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
2645 else {
03e8c680
QT
2646 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
2647 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
8d93f550 2648 else
03e8c680 2649 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
8d93f550
CD
2650 }
2651
2652 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
2653 req->num_outstanding_cmds, GFP_KERNEL);
2654
2655 if (!req->outstanding_cmds) {
2656 /*
2657 * Try to allocate a minimal size just so we can get through
2658 * initialization.
2659 */
2660 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
2661 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
2662 req->num_outstanding_cmds, GFP_KERNEL);
2663
2664 if (!req->outstanding_cmds) {
2665 ql_log(ql_log_fatal, NULL, 0x0126,
2666 "Failed to allocate memory for "
2667 "outstanding_cmds for req_que %p.\n", req);
2668 req->num_outstanding_cmds = 0;
2669 return QLA_FUNCTION_FAILED;
2670 }
2671 }
2672
2673 return QLA_SUCCESS;
2674}
2675
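/*
 * Editor's note: qla2x00_alloc_outstanding_cmds() first tries to size the
 * outstanding-command array from the firmware's exchange/IOCB counts and, if
 * that allocation fails, falls back to a small minimum so initialization can
 * still complete. The sketch below shows the same try-big-then-fall-back
 * pattern with calloc(); the EX_* limits and struct are illustrative, not
 * the driver's definitions.
 */
#include <stdlib.h>

#define EX_DEFAULT_CMDS	4096
#define EX_MIN_CMDS	128

struct ex_req {
	void **outstanding_cmds;
	unsigned int num_outstanding_cmds;
};

static int ex_alloc_outstanding(struct ex_req *req, unsigned int fw_limit)
{
	if (req->outstanding_cmds)
		return 0;			/* don't reallocate the array */

	req->num_outstanding_cmds = fw_limit ? fw_limit : EX_DEFAULT_CMDS;
	req->outstanding_cmds = calloc(req->num_outstanding_cmds,
	    sizeof(void *));
	if (!req->outstanding_cmds) {
		/* Fall back to a minimal array just to get through init. */
		req->num_outstanding_cmds = EX_MIN_CMDS;
		req->outstanding_cmds = calloc(EX_MIN_CMDS, sizeof(void *));
		if (!req->outstanding_cmds) {
			req->num_outstanding_cmds = 0;
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct ex_req req = { 0 };

	return ex_alloc_outstanding(&req, 2048);
}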
1da177e4
LT
2676/**
2677 * qla2x00_setup_chip() - Load and start RISC firmware.
2678 * @ha: HA context
2679 *
2680 * Returns 0 on success.
2681 */
2682static int
e315cd28 2683qla2x00_setup_chip(scsi_qla_host_t *vha)
1da177e4 2684{
0107109e
AV
2685 int rval;
2686 uint32_t srisc_address = 0;
e315cd28 2687 struct qla_hw_data *ha = vha->hw;
3db0652e
AV
2688 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2689 unsigned long flags;
dda772e8 2690 uint16_t fw_major_version;
3db0652e 2691
7ec0effd 2692 if (IS_P3P_TYPE(ha)) {
a9083016 2693 rval = ha->isp_ops->load_risc(vha, &srisc_address);
14e303d9
AV
2694 if (rval == QLA_SUCCESS) {
2695 qla2x00_stop_firmware(vha);
a9083016 2696 goto enable_82xx_npiv;
14e303d9 2697 } else
b963752f 2698 goto failed;
a9083016
GM
2699 }
2700
3db0652e
AV
2701 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
2702 /* Disable SRAM, Instruction RAM and GP RAM parity. */
2703 spin_lock_irqsave(&ha->hardware_lock, flags);
2704 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
2705 RD_REG_WORD(&reg->hccr);
2706 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2707 }
1da177e4 2708
18e7555a
AV
2709 qla81xx_mpi_sync(vha);
2710
1da177e4 2711 /* Load firmware sequences */
e315cd28 2712 rval = ha->isp_ops->load_risc(vha, &srisc_address);
0107109e 2713 if (rval == QLA_SUCCESS) {
7c3df132
SK
2714 ql_dbg(ql_dbg_init, vha, 0x00c9,
2715 "Verifying Checksum of loaded RISC code.\n");
1da177e4 2716
e315cd28 2717 rval = qla2x00_verify_checksum(vha, srisc_address);
1da177e4
LT
2718 if (rval == QLA_SUCCESS) {
2719 /* Start firmware execution. */
7c3df132
SK
2720 ql_dbg(ql_dbg_init, vha, 0x00ca,
2721 "Starting firmware.\n");
1da177e4 2722
b0d6cabd
HM
2723 if (ql2xexlogins)
2724 ha->flags.exlogins_enabled = 1;
2725
99e1b683 2726 if (qla_is_exch_offld_enabled(vha))
2f56a7f1
HM
2727 ha->flags.exchoffld_enabled = 1;
2728
e315cd28 2729 rval = qla2x00_execute_fw(vha, srisc_address);
1da177e4 2730 /* Retrieve firmware information. */
dda772e8 2731 if (rval == QLA_SUCCESS) {
b0d6cabd
HM
2732 rval = qla2x00_set_exlogins_buffer(vha);
2733 if (rval != QLA_SUCCESS)
2734 goto failed;
2735
2f56a7f1
HM
2736 rval = qla2x00_set_exchoffld_buffer(vha);
2737 if (rval != QLA_SUCCESS)
2738 goto failed;
2739
a9083016 2740enable_82xx_npiv:
dda772e8 2741 fw_major_version = ha->fw_major_version;
7ec0effd 2742 if (IS_P3P_TYPE(ha))
3173167f 2743 qla82xx_check_md_needed(vha);
6246b8a1
GM
2744 else
2745 rval = qla2x00_get_fw_version(vha);
ca9e9c3e
AV
2746 if (rval != QLA_SUCCESS)
2747 goto failed;
2c3dfe3f 2748 ha->flags.npiv_supported = 0;
e315cd28 2749 if (IS_QLA2XXX_MIDTYPE(ha) &&
946fb891 2750 (ha->fw_attributes & BIT_2)) {
2c3dfe3f 2751 ha->flags.npiv_supported = 1;
4d0ea247
SJ
2752 if ((!ha->max_npiv_vports) ||
2753 ((ha->max_npiv_vports + 1) %
eb66dc60 2754 MIN_MULTI_ID_FABRIC))
4d0ea247 2755 ha->max_npiv_vports =
eb66dc60 2756 MIN_MULTI_ID_FABRIC - 1;
4d0ea247 2757 }
03e8c680 2758 qla2x00_get_resource_cnts(vha);
d743de66 2759
8d93f550
CD
2760 /*
2761 * Allocate the array of outstanding commands
2762 * now that we know the firmware resources.
2763 */
2764 rval = qla2x00_alloc_outstanding_cmds(ha,
2765 vha->req);
2766 if (rval != QLA_SUCCESS)
2767 goto failed;
2768
be5ea3cf 2769 if (!fw_major_version && ql2xallocfwdump
7ec0effd 2770 && !(IS_P3P_TYPE(ha)))
08de2844 2771 qla2x00_alloc_fw_dump(vha);
3b6e5b9d
CD
2772 } else {
2773 goto failed;
1da177e4
LT
2774 }
2775 } else {
7c3df132
SK
2776 ql_log(ql_log_fatal, vha, 0x00cd,
2777 "ISP Firmware failed checksum.\n");
2778 goto failed;
1da177e4 2779 }
c74d88a4
AV
2780 } else
2781 goto failed;
1da177e4 2782
3db0652e
AV
2783 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
2784 /* Enable proper parity. */
2785 spin_lock_irqsave(&ha->hardware_lock, flags);
2786 if (IS_QLA2300(ha))
2787 /* SRAM parity */
2788 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
2789 else
2790 /* SRAM, Instruction RAM and GP RAM parity */
2791 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
2792 RD_REG_WORD(&reg->hccr);
2793 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2794 }
2795
f3982d89
CD
2796 if (IS_QLA27XX(ha))
2797 ha->flags.fac_supported = 1;
2798 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1d2874de
JC
2799 uint32_t size;
2800
2801 rval = qla81xx_fac_get_sector_size(vha, &size);
2802 if (rval == QLA_SUCCESS) {
2803 ha->flags.fac_supported = 1;
2804 ha->fdt_block_size = size << 2;
2805 } else {
7c3df132 2806 ql_log(ql_log_warn, vha, 0x00ce,
1d2874de
JC
2807 "Unsupported FAC firmware (%d.%02d.%02d).\n",
2808 ha->fw_major_version, ha->fw_minor_version,
2809 ha->fw_subminor_version);
1ca60e3b 2810
f73cb695 2811 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
6246b8a1
GM
2812 ha->flags.fac_supported = 0;
2813 rval = QLA_SUCCESS;
2814 }
1d2874de
JC
2815 }
2816 }
ca9e9c3e 2817failed:
1da177e4 2818 if (rval) {
7c3df132
SK
2819 ql_log(ql_log_fatal, vha, 0x00cf,
2820 "Setup chip ****FAILED****.\n");
1da177e4
LT
2821 }
2822
2823 return (rval);
2824}
2825
2826/**
2827 * qla2x00_init_response_q_entries() - Initializes response queue entries.
2828 * @ha: HA context
2829 *
2830 * Beginning of request ring has initialization control block already built
2831 * by nvram config routine.
2832 *
2833 * Returns 0 on success.
2834 */
73208dfd
AC
2835void
2836qla2x00_init_response_q_entries(struct rsp_que *rsp)
1da177e4
LT
2837{
2838 uint16_t cnt;
2839 response_t *pkt;
2840
2afa19a9
AC
2841 rsp->ring_ptr = rsp->ring;
2842 rsp->ring_index = 0;
2843 rsp->status_srb = NULL;
e315cd28
AC
2844 pkt = rsp->ring_ptr;
2845 for (cnt = 0; cnt < rsp->length; cnt++) {
1da177e4
LT
2846 pkt->signature = RESPONSE_PROCESSED;
2847 pkt++;
2848 }
1da177e4
LT
2849}
2850
2851/**
2852 * qla2x00_update_fw_options() - Read and process firmware options.
2853 * @ha: HA context
2854 *
2855 * Returns 0 on success.
2856 */
abbd8870 2857void
e315cd28 2858qla2x00_update_fw_options(scsi_qla_host_t *vha)
1da177e4
LT
2859{
2860 uint16_t swing, emphasis, tx_sens, rx_sens;
e315cd28 2861 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
2862
2863 memset(ha->fw_options, 0, sizeof(ha->fw_options));
e315cd28 2864 qla2x00_get_fw_options(vha, ha->fw_options);
1da177e4
LT
2865
2866 if (IS_QLA2100(ha) || IS_QLA2200(ha))
2867 return;
2868
2869 /* Serial Link options. */
7c3df132
SK
2870 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
2871 "Serial link options.\n");
2872 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
2873 (uint8_t *)&ha->fw_seriallink_options,
2874 sizeof(ha->fw_seriallink_options));
1da177e4
LT
2875
2876 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
2877 if (ha->fw_seriallink_options[3] & BIT_2) {
2878 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
2879
2880 /* 1G settings */
2881 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
2882 emphasis = (ha->fw_seriallink_options[2] &
2883 (BIT_4 | BIT_3)) >> 3;
2884 tx_sens = ha->fw_seriallink_options[0] &
fa2a1ce5 2885 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
2886 rx_sens = (ha->fw_seriallink_options[0] &
2887 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
2888 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
2889 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
2890 if (rx_sens == 0x0)
2891 rx_sens = 0x3;
2892 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
2893 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
2894 ha->fw_options[10] |= BIT_5 |
2895 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
2896 (tx_sens & (BIT_1 | BIT_0));
2897
2898 /* 2G settings */
2899 swing = (ha->fw_seriallink_options[2] &
2900 (BIT_7 | BIT_6 | BIT_5)) >> 5;
2901 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
2902 tx_sens = ha->fw_seriallink_options[1] &
fa2a1ce5 2903 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1da177e4
LT
2904 rx_sens = (ha->fw_seriallink_options[1] &
2905 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
2906 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
2907 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
2908 if (rx_sens == 0x0)
2909 rx_sens = 0x3;
2910 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
2911 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
2912 ha->fw_options[11] |= BIT_5 |
2913 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
2914 (tx_sens & (BIT_1 | BIT_0));
2915 }
2916
2917 /* FCP2 options. */
2918 /* Return command IOCBs without waiting for an ABTS to complete. */
2919 ha->fw_options[3] |= BIT_13;
2920
2921 /* LED scheme. */
2922 if (ha->flags.enable_led_scheme)
2923 ha->fw_options[2] |= BIT_12;
2924
48c02fde 2925 /* Detect ISP6312. */
2926 if (IS_QLA6312(ha))
2927 ha->fw_options[2] |= BIT_13;
2928
088d09d4
GM
2929 /* Set Retry FLOGI in case of P2P connection */
2930 if (ha->operating_mode == P2P) {
2931 ha->fw_options[2] |= BIT_3;
2932 ql_dbg(ql_dbg_disc, vha, 0x2100,
2933 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
2934 __func__, ha->fw_options[2]);
2935 }
2936
1da177e4 2937 /* Update firmware options. */
e315cd28 2938 qla2x00_set_fw_options(vha, ha->fw_options);
1da177e4
LT
2939}
2940
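/*
 * Editor's note: the serial-link handling in qla2x00_update_fw_options() is
 * bit packing -- swing, emphasis and tx/rx sensitivity fields are pulled out
 * of the NVRAM option bytes and recombined into one 16-bit firmware option
 * word as (emphasis << 14) | (swing << 8) | (tx_sens << 4) | rx_sens. This
 * stand-alone sketch shows only that packing; the field widths follow the
 * masks used above and the sample inputs are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t ex_pack_serial_link(uint8_t swing, uint8_t emphasis,
				    uint8_t tx_sens, uint8_t rx_sens)
{
	if (rx_sens == 0)	/* same 0 -> 0x3 quirk as the ISP23xx path */
		rx_sens = 0x3;

	return (uint16_t)(((emphasis & 0x3) << 14) | ((swing & 0x7) << 8) |
			  ((tx_sens & 0xf) << 4) | (rx_sens & 0xf));
}

int main(void)
{
	printf("fw option word = 0x%04x\n",
	    ex_pack_serial_link(0x2, 0x1, 0x5, 0x0));
	return 0;
}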
0107109e 2941void
e315cd28 2942qla24xx_update_fw_options(scsi_qla_host_t *vha)
0107109e
AV
2943{
2944 int rval;
e315cd28 2945 struct qla_hw_data *ha = vha->hw;
0107109e 2946
7ec0effd 2947 if (IS_P3P_TYPE(ha))
a9083016
GM
2948 return;
2949
f198cafa
HM
2950 /* Hold status IOCBs until ABTS response received. */
2951 if (ql2xfwholdabts)
2952 ha->fw_options[3] |= BIT_12;
2953
088d09d4
GM
2954 /* Set Retry FLOGI in case of P2P connection */
2955 if (ha->operating_mode == P2P) {
2956 ha->fw_options[2] |= BIT_3;
2957 ql_dbg(ql_dbg_disc, vha, 0x2101,
2958 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
2959 __func__, ha->fw_options[2]);
2960 }
2961
41dc529a 2962 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
3c4810ff
QT
2963 if (ql2xmvasynctoatio &&
2964 (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
41dc529a
QT
2965 if (qla_tgt_mode_enabled(vha) ||
2966 qla_dual_mode_enabled(vha))
2967 ha->fw_options[2] |= BIT_11;
2968 else
2969 ha->fw_options[2] &= ~BIT_11;
2970 }
2971
f7e761f5
QT
2972 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2973 /*
2974 * Tell FW to track each exchange to prevent
2975 * driver from using stale exchange.
2976 */
2977 if (qla_tgt_mode_enabled(vha) ||
2978 qla_dual_mode_enabled(vha))
2979 ha->fw_options[2] |= BIT_4;
2980 else
2981 ha->fw_options[2] &= ~BIT_4;
2982 }
2983
83548fe2
QT
2984 ql_dbg(ql_dbg_init, vha, 0x00e8,
2985 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
2986 __func__, ha->fw_options[1], ha->fw_options[2],
2987 ha->fw_options[3], vha->host->active_mode);
3c4810ff
QT
2988
2989 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
2990 qla2x00_set_fw_options(vha, ha->fw_options);
41dc529a 2991
0107109e 2992 /* Update Serial Link options. */
f94097ed 2993 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
0107109e
AV
2994 return;
2995
e315cd28 2996 rval = qla2x00_set_serdes_params(vha,
f94097ed 2997 le16_to_cpu(ha->fw_seriallink_options24[1]),
2998 le16_to_cpu(ha->fw_seriallink_options24[2]),
2999 le16_to_cpu(ha->fw_seriallink_options24[3]));
0107109e 3000 if (rval != QLA_SUCCESS) {
7c3df132 3001 ql_log(ql_log_warn, vha, 0x0104,
0107109e
AV
3002 "Unable to update Serial Link options (%x).\n", rval);
3003 }
3004}
3005
abbd8870 3006void
e315cd28 3007qla2x00_config_rings(struct scsi_qla_host *vha)
abbd8870 3008{
e315cd28 3009 struct qla_hw_data *ha = vha->hw;
3d71644c 3010 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
73208dfd
AC
3011 struct req_que *req = ha->req_q_map[0];
3012 struct rsp_que *rsp = ha->rsp_q_map[0];
abbd8870
AV
3013
3014 /* Setup ring parameters in initialization control block. */
ad950360
BVA
3015 ha->init_cb->request_q_outpointer = cpu_to_le16(0);
3016 ha->init_cb->response_q_inpointer = cpu_to_le16(0);
e315cd28
AC
3017 ha->init_cb->request_q_length = cpu_to_le16(req->length);
3018 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
3019 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
3020 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
3021 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
3022 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
abbd8870
AV
3023
3024 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
3025 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
3026 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
3027 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
3028 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
3029}
3030
0107109e 3031void
e315cd28 3032qla24xx_config_rings(struct scsi_qla_host *vha)
0107109e 3033{
e315cd28 3034 struct qla_hw_data *ha = vha->hw;
118e2ef9 3035 device_reg_t *reg = ISP_QUE_REG(ha, 0);
73208dfd
AC
3036 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
3037 struct qla_msix_entry *msix;
0107109e 3038 struct init_cb_24xx *icb;
73208dfd
AC
3039 uint16_t rid = 0;
3040 struct req_que *req = ha->req_q_map[0];
3041 struct rsp_que *rsp = ha->rsp_q_map[0];
0107109e 3042
6246b8a1 3043 /* Setup ring parameters in initialization control block. */
0107109e 3044 icb = (struct init_cb_24xx *)ha->init_cb;
ad950360
BVA
3045 icb->request_q_outpointer = cpu_to_le16(0);
3046 icb->response_q_inpointer = cpu_to_le16(0);
e315cd28
AC
3047 icb->request_q_length = cpu_to_le16(req->length);
3048 icb->response_q_length = cpu_to_le16(rsp->length);
3049 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
3050 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
3051 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
3052 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
0107109e 3053
2d70c103 3054 /* Setup ATIO queue dma pointers for target mode */
ad950360 3055 icb->atio_q_inpointer = cpu_to_le16(0);
2d70c103
NB
3056 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
3057 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
3058 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
3059
7c6300e3 3060 if (IS_SHADOW_REG_CAPABLE(ha))
ad950360 3061 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
7c6300e3 3062
f73cb695 3063 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ad950360
BVA
3064 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
3065 icb->rid = cpu_to_le16(rid);
73208dfd
AC
3066 if (ha->flags.msix_enabled) {
3067 msix = &ha->msix_entries[1];
83548fe2 3068 ql_dbg(ql_dbg_init, vha, 0x0019,
7c3df132
SK
3069 "Registering vector 0x%x for base que.\n",
3070 msix->entry);
73208dfd
AC
3071 icb->msix = cpu_to_le16(msix->entry);
3072 }
3073 /* Use alternate PCI bus number */
3074 if (MSB(rid))
ad950360 3075 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
73208dfd
AC
3076 /* Use alternate PCI devfn */
3077 if (LSB(rid))
ad950360 3078 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
73208dfd 3079
3155754a 3080 /* Use Disable MSIX Handshake mode for capable adapters */
6246b8a1
GM
3081 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
3082 (ha->flags.msix_enabled)) {
ad950360 3083 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
3155754a 3084 ha->flags.disable_msix_handshake = 1;
7c3df132
SK
3085 ql_dbg(ql_dbg_init, vha, 0x00fe,
3086 "MSIX Handshake Disable Mode turned on.\n");
3155754a 3087 } else {
ad950360 3088 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
3155754a 3089 }
ad950360 3090 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
73208dfd
AC
3091
3092 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
3093 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
3094 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
3095 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
3096 } else {
3097 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
3098 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
3099 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
3100 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
3101 }
aa230bc5 3102 qlt_24xx_config_rings(vha);
2d70c103 3103
73208dfd
AC
3104 /* PCI posting */
3105 RD_REG_DWORD(&ioreg->hccr);
0107109e
AV
3106}
3107
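/*
 * Editor's note: both config_rings routines above hand firmware each ring's
 * 64-bit DMA address as two 32-bit words through the driver's LSD()/MSD()
 * macros (low dword / high dword), each then wrapped in cpu_to_le32(). The
 * sketch below shows just the split on a plain 64-bit value; ex_lsd/ex_msd
 * are stand-ins and the endian conversion is omitted.
 */
#include <inttypes.h>
#include <stdio.h>

static uint32_t ex_lsd(uint64_t addr) { return (uint32_t)(addr & 0xffffffffu); }
static uint32_t ex_msd(uint64_t addr) { return (uint32_t)(addr >> 32); }

int main(void)
{
	uint64_t ring_dma = 0x0000000123456000ULL;	/* hypothetical DMA address */

	printf("request_q_address[0] = 0x%08" PRIx32 "\n", ex_lsd(ring_dma));
	printf("request_q_address[1] = 0x%08" PRIx32 "\n", ex_msd(ring_dma));
	return 0;
}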
1da177e4
LT
3108/**
3109 * qla2x00_init_rings() - Initializes firmware.
3110 * @ha: HA context
3111 *
3112 * Beginning of request ring has initialization control block already built
3113 * by nvram config routine.
3114 *
3115 * Returns 0 on success.
3116 */
8ae6d9c7 3117int
e315cd28 3118qla2x00_init_rings(scsi_qla_host_t *vha)
1da177e4
LT
3119{
3120 int rval;
3121 unsigned long flags = 0;
29bdccbe 3122 int cnt, que;
e315cd28 3123 struct qla_hw_data *ha = vha->hw;
29bdccbe
AC
3124 struct req_que *req;
3125 struct rsp_que *rsp;
2c3dfe3f
SJ
3126 struct mid_init_cb_24xx *mid_init_cb =
3127 (struct mid_init_cb_24xx *) ha->init_cb;
1da177e4
LT
3128
3129 spin_lock_irqsave(&ha->hardware_lock, flags);
3130
3131 /* Clear outstanding commands array. */
2afa19a9 3132 for (que = 0; que < ha->max_req_queues; que++) {
29bdccbe 3133 req = ha->req_q_map[que];
cb43285f 3134 if (!req || !test_bit(que, ha->req_qid_map))
29bdccbe 3135 continue;
7c6300e3
JC
3136 req->out_ptr = (void *)(req->ring + req->length);
3137 *req->out_ptr = 0;
8d93f550 3138 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
29bdccbe 3139 req->outstanding_cmds[cnt] = NULL;
1da177e4 3140
2afa19a9 3141 req->current_outstanding_cmd = 1;
1da177e4 3142
29bdccbe
AC
3143 /* Initialize firmware. */
3144 req->ring_ptr = req->ring;
3145 req->ring_index = 0;
3146 req->cnt = req->length;
3147 }
1da177e4 3148
2afa19a9 3149 for (que = 0; que < ha->max_rsp_queues; que++) {
29bdccbe 3150 rsp = ha->rsp_q_map[que];
cb43285f 3151 if (!rsp || !test_bit(que, ha->rsp_qid_map))
29bdccbe 3152 continue;
7c6300e3
JC
3153 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
3154 *rsp->in_ptr = 0;
29bdccbe 3155 /* Initialize response queue entries */
8ae6d9c7
GM
3156 if (IS_QLAFX00(ha))
3157 qlafx00_init_response_q_entries(rsp);
3158 else
3159 qla2x00_init_response_q_entries(rsp);
29bdccbe 3160 }
1da177e4 3161
2d70c103
NB
3162 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
3163 ha->tgt.atio_ring_index = 0;
3164 /* Initialize ATIO queue entries */
3165 qlt_init_atio_q_entries(vha);
3166
e315cd28 3167 ha->isp_ops->config_rings(vha);
1da177e4
LT
3168
3169 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3170
8ae6d9c7
GM
3171 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
3172
3173 if (IS_QLAFX00(ha)) {
3174 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
3175 goto next_check;
3176 }
3177
1da177e4 3178 /* Update any ISP specific firmware options before initialization. */
e315cd28 3179 ha->isp_ops->update_fw_options(vha);
1da177e4 3180
605aa2bc 3181 if (ha->flags.npiv_supported) {
45980cc2 3182 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
605aa2bc 3183 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
c48339de 3184 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
605aa2bc
LC
3185 }
3186
24a08138 3187 if (IS_FWI2_CAPABLE(ha)) {
ad950360 3188 mid_init_cb->options = cpu_to_le16(BIT_1);
24a08138 3189 mid_init_cb->init_cb.execution_throttle =
03e8c680 3190 cpu_to_le16(ha->cur_fw_xcb_count);
40f3862b
JC
3191 ha->flags.dport_enabled =
3192 (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
3193 ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
3194 (ha->flags.dport_enabled) ? "enabled" : "disabled");
3195 /* FA-WWPN Status */
2486c627 3196 ha->flags.fawwpn_enabled =
40f3862b 3197 (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
83548fe2 3198 ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
2486c627 3199 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
24a08138 3200 }
2c3dfe3f 3201
e315cd28 3202 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
8ae6d9c7 3203next_check:
1da177e4 3204 if (rval) {
7c3df132
SK
3205 ql_log(ql_log_fatal, vha, 0x00d2,
3206 "Init Firmware **** FAILED ****.\n");
1da177e4 3207 } else {
7c3df132
SK
3208 ql_dbg(ql_dbg_init, vha, 0x00d3,
3209 "Init Firmware -- success.\n");
4b60c827 3210 QLA_FW_STARTED(ha);
1da177e4
LT
3211 }
3212
3213 return (rval);
3214}
3215
3216/**
3217 * qla2x00_fw_ready() - Waits for firmware ready.
3218 * @ha: HA context
3219 *
3220 * Returns 0 on success.
3221 */
3222static int
e315cd28 3223qla2x00_fw_ready(scsi_qla_host_t *vha)
1da177e4
LT
3224{
3225 int rval;
4d4df193 3226 unsigned long wtime, mtime, cs84xx_time;
1da177e4
LT
3227 uint16_t min_wait; /* Minimum wait time if loop is down */
3228 uint16_t wait_time; /* Wait time if loop is coming ready */
b5a340dd 3229 uint16_t state[6];
e315cd28 3230 struct qla_hw_data *ha = vha->hw;
1da177e4 3231
8ae6d9c7
GM
3232 if (IS_QLAFX00(vha->hw))
3233 return qlafx00_fw_ready(vha);
3234
1da177e4
LT
3235 rval = QLA_SUCCESS;
3236
33461491
CD
3237 /* Time to wait for loop down */
3238 if (IS_P3P_TYPE(ha))
3239 min_wait = 30;
3240 else
3241 min_wait = 20;
1da177e4
LT
3242
3243 /*
3244 * Firmware should take at most one RATOV to login, plus 5 seconds for
3245 * our own processing.
3246 */
3247 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
3248 wait_time = min_wait;
3249 }
3250
3251 /* Min wait time if loop down */
3252 mtime = jiffies + (min_wait * HZ);
3253
3254 /* wait time before firmware ready */
3255 wtime = jiffies + (wait_time * HZ);
3256
3257 /* Wait for ISP to finish LIP */
e315cd28 3258 if (!vha->flags.init_done)
7c3df132
SK
3259 ql_log(ql_log_info, vha, 0x801e,
3260 "Waiting for LIP to complete.\n");
1da177e4
LT
3261
3262 do {
5b939038 3263 memset(state, -1, sizeof(state));
e315cd28 3264 rval = qla2x00_get_firmware_state(vha, state);
1da177e4 3265 if (rval == QLA_SUCCESS) {
4d4df193 3266 if (state[0] < FSTATE_LOSS_OF_SYNC) {
e315cd28 3267 vha->device_flags &= ~DFLG_NO_CABLE;
1da177e4 3268 }
4d4df193 3269 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
7c3df132
SK
3270 ql_dbg(ql_dbg_taskm, vha, 0x801f,
3271 "fw_state=%x 84xx=%x.\n", state[0],
3272 state[2]);
4d4df193
HK
3273 if ((state[2] & FSTATE_LOGGED_IN) &&
3274 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
7c3df132
SK
3275 ql_dbg(ql_dbg_taskm, vha, 0x8028,
3276 "Sending verify iocb.\n");
4d4df193
HK
3277
3278 cs84xx_time = jiffies;
e315cd28 3279 rval = qla84xx_init_chip(vha);
7c3df132
SK
3280 if (rval != QLA_SUCCESS) {
3281 ql_log(ql_log_warn,
cfb0919c 3282 vha, 0x8007,
7c3df132 3283 "Init chip failed.\n");
4d4df193 3284 break;
7c3df132 3285 }
4d4df193
HK
3286
3287 /* Add time taken to initialize. */
3288 cs84xx_time = jiffies - cs84xx_time;
3289 wtime += cs84xx_time;
3290 mtime += cs84xx_time;
cfb0919c 3291 ql_dbg(ql_dbg_taskm, vha, 0x8008,
7c3df132
SK
3292 "Increasing wait time by %ld. "
3293 "New time %ld.\n", cs84xx_time,
3294 wtime);
4d4df193
HK
3295 }
3296 } else if (state[0] == FSTATE_READY) {
7c3df132
SK
3297 ql_dbg(ql_dbg_taskm, vha, 0x8037,
3298 "F/W Ready - OK.\n");
1da177e4 3299
e315cd28 3300 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1da177e4
LT
3301 &ha->login_timeout, &ha->r_a_tov);
3302
3303 rval = QLA_SUCCESS;
3304 break;
3305 }
3306
3307 rval = QLA_FUNCTION_FAILED;
3308
e315cd28 3309 if (atomic_read(&vha->loop_down_timer) &&
4d4df193 3310 state[0] != FSTATE_READY) {
1da177e4 3311 /* Loop down. Timeout on min_wait for states
fa2a1ce5
AV
3312 * other than Wait for Login.
3313 */
1da177e4 3314 if (time_after_eq(jiffies, mtime)) {
7c3df132 3315 ql_log(ql_log_info, vha, 0x8038,
1da177e4
LT
3316 "Cable is unplugged...\n");
3317
e315cd28 3318 vha->device_flags |= DFLG_NO_CABLE;
1da177e4
LT
3319 break;
3320 }
3321 }
3322 } else {
3323 /* Mailbox cmd failed. Timeout on min_wait. */
cdbb0a4f 3324 if (time_after_eq(jiffies, mtime) ||
7190575f 3325 ha->flags.isp82xx_fw_hung)
1da177e4
LT
3326 break;
3327 }
3328
3329 if (time_after_eq(jiffies, wtime))
3330 break;
3331
3332 /* Delay for a while */
3333 msleep(500);
1da177e4
LT
3334 } while (1);
3335
7c3df132 3336 ql_dbg(ql_dbg_taskm, vha, 0x803a,
b5a340dd
JC
3337 "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
3338 state[1], state[2], state[3], state[4], state[5], jiffies);
1da177e4 3339
cfb0919c 3340 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
7c3df132
SK
3341 ql_log(ql_log_warn, vha, 0x803b,
3342 "Firmware ready **** FAILED ****.\n");
1da177e4
LT
3343 }
3344
3345 return (rval);
3346}
3347
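/*
 * Editor's note: qla2x00_fw_ready() juggles two deadlines -- a short minimum
 * wait (mtime) that applies while the link is down or mailbox commands fail,
 * and a longer overall wait (wtime) built from retry_count * login_timeout
 * plus a few seconds of slack. The sketch below is a simplified, hedged
 * rendering of that shape using wall-clock time instead of jiffies, and it
 * applies the short deadline unconditionally, which the driver does not;
 * ex_fw_is_ready() is a hypothetical stand-in for the firmware-state query.
 */
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static int ex_wait_fw_ready(bool (*fw_is_ready)(void),
			    unsigned int min_wait_s, unsigned int wait_time_s)
{
	time_t now = time(NULL);
	time_t mtime = now + min_wait_s;	/* early give-up deadline */
	time_t wtime = now + wait_time_s;	/* absolute upper bound */

	do {
		if (fw_is_ready())
			return 0;		/* firmware reports READY */
		if (time(NULL) >= mtime)
			return -1;		/* treat like "cable unplugged" */
		usleep(500 * 1000);		/* the driver also sleeps 500 ms */
	} while (time(NULL) < wtime);

	return -1;
}

static bool ex_fw_is_ready(void)
{
	return true;				/* pretend FSTATE_READY right away */
}

int main(void)
{
	return ex_wait_fw_ready(ex_fw_is_ready, 20, 45);
}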
3348/*
3349* qla2x00_configure_hba
3350* Setup adapter context.
3351*
3352* Input:
3353* ha = adapter state pointer.
3354*
3355* Returns:
3356* 0 = success
3357*
3358* Context:
3359* Kernel context.
3360*/
3361static int
e315cd28 3362qla2x00_configure_hba(scsi_qla_host_t *vha)
1da177e4
LT
3363{
3364 int rval;
3365 uint16_t loop_id;
3366 uint16_t topo;
2c3dfe3f 3367 uint16_t sw_cap;
1da177e4
LT
3368 uint8_t al_pa;
3369 uint8_t area;
3370 uint8_t domain;
3371 char connect_type[22];
e315cd28 3372 struct qla_hw_data *ha = vha->hw;
61e1b269 3373 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
482c9dc7 3374 port_id_t id;
1da177e4
LT
3375
3376 /* Get host addresses. */
e315cd28 3377 rval = qla2x00_get_adapter_id(vha,
2c3dfe3f 3378 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1da177e4 3379 if (rval != QLA_SUCCESS) {
e315cd28 3380 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
6246b8a1 3381 IS_CNA_CAPABLE(ha) ||
33135aa2 3382 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
7c3df132
SK
3383 ql_dbg(ql_dbg_disc, vha, 0x2008,
3384 "Loop is in a transition state.\n");
33135aa2 3385 } else {
7c3df132
SK
3386 ql_log(ql_log_warn, vha, 0x2009,
3387 "Unable to get host loop ID.\n");
61e1b269
JC
3388 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
3389 (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
3390 ql_log(ql_log_warn, vha, 0x1151,
3391 "Doing link init.\n");
3392 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
3393 return rval;
3394 }
e315cd28 3395 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
33135aa2 3396 }
1da177e4
LT
3397 return (rval);
3398 }
3399
3400 if (topo == 4) {
7c3df132
SK
3401 ql_log(ql_log_info, vha, 0x200a,
3402 "Cannot get topology - retrying.\n");
1da177e4
LT
3403 return (QLA_FUNCTION_FAILED);
3404 }
3405
e315cd28 3406 vha->loop_id = loop_id;
1da177e4
LT
3407
3408 /* initialize */
3409 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
3410 ha->operating_mode = LOOP;
2c3dfe3f 3411 ha->switch_cap = 0;
1da177e4
LT
3412
3413 switch (topo) {
3414 case 0:
7c3df132 3415 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
1da177e4
LT
3416 ha->current_topology = ISP_CFG_NL;
3417 strcpy(connect_type, "(Loop)");
3418 break;
3419
3420 case 1:
7c3df132 3421 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2c3dfe3f 3422 ha->switch_cap = sw_cap;
1da177e4
LT
3423 ha->current_topology = ISP_CFG_FL;
3424 strcpy(connect_type, "(FL_Port)");
3425 break;
3426
3427 case 2:
7c3df132 3428 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
1da177e4
LT
3429 ha->operating_mode = P2P;
3430 ha->current_topology = ISP_CFG_N;
3431 strcpy(connect_type, "(N_Port-to-N_Port)");
3432 break;
3433
3434 case 3:
7c3df132 3435 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2c3dfe3f 3436 ha->switch_cap = sw_cap;
1da177e4
LT
3437 ha->operating_mode = P2P;
3438 ha->current_topology = ISP_CFG_F;
3439 strcpy(connect_type, "(F_Port)");
3440 break;
3441
3442 default:
7c3df132
SK
3443 ql_dbg(ql_dbg_disc, vha, 0x200f,
3444 "HBA in unknown topology %x, using NL.\n", topo);
1da177e4
LT
3445 ha->current_topology = ISP_CFG_NL;
3446 strcpy(connect_type, "(Loop)");
3447 break;
3448 }
3449
3450 /* Save Host port and loop ID. */
3451 /* byte order - Big Endian */
482c9dc7
QT
3452 id.b.domain = domain;
3453 id.b.area = area;
3454 id.b.al_pa = al_pa;
3455 id.b.rsvd_1 = 0;
3456 qlt_update_host_map(vha, id);
2d70c103 3457
e315cd28 3458 if (!vha->flags.init_done)
7c3df132
SK
3459 ql_log(ql_log_info, vha, 0x2010,
3460 "Topology - %s, Host Loop address 0x%x.\n",
e315cd28 3461 connect_type, vha->loop_id);
1da177e4 3462
1da177e4
LT
3463 return(rval);
3464}
3465
a9083016 3466inline void
e315cd28
AC
3467qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
3468 char *def)
9bb9fcf2
AV
3469{
3470 char *st, *en;
3471 uint16_t index;
e315cd28 3472 struct qla_hw_data *ha = vha->hw;
ab671149 3473 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
6246b8a1 3474 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
9bb9fcf2
AV
3475
3476 if (memcmp(model, BINZERO, len) != 0) {
3477 strncpy(ha->model_number, model, len);
3478 st = en = ha->model_number;
3479 en += len - 1;
3480 while (en > st) {
3481 if (*en != 0x20 && *en != 0x00)
3482 break;
3483 *en-- = '\0';
3484 }
3485
3486 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
3487 if (use_tbl &&
3488 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2 3489 index < QLA_MODEL_NAMES)
1ee27146
JC
3490 strncpy(ha->model_desc,
3491 qla2x00_model_name[index * 2 + 1],
3492 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
3493 } else {
3494 index = (ha->pdev->subsystem_device & 0xff);
7d0dba17
AV
3495 if (use_tbl &&
3496 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
9bb9fcf2
AV
3497 index < QLA_MODEL_NAMES) {
3498 strcpy(ha->model_number,
3499 qla2x00_model_name[index * 2]);
1ee27146
JC
3500 strncpy(ha->model_desc,
3501 qla2x00_model_name[index * 2 + 1],
3502 sizeof(ha->model_desc) - 1);
9bb9fcf2
AV
3503 } else {
3504 strcpy(ha->model_number, def);
3505 }
3506 }
1ee27146 3507 if (IS_FWI2_CAPABLE(ha))
e315cd28 3508 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1ee27146 3509 sizeof(ha->model_desc));
9bb9fcf2
AV
3510}
3511
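/*
 * Editor's note: qla2x00_set_model_info() copies the NVRAM model field and
 * then walks backwards over it, turning trailing spaces and NUL padding into
 * string terminators. Here is the same right-trim as a generic, stand-alone
 * helper; the buffer contents in main() are hypothetical.
 */
#include <stdio.h>
#include <string.h>

static void ex_rtrim_model(char *s, size_t len)
{
	char *en = s + len - 1;

	while (en > s && (*en == ' ' || *en == '\0'))
		*en-- = '\0';		/* chop trailing blanks/NULs */
}

int main(void)
{
	char model[] = "QLE2562         ";	/* hypothetical NVRAM model field */

	ex_rtrim_model(model, sizeof(model) - 1);
	printf("'%s'\n", model);
	return 0;
}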
4e08df3f
DM
3512/* On sparc systems, obtain port and node WWN from firmware
3513 * properties.
3514 */
e315cd28 3515static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
4e08df3f
DM
3516{
3517#ifdef CONFIG_SPARC
e315cd28 3518 struct qla_hw_data *ha = vha->hw;
4e08df3f 3519 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
3520 struct device_node *dp = pci_device_to_OF_node(pdev);
3521 const u8 *val;
4e08df3f
DM
3522 int len;
3523
3524 val = of_get_property(dp, "port-wwn", &len);
3525 if (val && len >= WWN_SIZE)
3526 memcpy(nv->port_name, val, WWN_SIZE);
3527
3528 val = of_get_property(dp, "node-wwn", &len);
3529 if (val && len >= WWN_SIZE)
3530 memcpy(nv->node_name, val, WWN_SIZE);
3531#endif
3532}
3533
1da177e4
LT
3534/*
3535* NVRAM configuration for ISP 2xxx
3536*
3537* Input:
3538* ha = adapter block pointer.
3539*
3540* Output:
3541* initialization control block in response_ring
3542* host adapters parameters in host adapter block
3543*
3544* Returns:
3545* 0 = success.
3546*/
abbd8870 3547int
e315cd28 3548qla2x00_nvram_config(scsi_qla_host_t *vha)
1da177e4 3549{
4e08df3f 3550 int rval;
0107109e
AV
3551 uint8_t chksum = 0;
3552 uint16_t cnt;
3553 uint8_t *dptr1, *dptr2;
e315cd28 3554 struct qla_hw_data *ha = vha->hw;
0107109e 3555 init_cb_t *icb = ha->init_cb;
281afe19
SJ
3556 nvram_t *nv = ha->nvram;
3557 uint8_t *ptr = ha->nvram;
3d71644c 3558 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 3559
4e08df3f
DM
3560 rval = QLA_SUCCESS;
3561
1da177e4 3562 /* Determine NVRAM starting address. */
0107109e 3563 ha->nvram_size = sizeof(nvram_t);
1da177e4
LT
3564 ha->nvram_base = 0;
3565 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
3566 if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
3567 ha->nvram_base = 0x80;
3568
3569 /* Get NVRAM data and calculate checksum. */
e315cd28 3570 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
0107109e
AV
3571 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
3572 chksum += *ptr++;
1da177e4 3573
7c3df132
SK
3574 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
3575 "Contents of NVRAM.\n");
3576 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
3577 (uint8_t *)nv, ha->nvram_size);
1da177e4
LT
3578
3579 /* Bad NVRAM data, set defaults parameters. */
3580 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
3581 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
3582 /* Reset NVRAM data. */
7c3df132 3583 ql_log(ql_log_warn, vha, 0x0064,
9e336520 3584 "Inconsistent NVRAM "
7c3df132
SK
3585 "detected: checksum=0x%x id=%c version=0x%x.\n",
3586 chksum, nv->id[0], nv->nvram_version);
3587 ql_log(ql_log_warn, vha, 0x0065,
3588 "Falling back to "
3589 "functioning (yet invalid -- WWPN) defaults.\n");
4e08df3f
DM
3590
3591 /*
3592 * Set default initialization control block.
3593 */
3594 memset(nv, 0, ha->nvram_size);
3595 nv->parameter_block_version = ICB_VERSION;
3596
3597 if (IS_QLA23XX(ha)) {
3598 nv->firmware_options[0] = BIT_2 | BIT_1;
3599 nv->firmware_options[1] = BIT_7 | BIT_5;
3600 nv->add_firmware_options[0] = BIT_5;
3601 nv->add_firmware_options[1] = BIT_5 | BIT_4;
98aee70d 3602 nv->frame_payload_size = 2048;
4e08df3f
DM
3603 nv->special_options[1] = BIT_7;
3604 } else if (IS_QLA2200(ha)) {
3605 nv->firmware_options[0] = BIT_2 | BIT_1;
3606 nv->firmware_options[1] = BIT_7 | BIT_5;
3607 nv->add_firmware_options[0] = BIT_5;
3608 nv->add_firmware_options[1] = BIT_5 | BIT_4;
98aee70d 3609 nv->frame_payload_size = 1024;
4e08df3f
DM
3610 } else if (IS_QLA2100(ha)) {
3611 nv->firmware_options[0] = BIT_3 | BIT_1;
3612 nv->firmware_options[1] = BIT_5;
98aee70d 3613 nv->frame_payload_size = 1024;
4e08df3f
DM
3614 }
3615
ad950360
BVA
3616 nv->max_iocb_allocation = cpu_to_le16(256);
3617 nv->execution_throttle = cpu_to_le16(16);
4e08df3f
DM
3618 nv->retry_count = 8;
3619 nv->retry_delay = 1;
3620
3621 nv->port_name[0] = 33;
3622 nv->port_name[3] = 224;
3623 nv->port_name[4] = 139;
3624
e315cd28 3625 qla2xxx_nvram_wwn_from_ofw(vha, nv);
4e08df3f
DM
3626
3627 nv->login_timeout = 4;
3628
3629 /*
3630 * Set default host adapter parameters
3631 */
3632 nv->host_p[1] = BIT_2;
3633 nv->reset_delay = 5;
3634 nv->port_down_retry_count = 8;
ad950360 3635 nv->max_luns_per_target = cpu_to_le16(8);
4e08df3f
DM
3636 nv->link_down_timeout = 60;
3637
3638 rval = 1;
1da177e4
LT
3639 }
3640
3641#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
3642 /*
3643 * The SN2 does not provide BIOS emulation which means you can't change
3644 * potentially bogus BIOS settings. Force the use of default settings
3645 * for link rate and frame size. Hope that the rest of the settings
3646 * are valid.
3647 */
3648 if (ia64_platform_is("sn2")) {
98aee70d 3649 nv->frame_payload_size = 2048;
1da177e4
LT
3650 if (IS_QLA23XX(ha))
3651 nv->special_options[1] = BIT_7;
3652 }
3653#endif
3654
3655 /* Reset Initialization control block */
0107109e 3656 memset(icb, 0, ha->init_cb_size);
1da177e4
LT
3657
3658 /*
3659 * Setup driver NVRAM options.
3660 */
3661 nv->firmware_options[0] |= (BIT_6 | BIT_1);
3662 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
3663 nv->firmware_options[1] |= (BIT_5 | BIT_0);
3664 nv->firmware_options[1] &= ~BIT_4;
3665
3666 if (IS_QLA23XX(ha)) {
3667 nv->firmware_options[0] |= BIT_2;
3668 nv->firmware_options[0] &= ~BIT_3;
2d70c103 3669 nv->special_options[0] &= ~BIT_6;
0107109e 3670 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
1da177e4
LT
3671
3672 if (IS_QLA2300(ha)) {
3673 if (ha->fb_rev == FPM_2310) {
3674 strcpy(ha->model_number, "QLA2310");
3675 } else {
3676 strcpy(ha->model_number, "QLA2300");
3677 }
3678 } else {
e315cd28 3679 qla2x00_set_model_info(vha, nv->model_number,
9bb9fcf2 3680 sizeof(nv->model_number), "QLA23xx");
1da177e4
LT
3681 }
3682 } else if (IS_QLA2200(ha)) {
3683 nv->firmware_options[0] |= BIT_2;
3684 /*
3685 * 'Point-to-point preferred, else loop' is not a safe
3686 * connection mode setting.
3687 */
3688 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
3689 (BIT_5 | BIT_4)) {
3690 /* Force 'loop preferred, else point-to-point'. */
3691 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
3692 nv->add_firmware_options[0] |= BIT_5;
3693 }
3694 strcpy(ha->model_number, "QLA22xx");
3695 } else /*if (IS_QLA2100(ha))*/ {
3696 strcpy(ha->model_number, "QLA2100");
3697 }
3698
3699 /*
3700 * Copy over NVRAM RISC parameter block to initialization control block.
3701 */
3702 dptr1 = (uint8_t *)icb;
3703 dptr2 = (uint8_t *)&nv->parameter_block_version;
3704 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
3705 while (cnt--)
3706 *dptr1++ = *dptr2++;
3707
3708 /* Copy 2nd half. */
3709 dptr1 = (uint8_t *)icb->add_firmware_options;
3710 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
3711 while (cnt--)
3712 *dptr1++ = *dptr2++;
3713
5341e868
AV
3714 /* Use alternate WWN? */
3715 if (nv->host_p[1] & BIT_7) {
3716 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
3717 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
3718 }
3719
1da177e4
LT
3720 /* Prepare nodename */
3721 if ((icb->firmware_options[1] & BIT_6) == 0) {
3722 /*
3723 * Firmware will apply the following mask if the nodename was
3724 * not provided.
3725 */
3726 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
3727 icb->node_name[0] &= 0xF0;
3728 }
3729
3730 /*
3731 * Set host adapter parameters.
3732 */
3ce8866c
SK
3733
3734 /*
3735 * BIT_7 in the host-parameters section allows for modification to
3736 * internal driver logging.
3737 */
0181944f 3738 if (nv->host_p[0] & BIT_7)
cfb0919c 3739 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
1da177e4
LT
3740 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
3741 /* Always load RISC code on non ISP2[12]00 chips. */
3742 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
3743 ha->flags.disable_risc_code_load = 0;
3744 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
3745 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
3746 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
06c22bd1 3747 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
d4c760c2 3748 ha->flags.disable_serdes = 0;
1da177e4
LT
3749
3750 ha->operating_mode =
3751 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
3752
3753 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
3754 sizeof(ha->fw_seriallink_options));
3755
3756 /* save HBA serial number */
3757 ha->serial0 = icb->port_name[5];
3758 ha->serial1 = icb->port_name[6];
3759 ha->serial2 = icb->port_name[7];
e315cd28
AC
3760 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
3761 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1da177e4 3762
ad950360 3763 icb->execution_throttle = cpu_to_le16(0xFFFF);
1da177e4
LT
3764
3765 ha->retry_count = nv->retry_count;
3766
3767 /* Set minimum login_timeout to 4 seconds. */
5b91490e 3768 if (nv->login_timeout != ql2xlogintimeout)
1da177e4
LT
3769 nv->login_timeout = ql2xlogintimeout;
3770 if (nv->login_timeout < 4)
3771 nv->login_timeout = 4;
3772 ha->login_timeout = nv->login_timeout;
1da177e4 3773
00a537b8
AV
3774 /* Set minimum RATOV to 100 tenths of a second. */
3775 ha->r_a_tov = 100;
1da177e4 3776
1da177e4
LT
3777 ha->loop_reset_delay = nv->reset_delay;
3778
1da177e4
LT
3779 /* Link Down Timeout = 0:
3780 *
3781 * When Port Down timer expires we will start returning
3782 * I/O's to OS with "DID_NO_CONNECT".
3783 *
3784 * Link Down Timeout != 0:
3785 *
3786 * The driver waits for the link to come up after link down
3787 * before returning I/Os to OS with "DID_NO_CONNECT".
fa2a1ce5 3788 */
1da177e4
LT
3789 if (nv->link_down_timeout == 0) {
3790 ha->loop_down_abort_time =
354d6b21 3791 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
1da177e4
LT
3792 } else {
3793 ha->link_down_timeout = nv->link_down_timeout;
3794 ha->loop_down_abort_time =
3795 (LOOP_DOWN_TIME - ha->link_down_timeout);
fa2a1ce5 3796 }
1da177e4 3797
1da177e4
LT
3798 /*
3799 * Need enough time to try and get the port back.
3800 */
3801 ha->port_down_retry_count = nv->port_down_retry_count;
3802 if (qlport_down_retry)
3803 ha->port_down_retry_count = qlport_down_retry;
3804 /* Set login_retry_count */
3805 ha->login_retry_count = nv->retry_count;
3806 if (ha->port_down_retry_count == nv->port_down_retry_count &&
3807 ha->port_down_retry_count > 3)
3808 ha->login_retry_count = ha->port_down_retry_count;
3809 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
3810 ha->login_retry_count = ha->port_down_retry_count;
3811 if (ql2xloginretrycount)
3812 ha->login_retry_count = ql2xloginretrycount;
3813
ad950360 3814 icb->lun_enables = cpu_to_le16(0);
1da177e4
LT
3815 icb->command_resource_count = 0;
3816 icb->immediate_notify_resource_count = 0;
ad950360 3817 icb->timeout = cpu_to_le16(0);
1da177e4
LT
3818
3819 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3820 /* Enable RIO */
3821 icb->firmware_options[0] &= ~BIT_3;
3822 icb->add_firmware_options[0] &=
3823 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
3824 icb->add_firmware_options[0] |= BIT_2;
3825 icb->response_accumulation_timer = 3;
3826 icb->interrupt_delay_timer = 5;
3827
e315cd28 3828 vha->flags.process_response_queue = 1;
1da177e4 3829 } else {
4fdfefe5 3830 /* Enable ZIO. */
e315cd28 3831 if (!vha->flags.init_done) {
4fdfefe5
AV
3832 ha->zio_mode = icb->add_firmware_options[0] &
3833 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
3834 ha->zio_timer = icb->interrupt_delay_timer ?
3835 icb->interrupt_delay_timer: 2;
3836 }
1da177e4
LT
3837 icb->add_firmware_options[0] &=
3838 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
e315cd28 3839 vha->flags.process_response_queue = 0;
4fdfefe5 3840 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d 3841 ha->zio_mode = QLA_ZIO_MODE_6;
3842
7c3df132 3843 ql_log(ql_log_info, vha, 0x0068,
4fdfefe5
AV
3844 "ZIO mode %d enabled; timer delay (%d us).\n",
3845 ha->zio_mode, ha->zio_timer * 100);
1da177e4 3846
4fdfefe5
AV
3847 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
3848 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
e315cd28 3849 vha->flags.process_response_queue = 1;
1da177e4
LT
3850 }
3851 }
3852
4e08df3f 3853 if (rval) {
7c3df132
SK
3854 ql_log(ql_log_warn, vha, 0x0069,
3855 "NVRAM configuration failed.\n");
4e08df3f
DM
3856 }
3857 return (rval);
1da177e4
LT
3858}
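
/*
 * Illustrative sketch (not driver code): the validation above relies on an
 * 8-bit additive checksum -- summing every byte of the NVRAM image,
 * including the stored checksum byte, must yield zero, and the image must
 * begin with the "ISP " signature. A minimal standalone check:
 */
static int __maybe_unused example_nvram_image_ok(const uint8_t *img, size_t len)
{
        uint8_t sum = 0;
        size_t i;

        for (i = 0; i < len; i++)
                sum += img[i];          /* wraps modulo 256 by design */

        if (sum != 0)
                return 0;               /* corrupted or blank image */

        return len >= 4 && img[0] == 'I' && img[1] == 'S' &&
            img[2] == 'P' && img[3] == ' ';
}
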
3859
19a7b4ae
JSEC
3860static void
3861qla2x00_rport_del(void *data)
3862{
3863 fc_port_t *fcport = data;
d97994dc 3864 struct fc_rport *rport;
044d78e1 3865 unsigned long flags;
d97994dc 3866
044d78e1 3867 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
ac280b67 3868 rport = fcport->drport ? fcport->drport: fcport->rport;
d97994dc 3869 fcport->drport = NULL;
044d78e1 3870 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
726b8548 3871 if (rport) {
83548fe2
QT
3872 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
3873 "%s %8phN. rport %p roles %x\n",
3874 __func__, fcport->port_name, rport,
3875 rport->roles);
726b8548 3876
d97994dc 3877 fc_remote_port_delete(rport);
726b8548 3878 }
19a7b4ae
JSEC
3879}
3880
1da177e4
LT
3881/**
3882 * qla2x00_alloc_fcport() - Allocate a generic fcport.
3883 * @ha: HA context
3884 * @flags: allocation flags
3885 *
3886 * Returns a pointer to the allocated fcport, or NULL, if none available.
3887 */
9a069e19 3888fc_port_t *
e315cd28 3889qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1da177e4
LT
3890{
3891 fc_port_t *fcport;
3892
bbfbbbc1
MK
3893 fcport = kzalloc(sizeof(fc_port_t), flags);
3894 if (!fcport)
3895 return NULL;
1da177e4
LT
3896
3897 /* Setup fcport template structure. */
e315cd28 3898 fcport->vha = vha;
1da177e4
LT
3899 fcport->port_type = FCT_UNKNOWN;
3900 fcport->loop_id = FC_NO_LOOP_ID;
ec426e10 3901 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
ad3e0eda 3902 fcport->supported_classes = FC_COS_UNSPECIFIED;
1da177e4 3903
726b8548
QT
3904 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
3905 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
6cb3216a 3906 flags);
726b8548
QT
3907 fcport->disc_state = DSC_DELETED;
3908 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
3909 fcport->deleted = QLA_SESS_DELETED;
3910 fcport->login_retry = vha->hw->login_retry_count;
3911 fcport->login_retry = 5;
3912 fcport->logout_on_delete = 1;
3913
3914 if (!fcport->ct_desc.ct_sns) {
83548fe2 3915 ql_log(ql_log_warn, vha, 0xd049,
726b8548
QT
3916 "Failed to allocate ct_sns request.\n");
 3917 	kfree(fcport);
 3918 	return NULL;
 3919 	}
3920 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
3921 INIT_LIST_HEAD(&fcport->gnl_entry);
3922 INIT_LIST_HEAD(&fcport->list);
3923
bbfbbbc1 3924 return fcport;
1da177e4
LT
3925}
3926
726b8548
QT
3927void
3928qla2x00_free_fcport(fc_port_t *fcport)
3929{
3930 if (fcport->ct_desc.ct_sns) {
3931 dma_free_coherent(&fcport->vha->hw->pdev->dev,
3932 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
3933 fcport->ct_desc.ct_sns_dma);
3934
3935 fcport->ct_desc.ct_sns = NULL;
3936 }
3937 kfree(fcport);
3938}
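
/*
 * Minimal usage sketch (hypothetical caller, not in the driver): pair
 * qla2x00_alloc_fcport() with qla2x00_free_fcport() and always check for a
 * NULL return before touching the port.
 */
static int __maybe_unused example_alloc_port(scsi_qla_host_t *vha)
{
        fc_port_t *fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

        if (!fcport)
                return -ENOMEM;

        /* ... fill in d_id/loop_id and hand the port to discovery ... */

        /* A port that is not handed off must be released explicitly. */
        qla2x00_free_fcport(fcport);
        return 0;
}
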
3939
1da177e4
LT
3940/*
3941 * qla2x00_configure_loop
3942 * Updates Fibre Channel Device Database with what is actually on loop.
3943 *
3944 * Input:
3945 * ha = adapter block pointer.
3946 *
3947 * Returns:
3948 * 0 = success.
3949 * 1 = error.
3950 * 2 = database was full and device was not configured.
3951 */
3952static int
e315cd28 3953qla2x00_configure_loop(scsi_qla_host_t *vha)
1da177e4
LT
3954{
3955 int rval;
3956 unsigned long flags, save_flags;
e315cd28 3957 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
3958 rval = QLA_SUCCESS;
3959
3960 /* Get Initiator ID */
e315cd28
AC
3961 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
3962 rval = qla2x00_configure_hba(vha);
1da177e4 3963 if (rval != QLA_SUCCESS) {
7c3df132
SK
3964 ql_dbg(ql_dbg_disc, vha, 0x2013,
3965 "Unable to configure HBA.\n");
1da177e4
LT
3966 return (rval);
3967 }
3968 }
3969
e315cd28 3970 save_flags = flags = vha->dpc_flags;
7c3df132
SK
3971 ql_dbg(ql_dbg_disc, vha, 0x2014,
3972 "Configure loop -- dpc flags = 0x%lx.\n", flags);
1da177e4
LT
3973
3974 /*
3975 * If we have both an RSCN and PORT UPDATE pending then handle them
3976 * both at the same time.
3977 */
e315cd28
AC
3978 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3979 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
1da177e4 3980
3064ff39
MH
3981 qla2x00_get_data_rate(vha);
3982
1da177e4
LT
3983 /* Determine what we need to do */
3984 if (ha->current_topology == ISP_CFG_FL &&
3985 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
3986
1da177e4
LT
3987 set_bit(RSCN_UPDATE, &flags);
3988
3989 } else if (ha->current_topology == ISP_CFG_F &&
3990 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
3991
1da177e4
LT
3992 set_bit(RSCN_UPDATE, &flags);
3993 clear_bit(LOCAL_LOOP_UPDATE, &flags);
21333b48
AV
3994
3995 } else if (ha->current_topology == ISP_CFG_N) {
3996 clear_bit(RSCN_UPDATE, &flags);
41dc529a
QT
3997 } else if (ha->current_topology == ISP_CFG_NL) {
3998 clear_bit(RSCN_UPDATE, &flags);
3999 set_bit(LOCAL_LOOP_UPDATE, &flags);
e315cd28 4000 } else if (!vha->flags.online ||
1da177e4 4001 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
1da177e4
LT
4002 set_bit(RSCN_UPDATE, &flags);
4003 set_bit(LOCAL_LOOP_UPDATE, &flags);
4004 }
4005
4006 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
7c3df132
SK
4007 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
4008 ql_dbg(ql_dbg_disc, vha, 0x2015,
4009 "Loop resync needed, failing.\n");
1da177e4 4010 rval = QLA_FUNCTION_FAILED;
642ef983 4011 } else
e315cd28 4012 rval = qla2x00_configure_local_loop(vha);
1da177e4
LT
4013 }
4014
4015 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
7c3df132 4016 if (LOOP_TRANSITION(vha)) {
83548fe2 4017 ql_dbg(ql_dbg_disc, vha, 0x2099,
7c3df132 4018 "Needs RSCN update and loop transition.\n");
1da177e4 4019 rval = QLA_FUNCTION_FAILED;
7c3df132 4020 }
e315cd28
AC
4021 else
4022 rval = qla2x00_configure_fabric(vha);
1da177e4
LT
4023 }
4024
4025 if (rval == QLA_SUCCESS) {
e315cd28
AC
4026 if (atomic_read(&vha->loop_down_timer) ||
4027 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4
LT
4028 rval = QLA_FUNCTION_FAILED;
4029 } else {
e315cd28 4030 atomic_set(&vha->loop_state, LOOP_READY);
7c3df132
SK
4031 ql_dbg(ql_dbg_disc, vha, 0x2069,
4032 "LOOP READY.\n");
ec7193e2 4033 ha->flags.fw_init_done = 1;
3bb67df5
DKU
4034
4035 /*
4036 * Process any ATIO queue entries that came in
4037 * while we weren't online.
4038 */
ead03855
QT
4039 if (qla_tgt_mode_enabled(vha) ||
4040 qla_dual_mode_enabled(vha)) {
3bb67df5
DKU
4041 if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
4042 spin_lock_irqsave(&ha->tgt.atio_lock,
4043 flags);
4044 qlt_24xx_process_atio_queue(vha, 0);
4045 spin_unlock_irqrestore(
4046 &ha->tgt.atio_lock, flags);
4047 } else {
4048 spin_lock_irqsave(&ha->hardware_lock,
4049 flags);
4050 qlt_24xx_process_atio_queue(vha, 1);
4051 spin_unlock_irqrestore(
4052 &ha->hardware_lock, flags);
4053 }
4054 }
1da177e4
LT
4055 }
4056 }
4057
4058 if (rval) {
7c3df132
SK
4059 ql_dbg(ql_dbg_disc, vha, 0x206a,
4060 "%s *** FAILED ***.\n", __func__);
1da177e4 4061 } else {
7c3df132
SK
4062 ql_dbg(ql_dbg_disc, vha, 0x206b,
4063 "%s: exiting normally.\n", __func__);
1da177e4
LT
4064 }
4065
cc3ef7bc 4066 /* Restore state if a resync event occurred during processing */
e315cd28 4067 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1da177e4 4068 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
e315cd28 4069 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
f4658b6c 4070 if (test_bit(RSCN_UPDATE, &save_flags)) {
e315cd28 4071 set_bit(RSCN_UPDATE, &vha->dpc_flags);
f4658b6c 4072 }
1da177e4
LT
4073 }
4074
4075 return (rval);
4076}
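
/*
 * Sketch of the save/restore pattern used above (hypothetical helper, same
 * dpc flag bits as the driver): snapshot the pending work bits, clear them,
 * and if a resync was requested while the long-running discovery ran,
 * re-arm whatever was pending so it is not lost.
 */
static void __maybe_unused example_dpc_flag_handoff(unsigned long *dpc_flags)
{
        unsigned long save_flags = *dpc_flags;

        clear_bit(LOCAL_LOOP_UPDATE, dpc_flags);
        clear_bit(RSCN_UPDATE, dpc_flags);

        /* ... discovery work runs here and may set LOOP_RESYNC_NEEDED ... */

        if (test_bit(LOOP_RESYNC_NEEDED, dpc_flags)) {
                if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
                        set_bit(LOCAL_LOOP_UPDATE, dpc_flags);
                if (test_bit(RSCN_UPDATE, &save_flags))
                        set_bit(RSCN_UPDATE, dpc_flags);
        }
}
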
4077
4078
4079
4080/*
4081 * qla2x00_configure_local_loop
4082 * Updates Fibre Channel Device Database with local loop devices.
4083 *
4084 * Input:
4085 * ha = adapter block pointer.
4086 *
4087 * Returns:
4088 * 0 = success.
4089 */
4090static int
e315cd28 4091qla2x00_configure_local_loop(scsi_qla_host_t *vha)
1da177e4
LT
4092{
4093 int rval, rval2;
4094 int found_devs;
4095 int found;
4096 fc_port_t *fcport, *new_fcport;
4097
4098 uint16_t index;
4099 uint16_t entries;
4100 char *id_iter;
4101 uint16_t loop_id;
4102 uint8_t domain, area, al_pa;
e315cd28 4103 struct qla_hw_data *ha = vha->hw;
41dc529a 4104 unsigned long flags;
1da177e4
LT
4105
4106 found_devs = 0;
4107 new_fcport = NULL;
642ef983 4108 entries = MAX_FIBRE_DEVICES_LOOP;
1da177e4 4109
1da177e4 4110 /* Get list of logged in devices. */
642ef983 4111 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
e315cd28 4112 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
1da177e4
LT
4113 &entries);
4114 if (rval != QLA_SUCCESS)
4115 goto cleanup_allocation;
4116
83548fe2 4117 ql_dbg(ql_dbg_disc, vha, 0x2011,
7c3df132
SK
4118 "Entries in ID list (%d).\n", entries);
4119 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
4120 (uint8_t *)ha->gid_list,
4121 entries * sizeof(struct gid_list_info));
1da177e4
LT
4122
4123 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 4124 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 4125 if (new_fcport == NULL) {
83548fe2 4126 ql_log(ql_log_warn, vha, 0x2012,
7c3df132 4127 "Memory allocation failed for fcport.\n");
1da177e4
LT
4128 rval = QLA_MEMORY_ALLOC_FAILED;
4129 goto cleanup_allocation;
4130 }
4131 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
4132
4133 /*
4134 * Mark local devices that were present with FCF_DEVICE_LOST for now.
4135 */
e315cd28 4136 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
4137 if (atomic_read(&fcport->state) == FCS_ONLINE &&
4138 fcport->port_type != FCT_BROADCAST &&
4139 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
4140
83548fe2 4141 ql_dbg(ql_dbg_disc, vha, 0x2096,
7c3df132
SK
4142 "Marking port lost loop_id=0x%04x.\n",
4143 fcport->loop_id);
1da177e4 4144
41dc529a 4145 qla2x00_mark_device_lost(vha, fcport, 0, 0);
1da177e4
LT
4146 }
4147 }
4148
4149 /* Add devices to port list. */
4150 id_iter = (char *)ha->gid_list;
4151 for (index = 0; index < entries; index++) {
4152 domain = ((struct gid_list_info *)id_iter)->domain;
4153 area = ((struct gid_list_info *)id_iter)->area;
4154 al_pa = ((struct gid_list_info *)id_iter)->al_pa;
abbd8870 4155 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1da177e4
LT
4156 loop_id = (uint16_t)
4157 ((struct gid_list_info *)id_iter)->loop_id_2100;
abbd8870 4158 else
1da177e4
LT
4159 loop_id = le16_to_cpu(
4160 ((struct gid_list_info *)id_iter)->loop_id);
abbd8870 4161 id_iter += ha->gid_list_info_size;
1da177e4
LT
4162
4163 /* Bypass reserved domain fields. */
4164 if ((domain & 0xf0) == 0xf0)
4165 continue;
4166
4167 /* Bypass if not same domain and area of adapter. */
f7d289f6 4168 if (area && domain &&
e315cd28 4169 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
1da177e4
LT
4170 continue;
4171
4172 /* Bypass invalid local loop ID. */
4173 if (loop_id > LAST_LOCAL_LOOP_ID)
4174 continue;
4175
41dc529a 4176 memset(new_fcport->port_name, 0, WWN_SIZE);
370d550e 4177
1da177e4
LT
4178 /* Fill in member data. */
4179 new_fcport->d_id.b.domain = domain;
4180 new_fcport->d_id.b.area = area;
4181 new_fcport->d_id.b.al_pa = al_pa;
4182 new_fcport->loop_id = loop_id;
41dc529a 4183
e315cd28 4184 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
1da177e4 4185 if (rval2 != QLA_SUCCESS) {
83548fe2 4186 ql_dbg(ql_dbg_disc, vha, 0x2097,
7c3df132
SK
4187 "Failed to retrieve fcport information "
4188 "-- get_port_database=%x, loop_id=0x%04x.\n",
4189 rval2, new_fcport->loop_id);
83548fe2 4190 ql_dbg(ql_dbg_disc, vha, 0x2105,
7c3df132 4191 "Scheduling resync.\n");
e315cd28 4192 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4
LT
4193 continue;
4194 }
4195
41dc529a 4196 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1da177e4
LT
4197 /* Check for matching device in port list. */
4198 found = 0;
4199 fcport = NULL;
e315cd28 4200 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
4201 if (memcmp(new_fcport->port_name, fcport->port_name,
4202 WWN_SIZE))
4203 continue;
4204
ddb9b126 4205 fcport->flags &= ~FCF_FABRIC_DEVICE;
1da177e4
LT
4206 fcport->loop_id = new_fcport->loop_id;
4207 fcport->port_type = new_fcport->port_type;
4208 fcport->d_id.b24 = new_fcport->d_id.b24;
4209 memcpy(fcport->node_name, new_fcport->node_name,
4210 WWN_SIZE);
4211
41dc529a
QT
4212 if (!fcport->login_succ) {
4213 vha->fcport_count++;
4214 fcport->login_succ = 1;
4215 fcport->disc_state = DSC_LOGIN_COMPLETE;
4216 }
4217
1da177e4
LT
4218 found++;
4219 break;
4220 }
4221
4222 if (!found) {
4223 /* New device, add to fcports list. */
e315cd28 4224 list_add_tail(&new_fcport->list, &vha->vp_fcports);
1da177e4
LT
4225
4226 /* Allocate a new replacement fcport. */
4227 fcport = new_fcport;
41dc529a
QT
4228 if (!fcport->login_succ) {
4229 vha->fcport_count++;
4230 fcport->login_succ = 1;
4231 fcport->disc_state = DSC_LOGIN_COMPLETE;
4232 }
4233
4234 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4235
e315cd28 4236 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
41dc529a 4237
1da177e4 4238 if (new_fcport == NULL) {
83548fe2 4239 ql_log(ql_log_warn, vha, 0xd031,
7c3df132 4240 "Failed to allocate memory for fcport.\n");
1da177e4
LT
4241 rval = QLA_MEMORY_ALLOC_FAILED;
4242 goto cleanup_allocation;
4243 }
41dc529a 4244 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1da177e4
LT
4245 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
4246 }
4247
41dc529a
QT
4248 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4249
d8b45213 4250 /* Base iIDMA settings on HBA port speed. */
a3cbdfad 4251 fcport->fp_speed = ha->link_data_rate;
d8b45213 4252
e315cd28 4253 qla2x00_update_fcport(vha, fcport);
1da177e4
LT
4254
4255 found_devs++;
4256 }
4257
4258cleanup_allocation:
c9475cb0 4259 kfree(new_fcport);
1da177e4
LT
4260
4261 if (rval != QLA_SUCCESS) {
83548fe2 4262 ql_dbg(ql_dbg_disc, vha, 0x2098,
7c3df132 4263 "Configure local loop error exit: rval=%x.\n", rval);
1da177e4
LT
4264 }
4265
1da177e4
LT
4266 return (rval);
4267}
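
/*
 * Sketch of the ID-list walk above (illustrative only): the firmware
 * returns a packed array of gid_list_info entries whose width depends on
 * the ISP generation, so the driver advances a byte pointer by
 * ha->gid_list_info_size instead of indexing a fixed-size C array.
 */
static void __maybe_unused example_walk_gid_list(struct qla_hw_data *ha,
    uint16_t entries)
{
        char *iter = (char *)ha->gid_list;
        uint16_t i;

        for (i = 0; i < entries; i++) {
                struct gid_list_info *gid = (struct gid_list_info *)iter;

                /* gid->domain, gid->area and gid->al_pa name the port. */
                (void)gid;

                iter += ha->gid_list_info_size; /* variable-width stride */
        }
}
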
4268
d8b45213 4269static void
e315cd28 4270qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
d8b45213 4271{
d8b45213 4272 int rval;
93f2bd67 4273 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 4274 struct qla_hw_data *ha = vha->hw;
d8b45213 4275
c76f2c01 4276 if (!IS_IIDMA_CAPABLE(ha))
d8b45213
AV
4277 return;
4278
c9afb9a2
GM
4279 if (atomic_read(&fcport->state) != FCS_ONLINE)
4280 return;
4281
39bd9622
AV
4282 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
4283 fcport->fp_speed > ha->link_data_rate)
d8b45213
AV
4284 return;
4285
e315cd28 4286 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
a3cbdfad 4287 mb);
d8b45213 4288 if (rval != QLA_SUCCESS) {
7c3df132 4289 ql_dbg(ql_dbg_disc, vha, 0x2004,
7b833558
OK
4290 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
4291 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
d8b45213 4292 } else {
7c3df132 4293 ql_dbg(ql_dbg_disc, vha, 0x2005,
7b833558 4294 "iIDMA adjusted to %s GB/s on %8phN.\n",
d0297c9a 4295 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
7b833558 4296 fcport->port_name);
d8b45213
AV
4297 }
4298}
4299
726b8548 4300/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
23be331d 4301static void
e315cd28 4302qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
8482e118 4303{
4304 struct fc_rport_identifiers rport_ids;
bdf79621 4305 struct fc_rport *rport;
044d78e1 4306 unsigned long flags;
8482e118 4307
f8b02a85
AV
4308 rport_ids.node_name = wwn_to_u64(fcport->node_name);
4309 rport_ids.port_name = wwn_to_u64(fcport->port_name);
8482e118 4310 rport_ids.port_id = fcport->d_id.b.domain << 16 |
4311 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
77d74143 4312 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
e315cd28 4313 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
77d74143 4314 if (!rport) {
7c3df132
SK
4315 ql_log(ql_log_warn, vha, 0x2006,
4316 "Unable to allocate fc remote port.\n");
77d74143
AV
4317 return;
4318 }
2d70c103 4319
044d78e1 4320 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
19a7b4ae 4321 *((fc_port_t **)rport->dd_data) = fcport;
044d78e1 4322 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
d97994dc 4323
ad3e0eda 4324 rport->supported_classes = fcport->supported_classes;
77d74143 4325
8482e118 4326 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4327 if (fcport->port_type == FCT_INITIATOR)
4328 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
4329 if (fcport->port_type == FCT_TARGET)
4330 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
726b8548 4331
83548fe2
QT
4332 ql_dbg(ql_dbg_disc, vha, 0x20ee,
4333 "%s %8phN. rport %p is %s mode\n",
4334 __func__, fcport->port_name, rport,
4335 (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
726b8548 4336
77d74143 4337 fc_remote_port_rolechg(rport, rport_ids.roles);
1da177e4
LT
4338}
4339
23be331d
AB
4340/*
4341 * qla2x00_update_fcport
4342 * Updates device on list.
4343 *
4344 * Input:
4345 * ha = adapter block pointer.
4346 * fcport = port structure pointer.
4347 *
4348 * Return:
4349 * 0 - Success
4350 * BIT_0 - error
4351 *
4352 * Context:
4353 * Kernel context.
4354 */
4355void
e315cd28 4356qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
23be331d 4357{
e315cd28 4358 fcport->vha = vha;
8ae6d9c7 4359
726b8548
QT
4360 if (IS_SW_RESV_ADDR(fcport->d_id))
4361 return;
4362
83548fe2 4363 ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
726b8548
QT
4364 __func__, fcport->port_name);
4365
8ae6d9c7
GM
4366 if (IS_QLAFX00(vha->hw)) {
4367 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
d20ed91b 4368 goto reg_port;
8ae6d9c7 4369 }
23be331d 4370 fcport->login_retry = 0;
5ff1d584 4371 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
726b8548
QT
4372 fcport->disc_state = DSC_LOGIN_COMPLETE;
4373 fcport->deleted = 0;
4374 fcport->logout_on_delete = 1;
23be331d 4375
1f93da52 4376 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
e315cd28 4377 qla2x00_iidma_fcport(vha, fcport);
21090cbe 4378 qla24xx_update_fcport_fcp_prio(vha, fcport);
d20ed91b
AP
4379
4380reg_port:
726b8548
QT
4381 switch (vha->host->active_mode) {
4382 case MODE_INITIATOR:
d20ed91b 4383 qla2x00_reg_remote_port(vha, fcport);
726b8548
QT
4384 break;
4385 case MODE_TARGET:
4386 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
4387 !vha->vha_tgt.qla_tgt->tgt_stopped)
4388 qlt_fc_port_added(vha, fcport);
4389 break;
4390 case MODE_DUAL:
d20ed91b 4391 qla2x00_reg_remote_port(vha, fcport);
726b8548
QT
4392 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
4393 !vha->vha_tgt.qla_tgt->tgt_stopped)
4394 qlt_fc_port_added(vha, fcport);
4395 break;
4396 default:
4397 break;
d20ed91b 4398 }
23be331d
AB
4399}
4400
1da177e4
LT
4401/*
4402 * qla2x00_configure_fabric
4403 * Setup SNS devices with loop ID's.
4404 *
4405 * Input:
4406 * ha = adapter block pointer.
4407 *
4408 * Returns:
4409 * 0 = success.
4410 * BIT_0 = error
4411 */
4412static int
e315cd28 4413qla2x00_configure_fabric(scsi_qla_host_t *vha)
1da177e4 4414{
b3b02e6e 4415 int rval;
726b8548 4416 fc_port_t *fcport;
1da177e4 4417 uint16_t mb[MAILBOX_REGISTER_COUNT];
0107109e 4418 uint16_t loop_id;
1da177e4 4419 LIST_HEAD(new_fcports);
e315cd28 4420 struct qla_hw_data *ha = vha->hw;
df673274 4421 int discovery_gen;
1da177e4
LT
4422
4423 /* If FL port exists, then SNS is present */
e428924c 4424 if (IS_FWI2_CAPABLE(ha))
0107109e
AV
4425 loop_id = NPH_F_PORT;
4426 else
4427 loop_id = SNS_FL_PORT;
e315cd28 4428 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
1da177e4 4429 if (rval != QLA_SUCCESS) {
83548fe2 4430 ql_dbg(ql_dbg_disc, vha, 0x20a0,
7c3df132 4431 "MBX_GET_PORT_NAME failed, No FL Port.\n");
1da177e4 4432
e315cd28 4433 vha->device_flags &= ~SWITCH_FOUND;
1da177e4
LT
4434 return (QLA_SUCCESS);
4435 }
e315cd28 4436 vha->device_flags |= SWITCH_FOUND;
1da177e4 4437
41dc529a
QT
4438
4439 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
4440 rval = qla2x00_send_change_request(vha, 0x3, 0);
4441 if (rval != QLA_SUCCESS)
4442 ql_log(ql_log_warn, vha, 0x121,
4443 "Failed to enable receiving of RSCN requests: 0x%x.\n",
4444 rval);
4445 }
4446
4447
1da177e4 4448 do {
726b8548
QT
4449 qla2x00_mgmt_svr_login(vha);
4450
cca5335c
AV
4451 /* FDMI support. */
4452 if (ql2xfdmienable &&
e315cd28
AC
4453 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
4454 qla2x00_fdmi_register(vha);
cca5335c 4455
1da177e4 4456 /* Ensure we are logged into the SNS. */
e428924c 4457 if (IS_FWI2_CAPABLE(ha))
0107109e
AV
4458 loop_id = NPH_SNS;
4459 else
4460 loop_id = SIMPLE_NAME_SERVER;
0b91d116
CD
4461 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
4462 0xfc, mb, BIT_1|BIT_0);
4463 if (rval != QLA_SUCCESS) {
4464 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
e452ceb6 4465 return rval;
0b91d116 4466 }
1da177e4 4467 if (mb[0] != MBS_COMMAND_COMPLETE) {
83548fe2 4468 ql_dbg(ql_dbg_disc, vha, 0x20a1,
7c3df132
SK
4469 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
4470 "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
4471 mb[2], mb[6], mb[7]);
1da177e4
LT
4472 return (QLA_SUCCESS);
4473 }
4474
e315cd28
AC
4475 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
4476 if (qla2x00_rft_id(vha)) {
1da177e4 4477 /* EMPTY */
83548fe2 4478 ql_dbg(ql_dbg_disc, vha, 0x20a2,
7c3df132 4479 "Register FC-4 TYPE failed.\n");
b98ae0d7
QT
4480 if (test_bit(LOOP_RESYNC_NEEDED,
4481 &vha->dpc_flags))
4482 break;
1da177e4 4483 }
e315cd28 4484 if (qla2x00_rff_id(vha)) {
1da177e4 4485 /* EMPTY */
83548fe2 4486 ql_dbg(ql_dbg_disc, vha, 0x209a,
7c3df132 4487 "Register FC-4 Features failed.\n");
b98ae0d7
QT
4488 if (test_bit(LOOP_RESYNC_NEEDED,
4489 &vha->dpc_flags))
4490 break;
1da177e4 4491 }
e315cd28 4492 if (qla2x00_rnn_id(vha)) {
1da177e4 4493 /* EMPTY */
83548fe2 4494 ql_dbg(ql_dbg_disc, vha, 0x2104,
7c3df132 4495 "Register Node Name failed.\n");
b98ae0d7
QT
4496 if (test_bit(LOOP_RESYNC_NEEDED,
4497 &vha->dpc_flags))
4498 break;
e315cd28 4499 } else if (qla2x00_rsnn_nn(vha)) {
1da177e4 4500 /* EMPTY */
83548fe2 4501 ql_dbg(ql_dbg_disc, vha, 0x209b,
7c3df132 4502 "Register Symobilic Node Name failed.\n");
b98ae0d7
QT
4503 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4504 break;
1da177e4
LT
4505 }
4506 }
4507
827210ba
JC
4508 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4509 fcport->scan_state = QLA_FCPORT_SCAN;
4510 }
4511
df673274
AP
4512 /* Mark the time right before querying FW for connected ports.
4513 * This process is long, asynchronous and by the time it's done,
 4514 * collected information might not be accurate anymore. E.g., a
 4515 * disconnected port might have re-connected and a brand new
 4516 * session has been created. In this case the session's generation
4517 * will be newer than discovery_gen. */
4518 qlt_do_generation_tick(vha, &discovery_gen);
4519
726b8548 4520 rval = qla2x00_find_all_fabric_devs(vha);
1da177e4
LT
4521 if (rval != QLA_SUCCESS)
4522 break;
1da177e4
LT
4523 } while (0);
4524
726b8548 4525 if (rval)
7c3df132
SK
4526 ql_dbg(ql_dbg_disc, vha, 0x2068,
4527 "Configure fabric error exit rval=%d.\n", rval);
1da177e4
LT
4528
4529 return (rval);
4530}
4531
1da177e4
LT
4532/*
4533 * qla2x00_find_all_fabric_devs
4534 *
4535 * Input:
4536 * ha = adapter block pointer.
4537 * dev = database device entry pointer.
4538 *
4539 * Returns:
4540 * 0 = success.
4541 *
4542 * Context:
4543 * Kernel context.
4544 */
4545static int
726b8548 4546qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
1da177e4
LT
4547{
4548 int rval;
4549 uint16_t loop_id;
726b8548 4550 fc_port_t *fcport, *new_fcport;
1da177e4
LT
4551 int found;
4552
4553 sw_info_t *swl;
4554 int swl_idx;
4555 int first_dev, last_dev;
1516ef44 4556 port_id_t wrap = {}, nxt_d_id;
e315cd28 4557 struct qla_hw_data *ha = vha->hw;
bb4cf5b7 4558 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
726b8548 4559 unsigned long flags;
1da177e4
LT
4560
4561 rval = QLA_SUCCESS;
4562
4563 /* Try GID_PT to get device list, else GAN. */
7a67735b 4564 if (!ha->swl)
642ef983 4565 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
7a67735b
AV
4566 GFP_KERNEL);
4567 swl = ha->swl;
bbfbbbc1 4568 if (!swl) {
1da177e4 4569 /*EMPTY*/
83548fe2 4570 ql_dbg(ql_dbg_disc, vha, 0x209c,
7c3df132 4571 "GID_PT allocations failed, fallback on GA_NXT.\n");
1da177e4 4572 } else {
642ef983 4573 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
e315cd28 4574 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
1da177e4 4575 swl = NULL;
b98ae0d7
QT
4576 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4577 return rval;
e315cd28 4578 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 4579 swl = NULL;
b98ae0d7
QT
4580 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4581 return rval;
e315cd28 4582 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
1da177e4 4583 swl = NULL;
b98ae0d7
QT
4584 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4585 return rval;
726b8548
QT
4586 } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
4587 swl = NULL;
b98ae0d7
QT
4588 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4589 return rval;
1da177e4 4590 }
e8c72ba5
CD
4591
4592 /* If other queries succeeded probe for FC-4 type */
b98ae0d7 4593 if (swl) {
e8c72ba5 4594 qla2x00_gff_id(vha, swl);
b98ae0d7
QT
4595 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4596 return rval;
4597 }
1da177e4
LT
4598 }
4599 swl_idx = 0;
4600
4601 /* Allocate temporary fcport for any new fcports discovered. */
e315cd28 4602 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 4603 if (new_fcport == NULL) {
83548fe2 4604 ql_log(ql_log_warn, vha, 0x209d,
7c3df132 4605 "Failed to allocate memory for fcport.\n");
1da177e4
LT
4606 return (QLA_MEMORY_ALLOC_FAILED);
4607 }
4608 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
1da177e4
LT
4609 /* Set start port ID scan at adapter ID. */
4610 first_dev = 1;
4611 last_dev = 0;
4612
4613 /* Starting free loop ID. */
e315cd28
AC
4614 loop_id = ha->min_external_loopid;
4615 for (; loop_id <= ha->max_loop_id; loop_id++) {
4616 if (qla2x00_is_reserved_id(vha, loop_id))
1da177e4
LT
4617 continue;
4618
3a6478df
GM
4619 if (ha->current_topology == ISP_CFG_FL &&
4620 (atomic_read(&vha->loop_down_timer) ||
4621 LOOP_TRANSITION(vha))) {
bb2d52b2
AV
4622 atomic_set(&vha->loop_down_timer, 0);
4623 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4624 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4 4625 break;
bb2d52b2 4626 }
1da177e4
LT
4627
4628 if (swl != NULL) {
4629 if (last_dev) {
4630 wrap.b24 = new_fcport->d_id.b24;
4631 } else {
4632 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
4633 memcpy(new_fcport->node_name,
4634 swl[swl_idx].node_name, WWN_SIZE);
4635 memcpy(new_fcport->port_name,
4636 swl[swl_idx].port_name, WWN_SIZE);
d8b45213
AV
4637 memcpy(new_fcport->fabric_port_name,
4638 swl[swl_idx].fabric_port_name, WWN_SIZE);
4639 new_fcport->fp_speed = swl[swl_idx].fp_speed;
e8c72ba5 4640 new_fcport->fc4_type = swl[swl_idx].fc4_type;
1da177e4
LT
4641
4642 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
4643 last_dev = 1;
4644 }
4645 swl_idx++;
4646 }
4647 } else {
4648 /* Send GA_NXT to the switch */
e315cd28 4649 rval = qla2x00_ga_nxt(vha, new_fcport);
1da177e4 4650 if (rval != QLA_SUCCESS) {
83548fe2 4651 ql_log(ql_log_warn, vha, 0x209e,
7c3df132
SK
4652 "SNS scan failed -- assuming "
4653 "zero-entry result.\n");
1da177e4
LT
4654 rval = QLA_SUCCESS;
4655 break;
4656 }
4657 }
4658
4659 /* If wrap on switch device list, exit. */
4660 if (first_dev) {
4661 wrap.b24 = new_fcport->d_id.b24;
4662 first_dev = 0;
4663 } else if (new_fcport->d_id.b24 == wrap.b24) {
83548fe2 4664 ql_dbg(ql_dbg_disc, vha, 0x209f,
7c3df132
SK
4665 "Device wrap (%02x%02x%02x).\n",
4666 new_fcport->d_id.b.domain,
4667 new_fcport->d_id.b.area,
4668 new_fcport->d_id.b.al_pa);
1da177e4
LT
4669 break;
4670 }
4671
2c3dfe3f 4672 /* Bypass if same physical adapter. */
e315cd28 4673 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
1da177e4
LT
4674 continue;
4675
2c3dfe3f 4676 /* Bypass virtual ports of the same host. */
bb4cf5b7
CD
4677 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
4678 continue;
2c3dfe3f 4679
f7d289f6
AV
4680 /* Bypass if same domain and area of adapter. */
4681 if (((new_fcport->d_id.b24 & 0xffff00) ==
e315cd28 4682 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
f7d289f6
AV
4683 ISP_CFG_FL)
4684 continue;
4685
1da177e4
LT
4686 /* Bypass reserved domain fields. */
4687 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
4688 continue;
4689
e8c72ba5 4690 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
4da26e16
CD
4691 if (ql2xgffidenable &&
4692 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
4693 new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
e8c72ba5
CD
4694 continue;
4695
726b8548
QT
4696 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4697
1da177e4
LT
4698 /* Locate matching device in database. */
4699 found = 0;
e315cd28 4700 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1da177e4
LT
4701 if (memcmp(new_fcport->port_name, fcport->port_name,
4702 WWN_SIZE))
4703 continue;
4704
827210ba 4705 fcport->scan_state = QLA_FCPORT_FOUND;
b3b02e6e 4706
1da177e4
LT
4707 found++;
4708
d8b45213
AV
4709 /* Update port state. */
4710 memcpy(fcport->fabric_port_name,
4711 new_fcport->fabric_port_name, WWN_SIZE);
4712 fcport->fp_speed = new_fcport->fp_speed;
4713
1da177e4 4714 /*
b2032fd5
RD
4715 * If address the same and state FCS_ONLINE
4716 * (or in target mode), nothing changed.
1da177e4
LT
4717 */
4718 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
b2032fd5 4719 (atomic_read(&fcport->state) == FCS_ONLINE ||
726b8548 4720 (vha->host->active_mode == MODE_TARGET))) {
1da177e4
LT
4721 break;
4722 }
4723
4724 /*
4725 * If device was not a fabric device before.
4726 */
4727 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
4728 fcport->d_id.b24 = new_fcport->d_id.b24;
5f16b331 4729 qla2x00_clear_loop_id(fcport);
1da177e4
LT
4730 fcport->flags |= (FCF_FABRIC_DEVICE |
4731 FCF_LOGIN_NEEDED);
1da177e4
LT
4732 break;
4733 }
4734
4735 /*
4736 * Port ID changed or device was marked to be updated;
4737 * Log it out if still logged in and mark it for
4738 * relogin later.
4739 */
726b8548 4740 if (qla_tgt_mode_enabled(base_vha)) {
b2032fd5
RD
4741 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
4742 "port changed FC ID, %8phC"
4743 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
4744 fcport->port_name,
4745 fcport->d_id.b.domain,
4746 fcport->d_id.b.area,
4747 fcport->d_id.b.al_pa,
4748 fcport->loop_id,
4749 new_fcport->d_id.b.domain,
4750 new_fcport->d_id.b.area,
4751 new_fcport->d_id.b.al_pa);
4752 fcport->d_id.b24 = new_fcport->d_id.b24;
4753 break;
4754 }
4755
1da177e4
LT
4756 fcport->d_id.b24 = new_fcport->d_id.b24;
4757 fcport->flags |= FCF_LOGIN_NEEDED;
1da177e4
LT
4758 break;
4759 }
4760
726b8548
QT
4761 if (found) {
4762 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1da177e4 4763 continue;
726b8548 4764 }
1da177e4 4765 /* If device was not in our fcports list, then add it. */
b2032fd5 4766 new_fcport->scan_state = QLA_FCPORT_FOUND;
726b8548
QT
4767 list_add_tail(&new_fcport->list, &vha->vp_fcports);
4768
4769 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4770
1da177e4
LT
4771
4772 /* Allocate a new replacement fcport. */
4773 nxt_d_id.b24 = new_fcport->d_id.b24;
e315cd28 4774 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1da177e4 4775 if (new_fcport == NULL) {
83548fe2 4776 ql_log(ql_log_warn, vha, 0xd032,
7c3df132 4777 "Memory allocation failed for fcport.\n");
1da177e4
LT
4778 return (QLA_MEMORY_ALLOC_FAILED);
4779 }
4780 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
4781 new_fcport->d_id.b24 = nxt_d_id.b24;
4782 }
4783
726b8548
QT
4784 qla2x00_free_fcport(new_fcport);
4785
4786 /*
4787 * Logout all previous fabric dev marked lost, except FCP2 devices.
4788 */
4789 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4790 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4791 break;
4792
4793 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
4794 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
4795 continue;
4796
4797 if (fcport->scan_state == QLA_FCPORT_SCAN) {
4798 if ((qla_dual_mode_enabled(vha) ||
4799 qla_ini_mode_enabled(vha)) &&
4800 atomic_read(&fcport->state) == FCS_ONLINE) {
4801 qla2x00_mark_device_lost(vha, fcport,
4802 ql2xplogiabsentdevice, 0);
4803 if (fcport->loop_id != FC_NO_LOOP_ID &&
4804 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
4805 fcport->port_type != FCT_INITIATOR &&
4806 fcport->port_type != FCT_BROADCAST) {
83548fe2 4807 ql_dbg(ql_dbg_disc, vha, 0x20f0,
726b8548
QT
4808 "%s %d %8phC post del sess\n",
4809 __func__, __LINE__,
4810 fcport->port_name);
4811
4812 qlt_schedule_sess_for_deletion_lock
4813 (fcport);
4814 continue;
4815 }
4816 }
4817 }
1da177e4 4818
726b8548
QT
4819 if (fcport->scan_state == QLA_FCPORT_FOUND)
4820 qla24xx_fcport_handle_login(vha, fcport);
4821 }
1da177e4
LT
4822 return (rval);
4823}
4824
4825/*
4826 * qla2x00_find_new_loop_id
4827 * Scan through our port list and find a new usable loop ID.
4828 *
4829 * Input:
4830 * ha: adapter state pointer.
4831 * dev: port structure pointer.
4832 *
4833 * Returns:
4834 * qla2x00 local function return status code.
4835 *
4836 * Context:
4837 * Kernel context.
4838 */
03bcfb57 4839int
e315cd28 4840qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
1da177e4
LT
4841{
4842 int rval;
e315cd28 4843 struct qla_hw_data *ha = vha->hw;
feafb7b1 4844 unsigned long flags = 0;
1da177e4
LT
4845
4846 rval = QLA_SUCCESS;
4847
5f16b331 4848 spin_lock_irqsave(&ha->vport_slock, flags);
1da177e4 4849
5f16b331
CD
4850 dev->loop_id = find_first_zero_bit(ha->loop_id_map,
4851 LOOPID_MAP_SIZE);
4852 if (dev->loop_id >= LOOPID_MAP_SIZE ||
4853 qla2x00_is_reserved_id(vha, dev->loop_id)) {
4854 dev->loop_id = FC_NO_LOOP_ID;
4855 rval = QLA_FUNCTION_FAILED;
4856 } else
4857 set_bit(dev->loop_id, ha->loop_id_map);
1da177e4 4858
5f16b331 4859 spin_unlock_irqrestore(&ha->vport_slock, flags);
1da177e4 4860
5f16b331
CD
4861 if (rval == QLA_SUCCESS)
4862 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
4863 "Assigning new loopid=%x, portid=%x.\n",
4864 dev->loop_id, dev->d_id.b24);
4865 else
4866 ql_log(ql_log_warn, dev->vha, 0x2087,
4867 "No loop_id's available, portid=%x.\n",
4868 dev->d_id.b24);
1da177e4
LT
4869
4870 return (rval);
4871}
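
/*
 * Generic sketch of the bitmap allocator used above (hypothetical helpers,
 * not driver code): find a free bit, claim it, and release it with
 * clear_bit() when the ID is retired -- the same scheme the driver applies
 * to ha->loop_id_map under ha->vport_slock.
 */
static int __maybe_unused example_claim_id(unsigned long *map, unsigned int size)
{
        unsigned int id = find_first_zero_bit(map, size);

        if (id >= size)
                return -1;              /* map exhausted */

        set_bit(id, map);               /* claim the ID */
        return id;
}

static void __maybe_unused example_release_id(unsigned long *map, int id)
{
        clear_bit(id, map);             /* make the ID reusable */
}
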
4872
1da177e4
LT
4873
4874/*
4875 * qla2x00_fabric_login
4876 * Issue fabric login command.
4877 *
4878 * Input:
4879 * ha = adapter block pointer.
4880 * device = pointer to FC device type structure.
4881 *
4882 * Returns:
 4883 * 0 - Login successful
4884 * 1 - Login failed
4885 * 2 - Initiator device
4886 * 3 - Fatal error
4887 */
4888int
e315cd28 4889qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
1da177e4
LT
4890 uint16_t *next_loopid)
4891{
4892 int rval;
4893 int retry;
4894 uint16_t tmp_loopid;
4895 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28 4896 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
4897
4898 retry = 0;
4899 tmp_loopid = 0;
4900
4901 for (;;) {
7c3df132
SK
4902 ql_dbg(ql_dbg_disc, vha, 0x2000,
4903 "Trying Fabric Login w/loop id 0x%04x for port "
4904 "%02x%02x%02x.\n",
4905 fcport->loop_id, fcport->d_id.b.domain,
4906 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
4907
4908 /* Login fcport on switch. */
0b91d116 4909 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
1da177e4
LT
4910 fcport->d_id.b.domain, fcport->d_id.b.area,
4911 fcport->d_id.b.al_pa, mb, BIT_0);
0b91d116
CD
4912 if (rval != QLA_SUCCESS) {
4913 return rval;
4914 }
1da177e4
LT
4915 if (mb[0] == MBS_PORT_ID_USED) {
4916 /*
4917 * Device has another loop ID. The firmware team
0107109e
AV
4918 * recommends the driver perform an implicit login with
 4919 * the specified ID again. The ID we just used is saved
 4920 * here so we return with an ID that can be tried by
4921 * the next login.
1da177e4
LT
4922 */
4923 retry++;
4924 tmp_loopid = fcport->loop_id;
4925 fcport->loop_id = mb[1];
4926
7c3df132
SK
4927 ql_dbg(ql_dbg_disc, vha, 0x2001,
4928 "Fabric Login: port in use - next loop "
4929 "id=0x%04x, port id= %02x%02x%02x.\n",
1da177e4 4930 fcport->loop_id, fcport->d_id.b.domain,
7c3df132 4931 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1da177e4
LT
4932
4933 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
4934 /*
4935 * Login succeeded.
4936 */
4937 if (retry) {
4938 /* A retry occurred before. */
4939 *next_loopid = tmp_loopid;
4940 } else {
4941 /*
4942 * No retry occurred before. Just increment the
4943 * ID value for next login.
4944 */
4945 *next_loopid = (fcport->loop_id + 1);
4946 }
4947
4948 if (mb[1] & BIT_0) {
4949 fcport->port_type = FCT_INITIATOR;
4950 } else {
4951 fcport->port_type = FCT_TARGET;
4952 if (mb[1] & BIT_1) {
8474f3a0 4953 fcport->flags |= FCF_FCP2_DEVICE;
1da177e4
LT
4954 }
4955 }
4956
ad3e0eda
AV
4957 if (mb[10] & BIT_0)
4958 fcport->supported_classes |= FC_COS_CLASS2;
4959 if (mb[10] & BIT_1)
4960 fcport->supported_classes |= FC_COS_CLASS3;
4961
2d70c103
NB
4962 if (IS_FWI2_CAPABLE(ha)) {
4963 if (mb[10] & BIT_7)
4964 fcport->flags |=
4965 FCF_CONF_COMP_SUPPORTED;
4966 }
4967
1da177e4
LT
4968 rval = QLA_SUCCESS;
4969 break;
4970 } else if (mb[0] == MBS_LOOP_ID_USED) {
4971 /*
4972 * Loop ID already used, try next loop ID.
4973 */
4974 fcport->loop_id++;
e315cd28 4975 rval = qla2x00_find_new_loop_id(vha, fcport);
1da177e4
LT
4976 if (rval != QLA_SUCCESS) {
4977 /* Ran out of loop IDs to use */
4978 break;
4979 }
4980 } else if (mb[0] == MBS_COMMAND_ERROR) {
4981 /*
4982 * Firmware possibly timed out during login. If NO
4983 * retries are left to do then the device is declared
4984 * dead.
4985 */
4986 *next_loopid = fcport->loop_id;
e315cd28 4987 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
4988 fcport->d_id.b.domain, fcport->d_id.b.area,
4989 fcport->d_id.b.al_pa);
e315cd28 4990 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1da177e4
LT
4991
4992 rval = 1;
4993 break;
4994 } else {
4995 /*
4996 * unrecoverable / not handled error
4997 */
7c3df132
SK
4998 ql_dbg(ql_dbg_disc, vha, 0x2002,
4999 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
5000 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
5001 fcport->d_id.b.area, fcport->d_id.b.al_pa,
5002 fcport->loop_id, jiffies);
1da177e4
LT
5003
5004 *next_loopid = fcport->loop_id;
e315cd28 5005 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
1c7c6357
AV
5006 fcport->d_id.b.domain, fcport->d_id.b.area,
5007 fcport->d_id.b.al_pa);
5f16b331 5008 qla2x00_clear_loop_id(fcport);
0eedfcf0 5009 fcport->login_retry = 0;
1da177e4
LT
5010
5011 rval = 3;
5012 break;
5013 }
5014 }
5015
5016 return (rval);
5017}
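
/*
 * Hypothetical caller sketch (not in the driver) for the return codes
 * documented above: 0 = logged in, 1 = retryable failure, 2 = initiator
 * device, 3 = fatal error.
 */
static void __maybe_unused example_handle_fabric_login(scsi_qla_host_t *vha,
    fc_port_t *fcport, uint16_t *next_loopid)
{
        switch (qla2x00_fabric_login(vha, fcport, next_loopid)) {
        case 0:         /* logged in -- update and register the port */
                qla2x00_update_fcport(vha, fcport);
                break;
        case 1:         /* soft failure -- leave it marked for relogin */
        case 2:         /* remote port is an initiator -- nothing to do */
                break;
        default:        /* 3: fatal -- give up on this port */
                break;
        }
}
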
5018
5019/*
5020 * qla2x00_local_device_login
5021 * Issue local device login command.
5022 *
5023 * Input:
5024 * ha = adapter block pointer.
5025 * loop_id = loop id of device to login to.
5026 *
5027 * Returns (Where's the #define!!!!):
 5028 * 0 - Login successful
5029 * 1 - Login failed
5030 * 3 - Fatal error
5031 */
5032int
e315cd28 5033qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
1da177e4
LT
5034{
5035 int rval;
5036 uint16_t mb[MAILBOX_REGISTER_COUNT];
5037
5038 memset(mb, 0, sizeof(mb));
e315cd28 5039 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
1da177e4
LT
5040 if (rval == QLA_SUCCESS) {
5041 /* Interrogate mailbox registers for any errors */
5042 if (mb[0] == MBS_COMMAND_ERROR)
5043 rval = 1;
5044 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
5045 /* device not in PCB table */
5046 rval = 3;
5047 }
5048
5049 return (rval);
5050}
5051
5052/*
5053 * qla2x00_loop_resync
5054 * Resync with fibre channel devices.
5055 *
5056 * Input:
5057 * ha = adapter block pointer.
5058 *
5059 * Returns:
5060 * 0 = success
5061 */
5062int
e315cd28 5063qla2x00_loop_resync(scsi_qla_host_t *vha)
1da177e4 5064{
73208dfd 5065 int rval = QLA_SUCCESS;
1da177e4 5066 uint32_t wait_time;
67c2e93a
AC
5067 struct req_que *req;
5068 struct rsp_que *rsp;
5069
d7459527 5070 req = vha->req;
67c2e93a 5071 rsp = req->rsp;
1da177e4 5072
e315cd28
AC
5073 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5074 if (vha->flags.online) {
5075 if (!(rval = qla2x00_fw_ready(vha))) {
1da177e4
LT
5076 /* Wait at most MAX_TARGET RSCNs for a stable link. */
5077 wait_time = 256;
5078 do {
8ae6d9c7
GM
5079 if (!IS_QLAFX00(vha->hw)) {
5080 /*
5081 * Issue a marker after FW becomes
5082 * ready.
5083 */
5084 qla2x00_marker(vha, req, rsp, 0, 0,
5085 MK_SYNC_ALL);
5086 vha->marker_needed = 0;
5087 }
1da177e4
LT
5088
5089 /* Remap devices on Loop. */
e315cd28 5090 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4 5091
8ae6d9c7
GM
5092 if (IS_QLAFX00(vha->hw))
5093 qlafx00_configure_devices(vha);
5094 else
5095 qla2x00_configure_loop(vha);
5096
1da177e4 5097 wait_time--;
e315cd28
AC
5098 } while (!atomic_read(&vha->loop_down_timer) &&
5099 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
5100 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
5101 &vha->dpc_flags)));
1da177e4 5102 }
1da177e4
LT
5103 }
5104
e315cd28 5105 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1da177e4 5106 return (QLA_FUNCTION_FAILED);
1da177e4 5107
e315cd28 5108 if (rval)
7c3df132
SK
5109 ql_dbg(ql_dbg_disc, vha, 0x206c,
5110 "%s *** FAILED ***.\n", __func__);
1da177e4
LT
5111
5112 return (rval);
5113}
5114
579d12b5
SK
5115/*
5116* qla2x00_perform_loop_resync
5117* Description: This function will set the appropriate flags and call
 5118* qla2x00_loop_resync. If successful, the loop will be resynced.
 5119* Arguments : scsi_qla_host_t pointer
 5120* return : Success or Failure
5121*/
5122
5123int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
5124{
5125 int32_t rval = 0;
5126
5127 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
5128 /*Configure the flags so that resync happens properly*/
5129 atomic_set(&ha->loop_down_timer, 0);
5130 if (!(ha->device_flags & DFLG_NO_CABLE)) {
5131 atomic_set(&ha->loop_state, LOOP_UP);
5132 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
5133 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
5134 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
5135
5136 rval = qla2x00_loop_resync(ha);
5137 } else
5138 atomic_set(&ha->loop_state, LOOP_DEAD);
5139
5140 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
5141 }
5142
5143 return rval;
5144}
5145
d97994dc 5146void
67becc00 5147qla2x00_update_fcports(scsi_qla_host_t *base_vha)
d97994dc 5148{
5149 fc_port_t *fcport;
feafb7b1
AE
5150 struct scsi_qla_host *vha;
5151 struct qla_hw_data *ha = base_vha->hw;
5152 unsigned long flags;
d97994dc 5153
feafb7b1 5154 spin_lock_irqsave(&ha->vport_slock, flags);
d97994dc 5155 /* Go with deferred removal of rport references. */
feafb7b1
AE
5156 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
5157 atomic_inc(&vha->vref_count);
5158 list_for_each_entry(fcport, &vha->vp_fcports, list) {
8ae598d0 5159 if (fcport->drport &&
feafb7b1
AE
5160 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
5161 spin_unlock_irqrestore(&ha->vport_slock, flags);
67becc00 5162 qla2x00_rport_del(fcport);
df673274 5163
feafb7b1
AE
5164 spin_lock_irqsave(&ha->vport_slock, flags);
5165 }
5166 }
5167 atomic_dec(&vha->vref_count);
c4a9b538 5168 wake_up(&vha->vref_waitq);
feafb7b1
AE
5169 }
5170 spin_unlock_irqrestore(&ha->vport_slock, flags);
d97994dc 5171}
5172
7d613ac6
SV
5173/* Assumes idc_lock always held on entry */
5174void
5175qla83xx_reset_ownership(scsi_qla_host_t *vha)
5176{
5177 struct qla_hw_data *ha = vha->hw;
5178 uint32_t drv_presence, drv_presence_mask;
5179 uint32_t dev_part_info1, dev_part_info2, class_type;
5180 uint32_t class_type_mask = 0x3;
5181 uint16_t fcoe_other_function = 0xffff, i;
5182
7ec0effd
AD
5183 if (IS_QLA8044(ha)) {
5184 drv_presence = qla8044_rd_direct(vha,
5185 QLA8044_CRB_DRV_ACTIVE_INDEX);
5186 dev_part_info1 = qla8044_rd_direct(vha,
5187 QLA8044_CRB_DEV_PART_INFO_INDEX);
5188 dev_part_info2 = qla8044_rd_direct(vha,
5189 QLA8044_CRB_DEV_PART_INFO2);
5190 } else {
5191 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5192 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
5193 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
5194 }
7d613ac6
SV
5195 for (i = 0; i < 8; i++) {
5196 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
5197 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5198 (i != ha->portnum)) {
5199 fcoe_other_function = i;
5200 break;
5201 }
5202 }
5203 if (fcoe_other_function == 0xffff) {
5204 for (i = 0; i < 8; i++) {
5205 class_type = ((dev_part_info2 >> (i * 4)) &
5206 class_type_mask);
5207 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5208 ((i + 8) != ha->portnum)) {
5209 fcoe_other_function = i + 8;
5210 break;
5211 }
5212 }
5213 }
5214 /*
5215 * Prepare drv-presence mask based on fcoe functions present.
5216 * However consider only valid physical fcoe function numbers (0-15).
5217 */
5218 drv_presence_mask = ~((1 << (ha->portnum)) |
5219 ((fcoe_other_function == 0xffff) ?
5220 0 : (1 << (fcoe_other_function))));
5221
5222 /* We are the reset owner iff:
5223 * - No other protocol drivers present.
5224 * - This is the lowest among fcoe functions. */
5225 if (!(drv_presence & drv_presence_mask) &&
5226 (ha->portnum < fcoe_other_function)) {
5227 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
5228 "This host is Reset owner.\n");
5229 ha->flags.nic_core_reset_owner = 1;
5230 }
5231}
5232
fa492630 5233static int
7d613ac6
SV
5234__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
5235{
5236 int rval = QLA_SUCCESS;
5237 struct qla_hw_data *ha = vha->hw;
5238 uint32_t drv_ack;
5239
5240 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5241 if (rval == QLA_SUCCESS) {
5242 drv_ack |= (1 << ha->portnum);
5243 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
5244 }
5245
5246 return rval;
5247}
5248
fa492630 5249static int
7d613ac6
SV
5250__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
5251{
5252 int rval = QLA_SUCCESS;
5253 struct qla_hw_data *ha = vha->hw;
5254 uint32_t drv_ack;
5255
5256 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5257 if (rval == QLA_SUCCESS) {
5258 drv_ack &= ~(1 << ha->portnum);
5259 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
5260 }
5261
5262 return rval;
5263}
5264
fa492630 5265static const char *
7d613ac6
SV
5266qla83xx_dev_state_to_string(uint32_t dev_state)
5267{
5268 switch (dev_state) {
5269 case QLA8XXX_DEV_COLD:
5270 return "COLD/RE-INIT";
5271 case QLA8XXX_DEV_INITIALIZING:
5272 return "INITIALIZING";
5273 case QLA8XXX_DEV_READY:
5274 return "READY";
5275 case QLA8XXX_DEV_NEED_RESET:
5276 return "NEED RESET";
5277 case QLA8XXX_DEV_NEED_QUIESCENT:
5278 return "NEED QUIESCENT";
5279 case QLA8XXX_DEV_FAILED:
5280 return "FAILED";
5281 case QLA8XXX_DEV_QUIESCENT:
5282 return "QUIESCENT";
5283 default:
5284 return "Unknown";
5285 }
5286}
5287
5288/* Assumes idc-lock always held on entry */
5289void
5290qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
5291{
5292 struct qla_hw_data *ha = vha->hw;
5293 uint32_t idc_audit_reg = 0, duration_secs = 0;
5294
5295 switch (audit_type) {
5296 case IDC_AUDIT_TIMESTAMP:
5297 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
5298 idc_audit_reg = (ha->portnum) |
5299 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
5300 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
5301 break;
5302
5303 case IDC_AUDIT_COMPLETION:
5304 duration_secs = ((jiffies_to_msecs(jiffies) -
5305 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
5306 idc_audit_reg = (ha->portnum) |
5307 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
5308 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
5309 break;
5310
5311 default:
5312 ql_log(ql_log_warn, vha, 0xb078,
5313 "Invalid audit type specified.\n");
5314 break;
5315 }
5316}
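/*
 * Illustrative sketch (not part of the driver): the IDC audit register
 * written above packs the port number into the low bits, the audit type at
 * bit 7, and the timestamp or duration (in seconds) from bit 8 upwards.
 * The EXAMPLE_* constants and example_pack_idc_audit() are hypothetical
 * stand-ins for the driver's own definitions.
 */
#include <stdint.h>

#define EXAMPLE_IDC_AUDIT_TIMESTAMP	0
#define EXAMPLE_IDC_AUDIT_COMPLETION	1

static uint32_t example_pack_idc_audit(uint16_t portnum, uint32_t audit_type,
    uint32_t seconds)
{
	return (uint32_t)portnum | (audit_type << 7) | (seconds << 8);
}

/*
 * Usage: example_pack_idc_audit(1, EXAMPLE_IDC_AUDIT_COMPLETION, 42) encodes
 * port 1, a completion audit, and a 42-second duration in one register value.
 */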
5317
5318/* Assumes idc_lock always held on entry */
fa492630 5319static int
7d613ac6
SV
5320qla83xx_initiating_reset(scsi_qla_host_t *vha)
5321{
5322 struct qla_hw_data *ha = vha->hw;
5323 uint32_t idc_control, dev_state;
5324
5325 __qla83xx_get_idc_control(vha, &idc_control);
5326 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
5327 ql_log(ql_log_info, vha, 0xb080,
5328 "NIC Core reset has been disabled. idc-control=0x%x\n",
5329 idc_control);
5330 return QLA_FUNCTION_FAILED;
5331 }
5332
5333 /* Set NEED-RESET iff in READY state and we are the reset-owner */
5334 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5335 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
5336 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
5337 QLA8XXX_DEV_NEED_RESET);
5338 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
5339 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
5340 } else {
5341 const char *state = qla83xx_dev_state_to_string(dev_state);
5342 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
5343
5344 /* SV: XXX: Is timeout required here? */
5345 /* Wait for IDC state change READY -> NEED_RESET */
5346 while (dev_state == QLA8XXX_DEV_READY) {
5347 qla83xx_idc_unlock(vha, 0);
5348 msleep(200);
5349 qla83xx_idc_lock(vha, 0);
5350 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5351 }
5352 }
5353
5354 /* Send IDC ack by writing to drv-ack register */
5355 __qla83xx_set_drv_ack(vha);
5356
5357 return QLA_SUCCESS;
5358}
5359
5360int
5361__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
5362{
5363 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
5364}
5365
7d613ac6
SV
5366int
5367__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
5368{
5369 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
5370}
5371
fa492630 5372static int
7d613ac6
SV
5373qla83xx_check_driver_presence(scsi_qla_host_t *vha)
5374{
5375 uint32_t drv_presence = 0;
5376 struct qla_hw_data *ha = vha->hw;
5377
5378 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5379 if (drv_presence & (1 << ha->portnum))
5380 return QLA_SUCCESS;
5381 else
5382 return QLA_TEST_FAILED;
5383}
5384
5385int
5386qla83xx_nic_core_reset(scsi_qla_host_t *vha)
5387{
5388 int rval = QLA_SUCCESS;
5389 struct qla_hw_data *ha = vha->hw;
5390
5391 ql_dbg(ql_dbg_p3p, vha, 0xb058,
5392 "Entered %s().\n", __func__);
5393
5394 if (vha->device_flags & DFLG_DEV_FAILED) {
5395 ql_log(ql_log_warn, vha, 0xb059,
5396 "Device in unrecoverable FAILED state.\n");
5397 return QLA_FUNCTION_FAILED;
5398 }
5399
5400 qla83xx_idc_lock(vha, 0);
5401
5402 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
5403 ql_log(ql_log_warn, vha, 0xb05a,
5404 "Function=0x%x has been removed from IDC participation.\n",
5405 ha->portnum);
5406 rval = QLA_FUNCTION_FAILED;
5407 goto exit;
5408 }
5409
5410 qla83xx_reset_ownership(vha);
5411
5412 rval = qla83xx_initiating_reset(vha);
5413
5414 /*
5415 * Perform reset if we are the reset-owner,
5416 * else wait till IDC state changes to READY/FAILED.
5417 */
5418 if (rval == QLA_SUCCESS) {
5419 rval = qla83xx_idc_state_handler(vha);
5420
5421 if (rval == QLA_SUCCESS)
5422 ha->flags.nic_core_hung = 0;
5423 __qla83xx_clear_drv_ack(vha);
5424 }
5425
5426exit:
5427 qla83xx_idc_unlock(vha, 0);
5428
5429 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
5430
5431 return rval;
5432}
5433
81178772
SK
5434int
5435qla2xxx_mctp_dump(scsi_qla_host_t *vha)
5436{
5437 struct qla_hw_data *ha = vha->hw;
5438 int rval = QLA_FUNCTION_FAILED;
5439
5440 if (!IS_MCTP_CAPABLE(ha)) {
5441 /* This message can be removed from the final version */
5442 ql_log(ql_log_info, vha, 0x506d,
5443 "This board is not MCTP capable\n");
5444 return rval;
5445 }
5446
5447 if (!ha->mctp_dump) {
5448 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
5449 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
5450
5451 if (!ha->mctp_dump) {
5452 ql_log(ql_log_warn, vha, 0x506e,
5453 "Failed to allocate memory for mctp dump\n");
5454 return rval;
5455 }
5456 }
5457
5458#define MCTP_DUMP_STR_ADDR 0x00000000
5459 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
5460 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
5461 if (rval != QLA_SUCCESS) {
5462 ql_log(ql_log_warn, vha, 0x506f,
5463 "Failed to capture mctp dump\n");
5464 } else {
5465 ql_log(ql_log_info, vha, 0x5070,
5466 "Mctp dump capture for host (%ld/%p).\n",
5467 vha->host_no, ha->mctp_dump);
5468 ha->mctp_dumped = 1;
5469 }
5470
409ee0fe 5471 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
81178772
SK
5472 ha->flags.nic_core_reset_hdlr_active = 1;
5473 rval = qla83xx_restart_nic_firmware(vha);
5474 if (rval)
5475 /* NIC Core reset failed. */
5476 ql_log(ql_log_warn, vha, 0x5071,
5477 "Failed to restart nic firmware\n");
5478 else
5479 ql_dbg(ql_dbg_p3p, vha, 0xb084,
5480 "Restarted NIC firmware successfully.\n");
5481 ha->flags.nic_core_reset_hdlr_active = 0;
5482 }
5483
5484 return rval;
5485
5486}
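/*
 * Illustrative sketch (not part of the driver): the MCTP capture above uses
 * an allocate-once buffer -- ha->mctp_dump is obtained with
 * dma_alloc_coherent() the first time a dump is requested and then reused.
 * The userspace analogy below (malloc() instead of a coherent DMA buffer)
 * shows the same lazily-allocated, reusable buffer pattern; the names are
 * hypothetical.
 */
#include <stdlib.h>

#define EXAMPLE_DUMP_SIZE	4096

static void *example_dump_buf;

static void *example_get_dump_buf(void)
{
	/* Allocate on first use only; later callers reuse the same buffer. */
	if (!example_dump_buf)
		example_dump_buf = malloc(EXAMPLE_DUMP_SIZE);
	return example_dump_buf;	/* NULL if the allocation failed */
}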
5487
579d12b5 5488/*
8fcd6b8b 5489* qla2x00_quiesce_io
579d12b5
SK
5490* Description: This function will block new I/Os.
5491* It does not abort any I/Os because the context
5492* is not destroyed during quiescence.
5493* Arguments: scsi_qla_host_t
5494* Return: void
5495*/
5496void
8fcd6b8b 5497qla2x00_quiesce_io(scsi_qla_host_t *vha)
579d12b5
SK
5498{
5499 struct qla_hw_data *ha = vha->hw;
5500 struct scsi_qla_host *vp;
5501
8fcd6b8b
CD
5502 ql_dbg(ql_dbg_dpc, vha, 0x401d,
5503 "Quiescing I/O - ha=%p.\n", ha);
579d12b5
SK
5504
5505 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
5506 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
5507 atomic_set(&vha->loop_state, LOOP_DOWN);
5508 qla2x00_mark_all_devices_lost(vha, 0);
5509 list_for_each_entry(vp, &ha->vp_list, list)
8fcd6b8b 5510 qla2x00_mark_all_devices_lost(vp, 0);
579d12b5
SK
5511 } else {
5512 if (!atomic_read(&vha->loop_down_timer))
5513 atomic_set(&vha->loop_down_timer,
5514 LOOP_DOWN_TIME);
5515 }
5516 /* Wait for pending cmds to complete */
5517 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
5518}
5519
a9083016
GM
5520void
5521qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
5522{
5523 struct qla_hw_data *ha = vha->hw;
579d12b5 5524 struct scsi_qla_host *vp;
feafb7b1 5525 unsigned long flags;
6aef87be 5526 fc_port_t *fcport;
7c3f8fd1 5527 u16 i;
a9083016 5528
e46ef004
SK
5529 /* For ISP82XX, the driver waits for completion of the commands,
5530 * so the online flag should remain set.
5531 */
7ec0effd 5532 if (!(IS_P3P_TYPE(ha)))
e46ef004 5533 vha->flags.online = 0;
a9083016
GM
5534 ha->flags.chip_reset_done = 0;
5535 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2be21fa2 5536 vha->qla_stats.total_isp_aborts++;
a9083016 5537
7c3df132
SK
5538 ql_log(ql_log_info, vha, 0x00af,
5539 "Performing ISP error recovery - ha=%p.\n", ha);
a9083016 5540
e46ef004
SK
5541 /* For ISP82XX, reset_chip only disables interrupts.
5542 * The driver waits for the completion of the commands,
5543 * so the interrupts need to remain enabled.
5544 */
7ec0effd 5545 if (!(IS_P3P_TYPE(ha)))
a9083016
GM
5546 ha->isp_ops->reset_chip(vha);
5547
ec7193e2
QT
5548 ha->flags.n2n_ae = 0;
5549 ha->flags.lip_ae = 0;
5550 ha->current_topology = 0;
5551 ha->flags.fw_started = 0;
5552 ha->flags.fw_init_done = 0;
7c3f8fd1
QT
5553 ha->base_qpair->chip_reset++;
5554 for (i = 0; i < ha->max_qpairs; i++) {
5555 if (ha->queue_pair_map[i])
5556 ha->queue_pair_map[i]->chip_reset =
5557 ha->base_qpair->chip_reset;
5558 }
726b8548 5559
a9083016
GM
5560 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
5561 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
5562 atomic_set(&vha->loop_state, LOOP_DOWN);
5563 qla2x00_mark_all_devices_lost(vha, 0);
feafb7b1
AE
5564
5565 spin_lock_irqsave(&ha->vport_slock, flags);
579d12b5 5566 list_for_each_entry(vp, &ha->vp_list, list) {
feafb7b1
AE
5567 atomic_inc(&vp->vref_count);
5568 spin_unlock_irqrestore(&ha->vport_slock, flags);
5569
a9083016 5570 qla2x00_mark_all_devices_lost(vp, 0);
feafb7b1
AE
5571
5572 spin_lock_irqsave(&ha->vport_slock, flags);
5573 atomic_dec(&vp->vref_count);
5574 }
5575 spin_unlock_irqrestore(&ha->vport_slock, flags);
a9083016
GM
5576 } else {
5577 if (!atomic_read(&vha->loop_down_timer))
5578 atomic_set(&vha->loop_down_timer,
5579 LOOP_DOWN_TIME);
5580 }
5581
6aef87be
AV
5582 /* Clear all async request states across all VPs. */
5583 list_for_each_entry(fcport, &vha->vp_fcports, list)
5584 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
5585 spin_lock_irqsave(&ha->vport_slock, flags);
5586 list_for_each_entry(vp, &ha->vp_list, list) {
5587 atomic_inc(&vp->vref_count);
5588 spin_unlock_irqrestore(&ha->vport_slock, flags);
5589
5590 list_for_each_entry(fcport, &vp->vp_fcports, list)
5591 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
5592
5593 spin_lock_irqsave(&ha->vport_slock, flags);
5594 atomic_dec(&vp->vref_count);
5595 }
5596 spin_unlock_irqrestore(&ha->vport_slock, flags);
5597
bddd2d65
LC
5598 if (!ha->flags.eeh_busy) {
5599 /* Make sure for ISP 82XX IO DMA is complete */
7ec0effd 5600 if (IS_P3P_TYPE(ha)) {
7190575f 5601 qla82xx_chip_reset_cleanup(vha);
7c3df132
SK
5602 ql_log(ql_log_info, vha, 0x00b4,
5603 "Done chip reset cleanup.\n");
a9083016 5604
e46ef004
SK
5605 /* Done waiting for pending commands.
5606 * Reset the online flag.
5607 */
5608 vha->flags.online = 0;
4d78c973 5609 }
a9083016 5610
bddd2d65
LC
5611 /* Requeue all commands in outstanding command list. */
5612 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
5613 }
b6a029e1
AE
5614 /* memory barrier */
5615 wmb();
a9083016
GM
5616}
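/*
 * Illustrative sketch (not part of the driver): the cleanup above bumps the
 * base queue pair's chip_reset generation counter and stamps every allocated
 * queue pair with the same value, so per-queue code can later recognise
 * commands that predate the reset.  The struct and function names below are
 * hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct example_qpair {
	uint32_t chip_reset;	/* generation counter per queue pair */
};

static void example_propagate_chip_reset(struct example_qpair *base,
    struct example_qpair **map, size_t max_qpairs)
{
	size_t i;

	base->chip_reset++;
	for (i = 0; i < max_qpairs; i++)
		if (map[i])
			map[i]->chip_reset = base->chip_reset;
}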
5617
1da177e4
LT
5618/*
5619* qla2x00_abort_isp
5620* Resets ISP and aborts all outstanding commands.
5621*
5622* Input:
5623* ha = adapter block pointer.
5624*
5625* Returns:
5626* 0 = success
5627*/
5628int
e315cd28 5629qla2x00_abort_isp(scsi_qla_host_t *vha)
1da177e4 5630{
476e8978 5631 int rval;
1da177e4 5632 uint8_t status = 0;
e315cd28
AC
5633 struct qla_hw_data *ha = vha->hw;
5634 struct scsi_qla_host *vp;
73208dfd 5635 struct req_que *req = ha->req_q_map[0];
feafb7b1 5636 unsigned long flags;
1da177e4 5637
e315cd28 5638 if (vha->flags.online) {
a9083016 5639 qla2x00_abort_isp_cleanup(vha);
1da177e4 5640
a6171297
SV
5641 if (IS_QLA8031(ha)) {
5642 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
5643 "Clearing fcoe driver presence.\n");
5644 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
5645 ql_dbg(ql_dbg_p3p, vha, 0xb073,
5646 "Error while clearing DRV-Presence.\n");
5647 }
5648
85880801
AV
5649 if (unlikely(pci_channel_offline(ha->pdev) &&
5650 ha->flags.pci_channel_io_perm_failure)) {
5651 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5652 status = 0;
5653 return status;
5654 }
5655
73208dfd 5656 ha->isp_ops->get_flash_version(vha, req->ring);
30c47662 5657
e315cd28 5658 ha->isp_ops->nvram_config(vha);
1da177e4 5659
e315cd28
AC
5660 if (!qla2x00_restart_isp(vha)) {
5661 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4 5662
e315cd28 5663 if (!atomic_read(&vha->loop_down_timer)) {
1da177e4
LT
5664 /*
5665 * Issue marker command only when we are going
5666 * to start the I/O.
5667 */
e315cd28 5668 vha->marker_needed = 1;
1da177e4
LT
5669 }
5670
e315cd28 5671 vha->flags.online = 1;
1da177e4 5672
fd34f556 5673 ha->isp_ops->enable_intrs(ha);
1da177e4 5674
fa2a1ce5 5675 ha->isp_abort_cnt = 0;
e315cd28 5676 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
476e8978 5677
6246b8a1
GM
5678 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
5679 qla2x00_get_fw_version(vha);
df613b96
AV
5680 if (ha->fce) {
5681 ha->flags.fce_enabled = 1;
5682 memset(ha->fce, 0,
5683 fce_calc_size(ha->fce_bufs));
e315cd28 5684 rval = qla2x00_enable_fce_trace(vha,
df613b96
AV
5685 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5686 &ha->fce_bufs);
5687 if (rval) {
7c3df132 5688 ql_log(ql_log_warn, vha, 0x8033,
df613b96
AV
5689 "Unable to reinitialize FCE "
5690 "(%d).\n", rval);
5691 ha->flags.fce_enabled = 0;
5692 }
5693 }
436a7b11
AV
5694
5695 if (ha->eft) {
5696 memset(ha->eft, 0, EFT_SIZE);
e315cd28 5697 rval = qla2x00_enable_eft_trace(vha,
436a7b11
AV
5698 ha->eft_dma, EFT_NUM_BUFFERS);
5699 if (rval) {
7c3df132 5700 ql_log(ql_log_warn, vha, 0x8034,
436a7b11
AV
5701 "Unable to reinitialize EFT "
5702 "(%d).\n", rval);
5703 }
5704 }
1da177e4 5705 } else { /* failed the ISP abort */
e315cd28
AC
5706 vha->flags.online = 1;
5707 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1da177e4 5708 if (ha->isp_abort_cnt == 0) {
7c3df132
SK
5709 ql_log(ql_log_fatal, vha, 0x8035,
5710 "ISP error recover failed - "
5711 "board disabled.\n");
fa2a1ce5 5712 /*
1da177e4
LT
5713 * The next call disables the board
5714 * completely.
5715 */
e315cd28
AC
5716 ha->isp_ops->reset_adapter(vha);
5717 vha->flags.online = 0;
1da177e4 5718 clear_bit(ISP_ABORT_RETRY,
e315cd28 5719 &vha->dpc_flags);
1da177e4
LT
5720 status = 0;
5721 } else { /* schedule another ISP abort */
5722 ha->isp_abort_cnt--;
7c3df132
SK
5723 ql_dbg(ql_dbg_taskm, vha, 0x8020,
5724 "ISP abort - retry remaining %d.\n",
5725 ha->isp_abort_cnt);
1da177e4
LT
5726 status = 1;
5727 }
5728 } else {
5729 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
7c3df132
SK
5730 ql_dbg(ql_dbg_taskm, vha, 0x8021,
5731 "ISP error recovery - retrying (%d) "
5732 "more times.\n", ha->isp_abort_cnt);
e315cd28 5733 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
1da177e4
LT
5734 status = 1;
5735 }
5736 }
fa2a1ce5 5737
1da177e4
LT
5738 }
5739
e315cd28 5740 if (!status) {
7c3df132 5741 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
feafb7b1
AE
5742
5743 spin_lock_irqsave(&ha->vport_slock, flags);
5744 list_for_each_entry(vp, &ha->vp_list, list) {
5745 if (vp->vp_idx) {
5746 atomic_inc(&vp->vref_count);
5747 spin_unlock_irqrestore(&ha->vport_slock, flags);
5748
e315cd28 5749 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
5750
5751 spin_lock_irqsave(&ha->vport_slock, flags);
5752 atomic_dec(&vp->vref_count);
5753 }
e315cd28 5754 }
feafb7b1
AE
5755 spin_unlock_irqrestore(&ha->vport_slock, flags);
5756
7d613ac6
SV
5757 if (IS_QLA8031(ha)) {
5758 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
5759 "Setting back fcoe driver presence.\n");
5760 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
5761 ql_dbg(ql_dbg_p3p, vha, 0xb074,
5762 "Error while setting DRV-Presence.\n");
5763 }
e315cd28 5764 } else {
d8424f68
JP
5765 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
5766 __func__);
1da177e4
LT
5767 }
5768
5769 return(status);
5770}
5771
5772/*
5773* qla2x00_restart_isp
5774* restarts the ISP after a reset
5775*
5776* Input:
5777* ha = adapter block pointer.
5778*
5779* Returns:
5780* 0 = success
5781*/
5782static int
e315cd28 5783qla2x00_restart_isp(scsi_qla_host_t *vha)
1da177e4 5784{
c6b2fca8 5785 int status = 0;
e315cd28 5786 struct qla_hw_data *ha = vha->hw;
73208dfd
AC
5787 struct req_que *req = ha->req_q_map[0];
5788 struct rsp_que *rsp = ha->rsp_q_map[0];
1da177e4
LT
5789
5790 /* If firmware needs to be loaded */
e315cd28
AC
5791 if (qla2x00_isp_firmware(vha)) {
5792 vha->flags.online = 0;
5793 status = ha->isp_ops->chip_diag(vha);
5794 if (!status)
5795 status = qla2x00_setup_chip(vha);
1da177e4
LT
5796 }
5797
e315cd28
AC
5798 if (!status && !(status = qla2x00_init_rings(vha))) {
5799 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
2533cf67 5800 ha->flags.chip_reset_done = 1;
7108b76e 5801
73208dfd
AC
5802 /* Initialize the queues in use */
5803 qla25xx_init_queues(ha);
5804
e315cd28
AC
5805 status = qla2x00_fw_ready(vha);
5806 if (!status) {
0107109e 5807 /* Issue a marker after FW becomes ready. */
73208dfd 5808 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
7108b76e 5809 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1da177e4
LT
5810 }
5811
5812 /* if no cable then assume it's good */
e315cd28 5813 if ((vha->device_flags & DFLG_NO_CABLE))
1da177e4 5814 status = 0;
1da177e4
LT
5815 }
5816 return (status);
5817}
5818
73208dfd
AC
5819static int
5820qla25xx_init_queues(struct qla_hw_data *ha)
5821{
5822 struct rsp_que *rsp = NULL;
5823 struct req_que *req = NULL;
5824 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
5825 int ret = -1;
5826 int i;
5827
2afa19a9 5828 for (i = 1; i < ha->max_rsp_queues; i++) {
73208dfd 5829 rsp = ha->rsp_q_map[i];
cb43285f 5830 if (rsp && test_bit(i, ha->rsp_qid_map)) {
73208dfd 5831 rsp->options &= ~BIT_0;
618a7523 5832 ret = qla25xx_init_rsp_que(base_vha, rsp);
73208dfd 5833 if (ret != QLA_SUCCESS)
7c3df132
SK
5834 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
5835 "%s Rsp que: %d init failed.\n",
5836 __func__, rsp->id);
73208dfd 5837 else
7c3df132
SK
5838 ql_dbg(ql_dbg_init, base_vha, 0x0100,
5839 "%s Rsp que: %d inited.\n",
5840 __func__, rsp->id);
73208dfd 5841 }
2afa19a9
AC
5842 }
5843 for (i = 1; i < ha->max_req_queues; i++) {
73208dfd 5844 req = ha->req_q_map[i];
cb43285f
QT
5845 if (req && test_bit(i, ha->req_qid_map)) {
5846 /* Clear outstanding commands array. */
73208dfd 5847 req->options &= ~BIT_0;
618a7523 5848 ret = qla25xx_init_req_que(base_vha, req);
73208dfd 5849 if (ret != QLA_SUCCESS)
7c3df132
SK
5850 ql_dbg(ql_dbg_init, base_vha, 0x0101,
5851 "%s Req que: %d init failed.\n",
5852 __func__, req->id);
73208dfd 5853 else
7c3df132
SK
5854 ql_dbg(ql_dbg_init, base_vha, 0x0102,
5855 "%s Req que: %d inited.\n",
5856 __func__, req->id);
73208dfd
AC
5857 }
5858 }
5859 return ret;
5860}
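/*
 * Illustrative sketch (not part of the driver): the loops above walk the
 * request/response queue maps starting at index 1 (queue 0 is the base
 * queue) and only touch entries that are both allocated and marked in the
 * qid bitmap.  The standalone mirror below keeps the same "last status
 * wins" return behaviour; all names are hypothetical.
 */
#include <stddef.h>

static int example_bit_set(const unsigned long *bitmap, unsigned int bit)
{
	return (int)((bitmap[bit / (8 * sizeof(unsigned long))] >>
	    (bit % (8 * sizeof(unsigned long)))) & 1UL);
}

static int example_reinit_queues(void **queue_map,
    const unsigned long *qid_map, unsigned int max_queues,
    int (*reinit)(void *queue))
{
	int ret = -1;
	unsigned int i;

	for (i = 1; i < max_queues; i++)
		if (queue_map[i] && example_bit_set(qid_map, i))
			ret = reinit(queue_map[i]);
	return ret;
}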
5861
1da177e4
LT
5862/*
5863* qla2x00_reset_adapter
5864* Reset adapter.
5865*
5866* Input:
5867* ha = adapter block pointer.
5868*/
abbd8870 5869void
e315cd28 5870qla2x00_reset_adapter(scsi_qla_host_t *vha)
1da177e4
LT
5871{
5872 unsigned long flags = 0;
e315cd28 5873 struct qla_hw_data *ha = vha->hw;
3d71644c 5874 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4 5875
e315cd28 5876 vha->flags.online = 0;
fd34f556 5877 ha->isp_ops->disable_intrs(ha);
1da177e4 5878
1da177e4
LT
5879 spin_lock_irqsave(&ha->hardware_lock, flags);
5880 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
5881 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
5882 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
5883 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
5884 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5885}
0107109e
AV
5886
5887void
e315cd28 5888qla24xx_reset_adapter(scsi_qla_host_t *vha)
0107109e
AV
5889{
5890 unsigned long flags = 0;
e315cd28 5891 struct qla_hw_data *ha = vha->hw;
0107109e
AV
5892 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5893
7ec0effd 5894 if (IS_P3P_TYPE(ha))
a9083016
GM
5895 return;
5896
e315cd28 5897 vha->flags.online = 0;
fd34f556 5898 ha->isp_ops->disable_intrs(ha);
0107109e
AV
5899
5900 spin_lock_irqsave(&ha->hardware_lock, flags);
5901 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
5902 RD_REG_DWORD(&reg->hccr);
5903 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
5904 RD_REG_DWORD(&reg->hccr);
5905 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09ff36d3
AV
5906
5907 if (IS_NOPOLLING_TYPE(ha))
5908 ha->isp_ops->enable_intrs(ha);
0107109e
AV
5909}
5910
4e08df3f
DM
5911/* On sparc systems, obtain port and node WWN from firmware
5912 * properties.
5913 */
e315cd28
AC
5914static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
5915 struct nvram_24xx *nv)
4e08df3f
DM
5916{
5917#ifdef CONFIG_SPARC
e315cd28 5918 struct qla_hw_data *ha = vha->hw;
4e08df3f 5919 struct pci_dev *pdev = ha->pdev;
15576bc8
DM
5920 struct device_node *dp = pci_device_to_OF_node(pdev);
5921 const u8 *val;
4e08df3f
DM
5922 int len;
5923
5924 val = of_get_property(dp, "port-wwn", &len);
5925 if (val && len >= WWN_SIZE)
5926 memcpy(nv->port_name, val, WWN_SIZE);
5927
5928 val = of_get_property(dp, "node-wwn", &len);
5929 if (val && len >= WWN_SIZE)
5930 memcpy(nv->node_name, val, WWN_SIZE);
5931#endif
5932}
5933
0107109e 5934int
e315cd28 5935qla24xx_nvram_config(scsi_qla_host_t *vha)
0107109e 5936{
4e08df3f 5937 int rval;
0107109e
AV
5938 struct init_cb_24xx *icb;
5939 struct nvram_24xx *nv;
5940 uint32_t *dptr;
5941 uint8_t *dptr1, *dptr2;
5942 uint32_t chksum;
5943 uint16_t cnt;
e315cd28 5944 struct qla_hw_data *ha = vha->hw;
0107109e 5945
4e08df3f 5946 rval = QLA_SUCCESS;
0107109e 5947 icb = (struct init_cb_24xx *)ha->init_cb;
281afe19 5948 nv = ha->nvram;
0107109e
AV
5949
5950 /* Determine NVRAM starting address. */
f73cb695 5951 if (ha->port_no == 0) {
e5b68a61
AC
5952 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
5953 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
5954 } else {
0107109e 5955 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
6f641790 5956 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
5957 }
f73cb695 5958
e5b68a61
AC
5959 ha->nvram_size = sizeof(struct nvram_24xx);
5960 ha->vpd_size = FA_NVRAM_VPD_SIZE;
0107109e 5961
281afe19
SJ
5962 /* Get VPD data into cache */
5963 ha->vpd = ha->nvram + VPD_OFFSET;
e315cd28 5964 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
281afe19
SJ
5965 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
5966
5967 /* Get NVRAM data into cache and calculate checksum. */
0107109e 5968 dptr = (uint32_t *)nv;
e315cd28 5969 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
0107109e 5970 ha->nvram_size);
da08ef5c
JC
5971 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
5972 chksum += le32_to_cpu(*dptr);
0107109e 5973
7c3df132
SK
5974 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
5975 "Contents of NVRAM\n");
5976 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
5977 (uint8_t *)nv, ha->nvram_size);
0107109e
AV
5978
5979 /* Bad NVRAM data, set defaults parameters. */
5980 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5981 || nv->id[3] != ' ' ||
ad950360 5982 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
0107109e 5983 /* Reset NVRAM data. */
7c3df132 5984 ql_log(ql_log_warn, vha, 0x006b,
9e336520 5985 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7c3df132
SK
5986 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
5987 ql_log(ql_log_warn, vha, 0x006c,
5988 "Falling back to functioning (yet invalid -- WWPN) "
5989 "defaults.\n");
4e08df3f
DM
5990
5991 /*
5992 * Set default initialization control block.
5993 */
5994 memset(nv, 0, ha->nvram_size);
ad950360
BVA
5995 nv->nvram_version = cpu_to_le16(ICB_VERSION);
5996 nv->version = cpu_to_le16(ICB_VERSION);
98aee70d 5997 nv->frame_payload_size = 2048;
ad950360
BVA
5998 nv->execution_throttle = cpu_to_le16(0xFFFF);
5999 nv->exchange_count = cpu_to_le16(0);
6000 nv->hard_address = cpu_to_le16(124);
4e08df3f 6001 nv->port_name[0] = 0x21;
f73cb695 6002 nv->port_name[1] = 0x00 + ha->port_no + 1;
4e08df3f
DM
6003 nv->port_name[2] = 0x00;
6004 nv->port_name[3] = 0xe0;
6005 nv->port_name[4] = 0x8b;
6006 nv->port_name[5] = 0x1c;
6007 nv->port_name[6] = 0x55;
6008 nv->port_name[7] = 0x86;
6009 nv->node_name[0] = 0x20;
6010 nv->node_name[1] = 0x00;
6011 nv->node_name[2] = 0x00;
6012 nv->node_name[3] = 0xe0;
6013 nv->node_name[4] = 0x8b;
6014 nv->node_name[5] = 0x1c;
6015 nv->node_name[6] = 0x55;
6016 nv->node_name[7] = 0x86;
e315cd28 6017 qla24xx_nvram_wwn_from_ofw(vha, nv);
ad950360
BVA
6018 nv->login_retry_count = cpu_to_le16(8);
6019 nv->interrupt_delay_timer = cpu_to_le16(0);
6020 nv->login_timeout = cpu_to_le16(0);
4e08df3f 6021 nv->firmware_options_1 =
ad950360
BVA
6022 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
6023 nv->firmware_options_2 = cpu_to_le32(2 << 4);
6024 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6025 nv->firmware_options_3 = cpu_to_le32(2 << 13);
6026 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
6027 nv->efi_parameters = cpu_to_le32(0);
4e08df3f 6028 nv->reset_delay = 5;
ad950360
BVA
6029 nv->max_luns_per_target = cpu_to_le16(128);
6030 nv->port_down_retry_count = cpu_to_le16(30);
6031 nv->link_down_timeout = cpu_to_le16(30);
4e08df3f
DM
6032
6033 rval = 1;
0107109e
AV
6034 }
6035
726b8548 6036 if (qla_tgt_mode_enabled(vha)) {
2d70c103 6037 /* Don't enable full login after initial LIP */
ad950360 6038 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
2d70c103 6039 /* Don't enable LIP full login for initiator */
ad950360 6040 nv->host_p &= cpu_to_le32(~BIT_10);
2d70c103
NB
6041 }
6042
6043 qlt_24xx_config_nvram_stage1(vha, nv);
6044
0107109e 6045 /* Reset Initialization control block */
e315cd28 6046 memset(icb, 0, ha->init_cb_size);
0107109e
AV
6047
6048 /* Copy 1st segment. */
6049 dptr1 = (uint8_t *)icb;
6050 dptr2 = (uint8_t *)&nv->version;
6051 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
6052 while (cnt--)
6053 *dptr1++ = *dptr2++;
6054
6055 icb->login_retry_count = nv->login_retry_count;
3ea66e28 6056 icb->link_down_on_nos = nv->link_down_on_nos;
0107109e
AV
6057
6058 /* Copy 2nd segment. */
6059 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
6060 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
6061 cnt = (uint8_t *)&icb->reserved_3 -
6062 (uint8_t *)&icb->interrupt_delay_timer;
6063 while (cnt--)
6064 *dptr1++ = *dptr2++;
6065
6066 /*
6067 * Setup driver NVRAM options.
6068 */
e315cd28 6069 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
9bb9fcf2 6070 "QLA2462");
0107109e 6071
2d70c103
NB
6072 qlt_24xx_config_nvram_stage2(vha, icb);
6073
ad950360 6074 if (nv->host_p & cpu_to_le32(BIT_15)) {
2d70c103 6075 /* Use alternate WWN? */
5341e868
AV
6076 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
6077 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
6078 }
6079
0107109e 6080 /* Prepare nodename */
ad950360 6081 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
0107109e
AV
6082 /*
6083 * Firmware will apply the following mask if the nodename was
6084 * not provided.
6085 */
6086 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
6087 icb->node_name[0] &= 0xF0;
6088 }
6089
6090 /* Set host adapter parameters. */
6091 ha->flags.disable_risc_code_load = 0;
0c8c39af
AV
6092 ha->flags.enable_lip_reset = 0;
6093 ha->flags.enable_lip_full_login =
6094 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
6095 ha->flags.enable_target_reset =
6096 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
0107109e 6097 ha->flags.enable_led_scheme = 0;
d4c760c2 6098 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
0107109e 6099
fd0e7e4d
AV
6100 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
6101 (BIT_6 | BIT_5 | BIT_4)) >> 4;
0107109e
AV
6102
6103 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
6104 sizeof(ha->fw_seriallink_options24));
6105
6106 /* save HBA serial number */
6107 ha->serial0 = icb->port_name[5];
6108 ha->serial1 = icb->port_name[6];
6109 ha->serial2 = icb->port_name[7];
e315cd28
AC
6110 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
6111 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
0107109e 6112
ad950360 6113 icb->execution_throttle = cpu_to_le16(0xFFFF);
bc8fb3cb 6114
0107109e
AV
6115 ha->retry_count = le16_to_cpu(nv->login_retry_count);
6116
6117 /* Set minimum login_timeout to 4 seconds. */
6118 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
6119 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
6120 if (le16_to_cpu(nv->login_timeout) < 4)
ad950360 6121 nv->login_timeout = cpu_to_le16(4);
0107109e 6122 ha->login_timeout = le16_to_cpu(nv->login_timeout);
0107109e 6123
00a537b8
AV
6124 /* Set minimum RATOV to 100 tenths of a second. */
6125 ha->r_a_tov = 100;
0107109e
AV
6126
6127 ha->loop_reset_delay = nv->reset_delay;
6128
6129 /* Link Down Timeout = 0:
6130 *
6131 * When Port Down timer expires we will start returning
6132 * I/O's to OS with "DID_NO_CONNECT".
6133 *
6134 * Link Down Timeout != 0:
6135 *
6136 * The driver waits for the link to come up after link down
6137 * before returning I/Os to OS with "DID_NO_CONNECT".
6138 */
6139 if (le16_to_cpu(nv->link_down_timeout) == 0) {
6140 ha->loop_down_abort_time =
6141 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
6142 } else {
6143 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
6144 ha->loop_down_abort_time =
6145 (LOOP_DOWN_TIME - ha->link_down_timeout);
6146 }
6147
6148 /* Need enough time to try and get the port back. */
6149 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
6150 if (qlport_down_retry)
6151 ha->port_down_retry_count = qlport_down_retry;
6152
6153 /* Set login_retry_count */
6154 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
6155 if (ha->port_down_retry_count ==
6156 le16_to_cpu(nv->port_down_retry_count) &&
6157 ha->port_down_retry_count > 3)
6158 ha->login_retry_count = ha->port_down_retry_count;
6159 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
6160 ha->login_retry_count = ha->port_down_retry_count;
6161 if (ql2xloginretrycount)
6162 ha->login_retry_count = ql2xloginretrycount;
6163
4fdfefe5 6164 /* Enable ZIO. */
e315cd28 6165 if (!vha->flags.init_done) {
4fdfefe5
AV
6166 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
6167 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
6168 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
6169 le16_to_cpu(icb->interrupt_delay_timer): 2;
6170 }
ad950360 6171 icb->firmware_options_2 &= cpu_to_le32(
4fdfefe5 6172 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
e315cd28 6173 vha->flags.process_response_queue = 0;
4fdfefe5 6174 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4a59f71d 6175 ha->zio_mode = QLA_ZIO_MODE_6;
6176
7c3df132 6177 ql_log(ql_log_info, vha, 0x006f,
4fdfefe5
AV
6178 "ZIO mode %d enabled; timer delay (%d us).\n",
6179 ha->zio_mode, ha->zio_timer * 100);
6180
6181 icb->firmware_options_2 |= cpu_to_le32(
6182 (uint32_t)ha->zio_mode);
6183 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
e315cd28 6184 vha->flags.process_response_queue = 1;
4fdfefe5
AV
6185 }
6186
4e08df3f 6187 if (rval) {
7c3df132
SK
6188 ql_log(ql_log_warn, vha, 0x0070,
6189 "NVRAM configuration failed.\n");
4e08df3f
DM
6190 }
6191 return (rval);
0107109e
AV
6192}
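/*
 * Illustrative sketch (not part of the driver): the NVRAM image read above
 * is accepted only when the 32-bit words sum to zero (the image carries a
 * compensating checksum word) and the "ISP " signature and minimum ICB
 * version are present.  The standalone checksum helper below assumes a
 * little-endian host in place of le32_to_cpu(); names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t example_nvram_checksum(const uint32_t *words, size_t nwords)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		sum += words[i];
	return sum;		/* zero means the image checksums correctly */
}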
6193
4243c115
SC
6194uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
6195{
6196 struct qla27xx_image_status pri_image_status, sec_image_status;
6197 uint8_t valid_pri_image, valid_sec_image;
6198 uint32_t *wptr;
6199 uint32_t cnt, chksum, size;
6200 struct qla_hw_data *ha = vha->hw;
6201
6202 valid_pri_image = valid_sec_image = 1;
6203 ha->active_image = 0;
6204 size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
6205
6206 if (!ha->flt_region_img_status_pri) {
6207 valid_pri_image = 0;
6208 goto check_sec_image;
6209 }
6210
6211 qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
6212 ha->flt_region_img_status_pri, size);
6213
6214 if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
6215 ql_dbg(ql_dbg_init, vha, 0x018b,
6216 "Primary image signature (0x%x) not valid\n",
6217 pri_image_status.signature);
6218 valid_pri_image = 0;
6219 goto check_sec_image;
6220 }
6221
6222 wptr = (uint32_t *)(&pri_image_status);
6223 cnt = size;
6224
da08ef5c
JC
6225 for (chksum = 0; cnt--; wptr++)
6226 chksum += le32_to_cpu(*wptr);
41dc529a 6227
4243c115
SC
6228 if (chksum) {
6229 ql_dbg(ql_dbg_init, vha, 0x018c,
6230 "Checksum validation failed for primary image (0x%x)\n",
6231 chksum);
6232 valid_pri_image = 0;
6233 }
6234
6235check_sec_image:
6236 if (!ha->flt_region_img_status_sec) {
6237 valid_sec_image = 0;
6238 goto check_valid_image;
6239 }
6240
6241 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
6242 ha->flt_region_img_status_sec, size);
6243
6244 if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
6245 ql_dbg(ql_dbg_init, vha, 0x018d,
6246 "Secondary image signature(0x%x) not valid\n",
6247 sec_image_status.signature);
6248 valid_sec_image = 0;
6249 goto check_valid_image;
6250 }
6251
6252 wptr = (uint32_t *)(&sec_image_status);
6253 cnt = size;
da08ef5c
JC
6254 for (chksum = 0; cnt--; wptr++)
6255 chksum += le32_to_cpu(*wptr);
4243c115
SC
6256 if (chksum) {
6257 ql_dbg(ql_dbg_init, vha, 0x018e,
6258 "Checksum validation failed for secondary image (0x%x)\n",
6259 chksum);
6260 valid_sec_image = 0;
6261 }
6262
6263check_valid_image:
6264 if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
6265 ha->active_image = QLA27XX_PRIMARY_IMAGE;
6266 if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
6267 if (!ha->active_image ||
6268 pri_image_status.generation_number <
6269 sec_image_status.generation_number)
6270 ha->active_image = QLA27XX_SECONDARY_IMAGE;
6271 }
6272
6273 ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
6274 ha->active_image == 0 ? "default bootld and fw" :
6275 ha->active_image == 1 ? "primary" :
6276 ha->active_image == 2 ? "secondary" :
6277 "Invalid");
6278
6279 return ha->active_image;
6280}
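/*
 * Illustrative sketch (not part of the driver): the selection above prefers
 * whichever flash image is valid and enabled and carries the newer
 * generation number, falling back to the default boot loader/firmware when
 * neither qualifies.  The EXAMPLE_* constants and example_pick_image() are
 * hypothetical; "ok" folds together the signature, checksum and enable-bit
 * checks performed above.
 */
#include <stdint.h>

#define EXAMPLE_DEFAULT_IMAGE	0
#define EXAMPLE_PRIMARY_IMAGE	1
#define EXAMPLE_SECONDARY_IMAGE	2

static int example_pick_image(int pri_ok, uint32_t pri_gen,
    int sec_ok, uint32_t sec_gen)
{
	int active = EXAMPLE_DEFAULT_IMAGE;

	if (pri_ok)
		active = EXAMPLE_PRIMARY_IMAGE;
	if (sec_ok && (active == EXAMPLE_DEFAULT_IMAGE || pri_gen < sec_gen))
		active = EXAMPLE_SECONDARY_IMAGE;
	return active;
}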
6281
413975a0 6282static int
cbc8eb67
AV
6283qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
6284 uint32_t faddr)
d1c61909 6285{
73208dfd 6286 int rval = QLA_SUCCESS;
d1c61909 6287 int segments, fragment;
d1c61909
AV
6288 uint32_t *dcode, dlen;
6289 uint32_t risc_addr;
6290 uint32_t risc_size;
6291 uint32_t i;
e315cd28 6292 struct qla_hw_data *ha = vha->hw;
73208dfd 6293 struct req_que *req = ha->req_q_map[0];
eaac30be 6294
7c3df132 6295 ql_dbg(ql_dbg_init, vha, 0x008b,
cfb0919c 6296 "FW: Loading firmware from flash (%x).\n", faddr);
eaac30be 6297
d1c61909
AV
6298 rval = QLA_SUCCESS;
6299
6300 segments = FA_RISC_CODE_SEGMENTS;
73208dfd 6301 dcode = (uint32_t *)req->ring;
d1c61909
AV
6302 *srisc_addr = 0;
6303
4243c115
SC
6304 if (IS_QLA27XX(ha) &&
6305 qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
6306 faddr = ha->flt_region_fw_sec;
6307
d1c61909 6308 /* Validate firmware image by checking version. */
e315cd28 6309 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
d1c61909
AV
6310 for (i = 0; i < 4; i++)
6311 dcode[i] = be32_to_cpu(dcode[i]);
6312 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
6313 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
6314 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
6315 dcode[3] == 0)) {
7c3df132
SK
6316 ql_log(ql_log_fatal, vha, 0x008c,
6317 "Unable to verify the integrity of flash firmware "
6318 "image.\n");
6319 ql_log(ql_log_fatal, vha, 0x008d,
6320 "Firmware data: %08x %08x %08x %08x.\n",
6321 dcode[0], dcode[1], dcode[2], dcode[3]);
d1c61909
AV
6322
6323 return QLA_FUNCTION_FAILED;
6324 }
6325
6326 while (segments && rval == QLA_SUCCESS) {
6327 /* Read segment's load information. */
e315cd28 6328 qla24xx_read_flash_data(vha, dcode, faddr, 4);
d1c61909
AV
6329
6330 risc_addr = be32_to_cpu(dcode[2]);
6331 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
6332 risc_size = be32_to_cpu(dcode[3]);
6333
6334 fragment = 0;
6335 while (risc_size > 0 && rval == QLA_SUCCESS) {
6336 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
6337 if (dlen > risc_size)
6338 dlen = risc_size;
6339
7c3df132
SK
6340 ql_dbg(ql_dbg_init, vha, 0x008e,
6341 "Loading risc segment@ risc addr %x "
6342 "number of dwords 0x%x offset 0x%x.\n",
6343 risc_addr, dlen, faddr);
d1c61909 6344
e315cd28 6345 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
d1c61909
AV
6346 for (i = 0; i < dlen; i++)
6347 dcode[i] = swab32(dcode[i]);
6348
73208dfd 6349 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
d1c61909
AV
6350 dlen);
6351 if (rval) {
7c3df132
SK
6352 ql_log(ql_log_fatal, vha, 0x008f,
6353 "Failed to load segment %d of firmware.\n",
6354 fragment);
f261f7af 6355 return QLA_FUNCTION_FAILED;
d1c61909
AV
6356 }
6357
6358 faddr += dlen;
6359 risc_addr += dlen;
6360 risc_size -= dlen;
6361 fragment++;
6362 }
6363
6364 /* Next segment. */
6365 segments--;
6366 }
6367
f73cb695
CD
6368 if (!IS_QLA27XX(ha))
6369 return rval;
6370
6371 if (ha->fw_dump_template)
6372 vfree(ha->fw_dump_template);
6373 ha->fw_dump_template = NULL;
6374 ha->fw_dump_template_len = 0;
6375
6376 ql_dbg(ql_dbg_init, vha, 0x0161,
6377 "Loading fwdump template from %x\n", faddr);
6378 qla24xx_read_flash_data(vha, dcode, faddr, 7);
6379 risc_size = be32_to_cpu(dcode[2]);
6380 ql_dbg(ql_dbg_init, vha, 0x0162,
6381 "-> array size %x dwords\n", risc_size);
6382 if (risc_size == 0 || risc_size == ~0)
6383 goto default_template;
6384
6385 dlen = (risc_size - 8) * sizeof(*dcode);
6386 ql_dbg(ql_dbg_init, vha, 0x0163,
6387 "-> template allocating %x bytes...\n", dlen);
6388 ha->fw_dump_template = vmalloc(dlen);
6389 if (!ha->fw_dump_template) {
6390 ql_log(ql_log_warn, vha, 0x0164,
6391 "Failed fwdump template allocate %x bytes.\n", risc_size);
6392 goto default_template;
6393 }
6394
6395 faddr += 7;
6396 risc_size -= 8;
6397 dcode = ha->fw_dump_template;
6398 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
6399 for (i = 0; i < risc_size; i++)
6400 dcode[i] = le32_to_cpu(dcode[i]);
6401
6402 if (!qla27xx_fwdt_template_valid(dcode)) {
6403 ql_log(ql_log_warn, vha, 0x0165,
6404 "Failed fwdump template validate\n");
6405 goto default_template;
6406 }
6407
6408 dlen = qla27xx_fwdt_template_size(dcode);
6409 ql_dbg(ql_dbg_init, vha, 0x0166,
6410 "-> template size %x bytes\n", dlen);
6411 if (dlen > risc_size * sizeof(*dcode)) {
6412 ql_log(ql_log_warn, vha, 0x0167,
4fae52b5 6413 "Failed fwdump template exceeds array by %zx bytes\n",
383a298b 6414 (size_t)(dlen - risc_size * sizeof(*dcode)));
f73cb695
CD
6415 goto default_template;
6416 }
6417 ha->fw_dump_template_len = dlen;
6418 return rval;
6419
6420default_template:
6421 ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
6422 if (ha->fw_dump_template)
6423 vfree(ha->fw_dump_template);
6424 ha->fw_dump_template = NULL;
6425 ha->fw_dump_template_len = 0;
6426
6427 dlen = qla27xx_fwdt_template_default_size();
6428 ql_dbg(ql_dbg_init, vha, 0x0169,
6429 "-> template allocating %x bytes...\n", dlen);
6430 ha->fw_dump_template = vmalloc(dlen);
6431 if (!ha->fw_dump_template) {
6432 ql_log(ql_log_warn, vha, 0x016a,
6433 "Failed fwdump template allocate %x bytes.\n", risc_size);
6434 goto failed_template;
6435 }
6436
6437 dcode = ha->fw_dump_template;
6438 risc_size = dlen / sizeof(*dcode);
6439 memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
6440 for (i = 0; i < risc_size; i++)
6441 dcode[i] = be32_to_cpu(dcode[i]);
6442
6443 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
6444 ql_log(ql_log_warn, vha, 0x016b,
6445 "Failed fwdump template validate\n");
6446 goto failed_template;
6447 }
6448
6449 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
6450 ql_dbg(ql_dbg_init, vha, 0x016c,
6451 "-> template size %x bytes\n", dlen);
6452 ha->fw_dump_template_len = dlen;
6453 return rval;
6454
6455failed_template:
6456 ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
6457 if (ha->fw_dump_template)
6458 vfree(ha->fw_dump_template);
6459 ha->fw_dump_template = NULL;
6460 ha->fw_dump_template_len = 0;
d1c61909
AV
6461 return rval;
6462}
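/*
 * Illustrative sketch (not part of the driver): each firmware segment above
 * is transferred in fragments no larger than the adapter's transfer size,
 * advancing the flash offset, RISC address and remaining length together
 * and aborting on the first failed fragment.  example_load_chunk() is a
 * hypothetical stand-in for the flash-read plus load-RAM step.
 */
#include <stdint.h>

static int example_load_segment(uint32_t faddr, uint32_t risc_addr,
    uint32_t risc_size, uint32_t max_chunk,
    int (*example_load_chunk)(uint32_t faddr, uint32_t risc_addr,
	uint32_t dlen))
{
	while (risc_size > 0) {
		uint32_t dlen = (risc_size < max_chunk) ? risc_size : max_chunk;

		if (example_load_chunk(faddr, risc_addr, dlen))
			return -1;

		faddr += dlen;
		risc_addr += dlen;
		risc_size -= dlen;
	}
	return 0;
}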
6463
e9454a88 6464#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
d1c61909 6465
0107109e 6466int
e315cd28 6467qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5433383e
AV
6468{
6469 int rval;
6470 int i, fragment;
6471 uint16_t *wcode, *fwcode;
6472 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
6473 struct fw_blob *blob;
e315cd28 6474 struct qla_hw_data *ha = vha->hw;
73208dfd 6475 struct req_que *req = ha->req_q_map[0];
5433383e
AV
6476
6477 /* Load firmware blob. */
e315cd28 6478 blob = qla2x00_request_firmware(vha);
5433383e 6479 if (!blob) {
7c3df132 6480 ql_log(ql_log_info, vha, 0x0083,
94bcf830 6481 "Firmware image unavailable.\n");
7c3df132
SK
6482 ql_log(ql_log_info, vha, 0x0084,
6483 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
5433383e
AV
6484 return QLA_FUNCTION_FAILED;
6485 }
6486
6487 rval = QLA_SUCCESS;
6488
73208dfd 6489 wcode = (uint16_t *)req->ring;
5433383e
AV
6490 *srisc_addr = 0;
6491 fwcode = (uint16_t *)blob->fw->data;
6492 fwclen = 0;
6493
6494 /* Validate firmware image by checking version. */
6495 if (blob->fw->size < 8 * sizeof(uint16_t)) {
7c3df132 6496 ql_log(ql_log_fatal, vha, 0x0085,
5b5e0928 6497 "Unable to verify integrity of firmware image (%zd).\n",
5433383e
AV
6498 blob->fw->size);
6499 goto fail_fw_integrity;
6500 }
6501 for (i = 0; i < 4; i++)
6502 wcode[i] = be16_to_cpu(fwcode[i + 4]);
6503 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
6504 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
6505 wcode[2] == 0 && wcode[3] == 0)) {
7c3df132
SK
6506 ql_log(ql_log_fatal, vha, 0x0086,
6507 "Unable to verify integrity of firmware image.\n");
6508 ql_log(ql_log_fatal, vha, 0x0087,
6509 "Firmware data: %04x %04x %04x %04x.\n",
6510 wcode[0], wcode[1], wcode[2], wcode[3]);
5433383e
AV
6511 goto fail_fw_integrity;
6512 }
6513
6514 seg = blob->segs;
6515 while (*seg && rval == QLA_SUCCESS) {
6516 risc_addr = *seg;
6517 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
6518 risc_size = be16_to_cpu(fwcode[3]);
6519
6520 /* Validate firmware image size. */
6521 fwclen += risc_size * sizeof(uint16_t);
6522 if (blob->fw->size < fwclen) {
7c3df132 6523 ql_log(ql_log_fatal, vha, 0x0088,
5433383e 6524 "Unable to verify integrity of firmware image "
5b5e0928 6525 "(%zd).\n", blob->fw->size);
5433383e
AV
6526 goto fail_fw_integrity;
6527 }
6528
6529 fragment = 0;
6530 while (risc_size > 0 && rval == QLA_SUCCESS) {
6531 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
6532 if (wlen > risc_size)
6533 wlen = risc_size;
7c3df132
SK
6534 ql_dbg(ql_dbg_init, vha, 0x0089,
6535 "Loading risc segment@ risc addr %x number of "
6536 "words 0x%x.\n", risc_addr, wlen);
5433383e
AV
6537
6538 for (i = 0; i < wlen; i++)
6539 wcode[i] = swab16(fwcode[i]);
6540
73208dfd 6541 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
5433383e
AV
6542 wlen);
6543 if (rval) {
7c3df132
SK
6544 ql_log(ql_log_fatal, vha, 0x008a,
6545 "Failed to load segment %d of firmware.\n",
6546 fragment);
5433383e
AV
6547 break;
6548 }
6549
6550 fwcode += wlen;
6551 risc_addr += wlen;
6552 risc_size -= wlen;
6553 fragment++;
6554 }
6555
6556 /* Next segment. */
6557 seg++;
6558 }
6559 return rval;
6560
6561fail_fw_integrity:
6562 return QLA_FUNCTION_FAILED;
6563}
6564
eaac30be
AV
6565static int
6566qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
0107109e
AV
6567{
6568 int rval;
6569 int segments, fragment;
6570 uint32_t *dcode, dlen;
6571 uint32_t risc_addr;
6572 uint32_t risc_size;
6573 uint32_t i;
5433383e 6574 struct fw_blob *blob;
f73cb695
CD
6575 const uint32_t *fwcode;
6576 uint32_t fwclen;
e315cd28 6577 struct qla_hw_data *ha = vha->hw;
73208dfd 6578 struct req_que *req = ha->req_q_map[0];
0107109e 6579
5433383e 6580 /* Load firmware blob. */
e315cd28 6581 blob = qla2x00_request_firmware(vha);
5433383e 6582 if (!blob) {
7c3df132 6583 ql_log(ql_log_warn, vha, 0x0090,
94bcf830 6584 "Firmware image unavailable.\n");
7c3df132
SK
6585 ql_log(ql_log_warn, vha, 0x0091,
6586 "Firmware images can be retrieved from: "
6587 QLA_FW_URL ".\n");
d1c61909 6588
eaac30be 6589 return QLA_FUNCTION_FAILED;
0107109e
AV
6590 }
6591
cfb0919c
CD
6592 ql_dbg(ql_dbg_init, vha, 0x0092,
6593 "FW: Loading via request-firmware.\n");
eaac30be 6594
0107109e
AV
6595 rval = QLA_SUCCESS;
6596
6597 segments = FA_RISC_CODE_SEGMENTS;
73208dfd 6598 dcode = (uint32_t *)req->ring;
0107109e 6599 *srisc_addr = 0;
5433383e 6600 fwcode = (uint32_t *)blob->fw->data;
0107109e
AV
6601 fwclen = 0;
6602
6603 /* Validate firmware image by checking version. */
5433383e 6604 if (blob->fw->size < 8 * sizeof(uint32_t)) {
7c3df132 6605 ql_log(ql_log_fatal, vha, 0x0093,
5b5e0928 6606 "Unable to verify integrity of firmware image (%zd).\n",
5433383e 6607 blob->fw->size);
f73cb695 6608 return QLA_FUNCTION_FAILED;
0107109e
AV
6609 }
6610 for (i = 0; i < 4; i++)
6611 dcode[i] = be32_to_cpu(fwcode[i + 4]);
6612 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
6613 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
6614 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
6615 dcode[3] == 0)) {
7c3df132 6616 ql_log(ql_log_fatal, vha, 0x0094,
5b5e0928 6617 "Unable to verify integrity of firmware image (%zd).\n",
7c3df132
SK
6618 blob->fw->size);
6619 ql_log(ql_log_fatal, vha, 0x0095,
6620 "Firmware data: %08x %08x %08x %08x.\n",
6621 dcode[0], dcode[1], dcode[2], dcode[3]);
f73cb695 6622 return QLA_FUNCTION_FAILED;
0107109e
AV
6623 }
6624
6625 while (segments && rval == QLA_SUCCESS) {
6626 risc_addr = be32_to_cpu(fwcode[2]);
6627 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
6628 risc_size = be32_to_cpu(fwcode[3]);
6629
6630 /* Validate firmware image size. */
6631 fwclen += risc_size * sizeof(uint32_t);
5433383e 6632 if (blob->fw->size < fwclen) {
7c3df132 6633 ql_log(ql_log_fatal, vha, 0x0096,
5433383e 6634 "Unable to verify integrity of firmware image "
5b5e0928 6635 "(%zd).\n", blob->fw->size);
f73cb695 6636 return QLA_FUNCTION_FAILED;
0107109e
AV
6637 }
6638
6639 fragment = 0;
6640 while (risc_size > 0 && rval == QLA_SUCCESS) {
6641 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
6642 if (dlen > risc_size)
6643 dlen = risc_size;
6644
7c3df132
SK
6645 ql_dbg(ql_dbg_init, vha, 0x0097,
6646 "Loading risc segment@ risc addr %x "
6647 "number of dwords 0x%x.\n", risc_addr, dlen);
0107109e
AV
6648
6649 for (i = 0; i < dlen; i++)
6650 dcode[i] = swab32(fwcode[i]);
6651
73208dfd 6652 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
590f98e5 6653 dlen);
0107109e 6654 if (rval) {
7c3df132
SK
6655 ql_log(ql_log_fatal, vha, 0x0098,
6656 "Failed to load segment %d of firmware.\n",
6657 fragment);
f261f7af 6658 return QLA_FUNCTION_FAILED;
0107109e
AV
6659 }
6660
6661 fwcode += dlen;
6662 risc_addr += dlen;
6663 risc_size -= dlen;
6664 fragment++;
6665 }
6666
6667 /* Next segment. */
6668 segments--;
6669 }
f73cb695
CD
6670
6671 if (!IS_QLA27XX(ha))
6672 return rval;
6673
6674 if (ha->fw_dump_template)
6675 vfree(ha->fw_dump_template);
6676 ha->fw_dump_template = NULL;
6677 ha->fw_dump_template_len = 0;
6678
6679 ql_dbg(ql_dbg_init, vha, 0x171,
97ea702b
CD
6680 "Loading fwdump template from %x\n",
6681 (uint32_t)((void *)fwcode - (void *)blob->fw->data));
f73cb695
CD
6682 risc_size = be32_to_cpu(fwcode[2]);
6683 ql_dbg(ql_dbg_init, vha, 0x172,
6684 "-> array size %x dwords\n", risc_size);
6685 if (risc_size == 0 || risc_size == ~0)
6686 goto default_template;
6687
6688 dlen = (risc_size - 8) * sizeof(*fwcode);
6689 ql_dbg(ql_dbg_init, vha, 0x0173,
6690 "-> template allocating %x bytes...\n", dlen);
6691 ha->fw_dump_template = vmalloc(dlen);
6692 if (!ha->fw_dump_template) {
6693 ql_log(ql_log_warn, vha, 0x0174,
6694 "Failed fwdump template allocate %x bytes.\n", risc_size);
6695 goto default_template;
6696 }
6697
6698 fwcode += 7;
6699 risc_size -= 8;
6700 dcode = ha->fw_dump_template;
6701 for (i = 0; i < risc_size; i++)
6702 dcode[i] = le32_to_cpu(fwcode[i]);
6703
6704 if (!qla27xx_fwdt_template_valid(dcode)) {
6705 ql_log(ql_log_warn, vha, 0x0175,
6706 "Failed fwdump template validate\n");
6707 goto default_template;
6708 }
6709
6710 dlen = qla27xx_fwdt_template_size(dcode);
6711 ql_dbg(ql_dbg_init, vha, 0x0176,
6712 "-> template size %x bytes\n", dlen);
6713 if (dlen > risc_size * sizeof(*fwcode)) {
6714 ql_log(ql_log_warn, vha, 0x0177,
4fae52b5 6715 "Failed fwdump template exceeds array by %zx bytes\n",
383a298b 6716 (size_t)(dlen - risc_size * sizeof(*fwcode)));
f73cb695
CD
6717 goto default_template;
6718 }
6719 ha->fw_dump_template_len = dlen;
0107109e
AV
6720 return rval;
6721
f73cb695
CD
6722default_template:
6723 ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
6724 if (ha->fw_dump_template)
6725 vfree(ha->fw_dump_template);
6726 ha->fw_dump_template = NULL;
6727 ha->fw_dump_template_len = 0;
6728
6729 dlen = qla27xx_fwdt_template_default_size();
6730 ql_dbg(ql_dbg_init, vha, 0x0179,
6731 "-> template allocating %x bytes...\n", dlen);
6732 ha->fw_dump_template = vmalloc(dlen);
6733 if (!ha->fw_dump_template) {
6734 ql_log(ql_log_warn, vha, 0x017a,
6735 "Failed fwdump template allocate %x bytes.\n", risc_size);
6736 goto failed_template;
6737 }
6738
6739 dcode = ha->fw_dump_template;
6740 risc_size = dlen / sizeof(*fwcode);
6741 fwcode = qla27xx_fwdt_template_default();
6742 for (i = 0; i < risc_size; i++)
6743 dcode[i] = be32_to_cpu(fwcode[i]);
6744
6745 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
6746 ql_log(ql_log_warn, vha, 0x017b,
6747 "Failed fwdump template validate\n");
6748 goto failed_template;
6749 }
6750
6751 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
6752 ql_dbg(ql_dbg_init, vha, 0x017c,
6753 "-> template size %x bytes\n", dlen);
6754 ha->fw_dump_template_len = dlen;
6755 return rval;
6756
6757failed_template:
6758 ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
6759 if (ha->fw_dump_template)
6760 vfree(ha->fw_dump_template);
6761 ha->fw_dump_template = NULL;
6762 ha->fw_dump_template_len = 0;
6763 return rval;
0107109e 6764}
18c6c127 6765
eaac30be
AV
6766int
6767qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
6768{
6769 int rval;
6770
e337d907
AV
6771 if (ql2xfwloadbin == 1)
6772 return qla81xx_load_risc(vha, srisc_addr);
6773
eaac30be
AV
6774 /*
6775 * FW Load priority:
6776 * 1) Firmware via request-firmware interface (.bin file).
6777 * 2) Firmware residing in flash.
6778 */
6779 rval = qla24xx_load_risc_blob(vha, srisc_addr);
6780 if (rval == QLA_SUCCESS)
6781 return rval;
6782
cbc8eb67
AV
6783 return qla24xx_load_risc_flash(vha, srisc_addr,
6784 vha->hw->flt_region_fw);
eaac30be
AV
6785}
6786
6787int
6788qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
6789{
6790 int rval;
cbc8eb67 6791 struct qla_hw_data *ha = vha->hw;
eaac30be 6792
e337d907 6793 if (ql2xfwloadbin == 2)
cbc8eb67 6794 goto try_blob_fw;
e337d907 6795
eaac30be
AV
6796 /*
6797 * FW Load priority:
6798 * 1) Firmware residing in flash.
6799 * 2) Firmware via request-firmware interface (.bin file).
cbc8eb67 6800 * 3) Golden-Firmware residing in flash -- limited operation.
eaac30be 6801 */
cbc8eb67 6802 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
eaac30be
AV
6803 if (rval == QLA_SUCCESS)
6804 return rval;
6805
cbc8eb67
AV
6806try_blob_fw:
6807 rval = qla24xx_load_risc_blob(vha, srisc_addr);
6808 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
6809 return rval;
6810
7c3df132
SK
6811 ql_log(ql_log_info, vha, 0x0099,
6812 "Attempting to fallback to golden firmware.\n");
cbc8eb67
AV
6813 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
6814 if (rval != QLA_SUCCESS)
6815 return rval;
6816
7c3df132 6817 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
cbc8eb67 6818 ha->flags.running_gold_fw = 1;
cbc8eb67 6819 return rval;
eaac30be
AV
6820}
6821
18c6c127 6822void
e315cd28 6823qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
18c6c127
AV
6824{
6825 int ret, retries;
e315cd28 6826 struct qla_hw_data *ha = vha->hw;
18c6c127 6827
85880801
AV
6828 if (ha->flags.pci_channel_io_perm_failure)
6829 return;
e428924c 6830 if (!IS_FWI2_CAPABLE(ha))
18c6c127 6831 return;
75edf81d
AV
6832 if (!ha->fw_major_version)
6833 return;
ec7193e2
QT
6834 if (!ha->flags.fw_started)
6835 return;
18c6c127 6836
e315cd28 6837 ret = qla2x00_stop_firmware(vha);
7c7f1f29 6838 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
b469a7cb 6839 ret != QLA_INVALID_COMMAND && retries ; retries--) {
e315cd28
AC
6840 ha->isp_ops->reset_chip(vha);
6841 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
18c6c127 6842 continue;
e315cd28 6843 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
18c6c127 6844 continue;
7c3df132
SK
6845 ql_log(ql_log_info, vha, 0x8015,
6846 "Attempting retry of stop-firmware command.\n");
e315cd28 6847 ret = qla2x00_stop_firmware(vha);
18c6c127 6848 }
ec7193e2 6849
4b60c827 6850 QLA_FW_STOPPED(ha);
ec7193e2 6851 ha->flags.fw_init_done = 0;
18c6c127 6852}
2c3dfe3f
SJ
6853
6854int
e315cd28 6855qla24xx_configure_vhba(scsi_qla_host_t *vha)
2c3dfe3f
SJ
6856{
6857 int rval = QLA_SUCCESS;
0b91d116 6858 int rval2;
2c3dfe3f 6859 uint16_t mb[MAILBOX_REGISTER_COUNT];
e315cd28
AC
6860 struct qla_hw_data *ha = vha->hw;
6861 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
67c2e93a
AC
6862 struct req_que *req;
6863 struct rsp_que *rsp;
2c3dfe3f 6864
e315cd28 6865 if (!vha->vp_idx)
2c3dfe3f
SJ
6866 return -EINVAL;
6867
e315cd28 6868 rval = qla2x00_fw_ready(base_vha);
d7459527
MH
6869 if (vha->qpair)
6870 req = vha->qpair->req;
67c2e93a 6871 else
d7459527 6872 req = ha->req_q_map[0];
67c2e93a
AC
6873 rsp = req->rsp;
6874
2c3dfe3f 6875 if (rval == QLA_SUCCESS) {
e315cd28 6876 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
73208dfd 6877 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
2c3dfe3f
SJ
6878 }
6879
e315cd28 6880 vha->flags.management_server_logged_in = 0;
2c3dfe3f
SJ
6881
6882 /* Login to SNS first */
0b91d116
CD
6883 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
6884 BIT_1);
6885 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
6886 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
6887 ql_dbg(ql_dbg_init, vha, 0x0120,
6888 "Failed SNS login: loop_id=%x, rval2=%d\n",
6889 NPH_SNS, rval2);
6890 else
6891 ql_dbg(ql_dbg_init, vha, 0x0103,
6892 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
6893 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
6894 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
2c3dfe3f
SJ
6895 return (QLA_FUNCTION_FAILED);
6896 }
6897
e315cd28
AC
6898 atomic_set(&vha->loop_down_timer, 0);
6899 atomic_set(&vha->loop_state, LOOP_UP);
6900 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6901 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
6902 rval = qla2x00_loop_resync(base_vha);
2c3dfe3f
SJ
6903
6904 return rval;
6905}
4d4df193
HK
6906
6907/* 84XX Support **************************************************************/
6908
6909static LIST_HEAD(qla_cs84xx_list);
6910static DEFINE_MUTEX(qla_cs84xx_mutex);
6911
6912static struct qla_chip_state_84xx *
e315cd28 6913qla84xx_get_chip(struct scsi_qla_host *vha)
4d4df193
HK
6914{
6915 struct qla_chip_state_84xx *cs84xx;
e315cd28 6916 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
6917
6918 mutex_lock(&qla_cs84xx_mutex);
6919
6920 /* Find any shared 84xx chip. */
6921 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
6922 if (cs84xx->bus == ha->pdev->bus) {
6923 kref_get(&cs84xx->kref);
6924 goto done;
6925 }
6926 }
6927
6928 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
6929 if (!cs84xx)
6930 goto done;
6931
6932 kref_init(&cs84xx->kref);
6933 spin_lock_init(&cs84xx->access_lock);
6934 mutex_init(&cs84xx->fw_update_mutex);
6935 cs84xx->bus = ha->pdev->bus;
6936
6937 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
6938done:
6939 mutex_unlock(&qla_cs84xx_mutex);
6940 return cs84xx;
6941}
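/*
 * Illustrative sketch (not part of the driver): qla84xx_get_chip() above is
 * a find-or-create lookup of shared per-bus state done under a mutex, with
 * kref_get()/kref_init() keeping the entry alive while any adapter uses it.
 * The userspace analogy below uses a plain integer refcount and a singly
 * linked list; all names are hypothetical and the caller is assumed to hold
 * the list lock.
 */
#include <stdlib.h>

struct example_shared {
	int key;			/* stands in for the PCI bus */
	int refcount;
	struct example_shared *next;
};

static struct example_shared *example_list;

static struct example_shared *example_get_shared(int key)
{
	struct example_shared *s;

	for (s = example_list; s; s = s->next) {
		if (s->key == key) {
			s->refcount++;		/* existing entry: take a ref */
			return s;
		}
	}
	s = calloc(1, sizeof(*s));		/* not found: create and link */
	if (!s)
		return NULL;
	s->key = key;
	s->refcount = 1;
	s->next = example_list;
	example_list = s;
	return s;
}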
6942
6943static void
6944__qla84xx_chip_release(struct kref *kref)
6945{
6946 struct qla_chip_state_84xx *cs84xx =
6947 container_of(kref, struct qla_chip_state_84xx, kref);
6948
6949 mutex_lock(&qla_cs84xx_mutex);
6950 list_del(&cs84xx->list);
6951 mutex_unlock(&qla_cs84xx_mutex);
6952 kfree(cs84xx);
6953}
6954
6955void
e315cd28 6956qla84xx_put_chip(struct scsi_qla_host *vha)
4d4df193 6957{
e315cd28 6958 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
6959 if (ha->cs84xx)
6960 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
6961}
6962
6963static int
e315cd28 6964qla84xx_init_chip(scsi_qla_host_t *vha)
4d4df193
HK
6965{
6966 int rval;
6967 uint16_t status[2];
e315cd28 6968 struct qla_hw_data *ha = vha->hw;
4d4df193
HK
6969
6970 mutex_lock(&ha->cs84xx->fw_update_mutex);
6971
e315cd28 6972 rval = qla84xx_verify_chip(vha, status);
4d4df193
HK
6973
6974 mutex_unlock(&ha->cs84xx->fw_update_mutex);
6975
6976 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
6977 QLA_SUCCESS;
6978}
3a03eb79
AV
6979
6980/* 81XX Support **************************************************************/
6981
6982int
6983qla81xx_nvram_config(scsi_qla_host_t *vha)
6984{
6985 int rval;
6986 struct init_cb_81xx *icb;
6987 struct nvram_81xx *nv;
6988 uint32_t *dptr;
6989 uint8_t *dptr1, *dptr2;
6990 uint32_t chksum;
6991 uint16_t cnt;
6992 struct qla_hw_data *ha = vha->hw;
6993
6994 rval = QLA_SUCCESS;
6995 icb = (struct init_cb_81xx *)ha->init_cb;
6996 nv = ha->nvram;
6997
6998 /* Determine NVRAM starting address. */
6999 ha->nvram_size = sizeof(struct nvram_81xx);
3a03eb79 7000 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7ec0effd
AD
7001 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
7002 ha->vpd_size = FA_VPD_SIZE_82XX;
3a03eb79
AV
7003
7004 /* Get VPD data into cache */
7005 ha->vpd = ha->nvram + VPD_OFFSET;
3d79038f
AV
7006 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
7007 ha->vpd_size);
3a03eb79
AV
7008
7009 /* Get NVRAM data into cache and calculate checksum. */
3d79038f 7010 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
3a03eb79 7011 ha->nvram_size);
3d79038f 7012 dptr = (uint32_t *)nv;
da08ef5c
JC
7013 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7014 chksum += le32_to_cpu(*dptr);
3a03eb79 7015
7c3df132
SK
7016 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
7017 "Contents of NVRAM:\n");
7018 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
7019 (uint8_t *)nv, ha->nvram_size);
3a03eb79
AV
7020
 7021 /* Bad NVRAM data, set default parameters. */
7022 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
7023 || nv->id[3] != ' ' ||
ad950360 7024 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
3a03eb79 7025 /* Reset NVRAM data. */
7c3df132 7026 ql_log(ql_log_info, vha, 0x0073,
9e336520 7027 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7c3df132 7028 "version=0x%x.\n", chksum, nv->id[0],
3a03eb79 7029 le16_to_cpu(nv->nvram_version));
7c3df132
SK
7030 ql_log(ql_log_info, vha, 0x0074,
7031 "Falling back to functioning (yet invalid -- WWPN) "
7032 "defaults.\n");
3a03eb79
AV
7033
7034 /*
7035 * Set default initialization control block.
7036 */
7037 memset(nv, 0, ha->nvram_size);
ad950360
BVA
7038 nv->nvram_version = cpu_to_le16(ICB_VERSION);
7039 nv->version = cpu_to_le16(ICB_VERSION);
98aee70d 7040 nv->frame_payload_size = 2048;
ad950360
BVA
7041 nv->execution_throttle = cpu_to_le16(0xFFFF);
7042 nv->exchange_count = cpu_to_le16(0);
3a03eb79 7043 nv->port_name[0] = 0x21;
f73cb695 7044 nv->port_name[1] = 0x00 + ha->port_no + 1;
3a03eb79
AV
7045 nv->port_name[2] = 0x00;
7046 nv->port_name[3] = 0xe0;
7047 nv->port_name[4] = 0x8b;
7048 nv->port_name[5] = 0x1c;
7049 nv->port_name[6] = 0x55;
7050 nv->port_name[7] = 0x86;
7051 nv->node_name[0] = 0x20;
7052 nv->node_name[1] = 0x00;
7053 nv->node_name[2] = 0x00;
7054 nv->node_name[3] = 0xe0;
7055 nv->node_name[4] = 0x8b;
7056 nv->node_name[5] = 0x1c;
7057 nv->node_name[6] = 0x55;
7058 nv->node_name[7] = 0x86;
ad950360
BVA
7059 nv->login_retry_count = cpu_to_le16(8);
7060 nv->interrupt_delay_timer = cpu_to_le16(0);
7061 nv->login_timeout = cpu_to_le16(0);
3a03eb79 7062 nv->firmware_options_1 =
ad950360
BVA
7063 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7064 nv->firmware_options_2 = cpu_to_le32(2 << 4);
7065 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7066 nv->firmware_options_3 = cpu_to_le32(2 << 13);
7067 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7068 nv->efi_parameters = cpu_to_le32(0);
3a03eb79 7069 nv->reset_delay = 5;
ad950360
BVA
7070 nv->max_luns_per_target = cpu_to_le16(128);
7071 nv->port_down_retry_count = cpu_to_le16(30);
7072 nv->link_down_timeout = cpu_to_le16(180);
eeebcc92 7073 nv->enode_mac[0] = 0x00;
6246b8a1
GM
7074 nv->enode_mac[1] = 0xC0;
7075 nv->enode_mac[2] = 0xDD;
3a03eb79
AV
7076 nv->enode_mac[3] = 0x04;
7077 nv->enode_mac[4] = 0x05;
f73cb695 7078 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
3a03eb79
AV
7079
7080 rval = 1;
7081 }
7082
9e522cd8
AE
7083 if (IS_T10_PI_CAPABLE(ha))
7084 nv->frame_payload_size &= ~7;
7085
aa230bc5
AE
7086 qlt_81xx_config_nvram_stage1(vha, nv);
7087
3a03eb79 7088 /* Reset Initialization control block */
773120e4 7089 memset(icb, 0, ha->init_cb_size);
3a03eb79
AV
7090
7091 /* Copy 1st segment. */
7092 dptr1 = (uint8_t *)icb;
7093 dptr2 = (uint8_t *)&nv->version;
7094 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7095 while (cnt--)
7096 *dptr1++ = *dptr2++;
7097
7098 icb->login_retry_count = nv->login_retry_count;
7099
7100 /* Copy 2nd segment. */
7101 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7102 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7103 cnt = (uint8_t *)&icb->reserved_5 -
7104 (uint8_t *)&icb->interrupt_delay_timer;
7105 while (cnt--)
7106 *dptr1++ = *dptr2++;
7107
7108 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
7109 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
7110 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
69e5f1ea
AV
7111 icb->enode_mac[0] = 0x00;
7112 icb->enode_mac[1] = 0xC0;
7113 icb->enode_mac[2] = 0xDD;
3a03eb79
AV
7114 icb->enode_mac[3] = 0x04;
7115 icb->enode_mac[4] = 0x05;
f73cb695 7116 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
3a03eb79
AV
7117 }
7118
b64b0e8f
AV
7119 /* Use extended-initialization control block. */
7120 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
7121
3a03eb79
AV
7122 /*
7123 * Setup driver NVRAM options.
7124 */
7125 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
a9083016 7126 "QLE8XXX");
3a03eb79 7127
aa230bc5
AE
7128 qlt_81xx_config_nvram_stage2(vha, icb);
7129
3a03eb79 7130 /* Use alternate WWN? */
ad950360 7131 if (nv->host_p & cpu_to_le32(BIT_15)) {
3a03eb79
AV
7132 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7133 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7134 }
7135
7136 /* Prepare nodename */
ad950360 7137 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
3a03eb79
AV
7138 /*
7139 * Firmware will apply the following mask if the nodename was
7140 * not provided.
7141 */
7142 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7143 icb->node_name[0] &= 0xF0;
7144 }
7145
7146 /* Set host adapter parameters. */
7147 ha->flags.disable_risc_code_load = 0;
7148 ha->flags.enable_lip_reset = 0;
7149 ha->flags.enable_lip_full_login =
7150 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
7151 ha->flags.enable_target_reset =
7152 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
7153 ha->flags.enable_led_scheme = 0;
7154 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
7155
7156 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7157 (BIT_6 | BIT_5 | BIT_4)) >> 4;
7158
7159 /* save HBA serial number */
7160 ha->serial0 = icb->port_name[5];
7161 ha->serial1 = icb->port_name[6];
7162 ha->serial2 = icb->port_name[7];
7163 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7164 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7165
ad950360 7166 icb->execution_throttle = cpu_to_le16(0xFFFF);
3a03eb79
AV
7167
7168 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7169
7170 /* Set minimum login_timeout to 4 seconds. */
7171 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7172 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7173 if (le16_to_cpu(nv->login_timeout) < 4)
ad950360 7174 nv->login_timeout = cpu_to_le16(4);
3a03eb79 7175 ha->login_timeout = le16_to_cpu(nv->login_timeout);
3a03eb79
AV
7176
7177 /* Set minimum RATOV to 100 tenths of a second. */
7178 ha->r_a_tov = 100;
7179
7180 ha->loop_reset_delay = nv->reset_delay;
7181
7182 /* Link Down Timeout = 0:
7183 *
7ec0effd 7184 * When Port Down timer expires we will start returning
3a03eb79
AV
7185 * I/O's to OS with "DID_NO_CONNECT".
7186 *
7187 * Link Down Timeout != 0:
7188 *
7189 * The driver waits for the link to come up after link down
7190 * before returning I/Os to OS with "DID_NO_CONNECT".
7191 */
7192 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7193 ha->loop_down_abort_time =
7194 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7195 } else {
7196 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7197 ha->loop_down_abort_time =
7198 (LOOP_DOWN_TIME - ha->link_down_timeout);
7199 }
7200
7201 /* Need enough time to try and get the port back. */
7202 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7203 if (qlport_down_retry)
7204 ha->port_down_retry_count = qlport_down_retry;
7205
7206 /* Set login_retry_count */
7207 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
7208 if (ha->port_down_retry_count ==
7209 le16_to_cpu(nv->port_down_retry_count) &&
7210 ha->port_down_retry_count > 3)
7211 ha->login_retry_count = ha->port_down_retry_count;
7212 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7213 ha->login_retry_count = ha->port_down_retry_count;
7214 if (ql2xloginretrycount)
7215 ha->login_retry_count = ql2xloginretrycount;
7216
6246b8a1 7217 /* If not running MSI-X, we need handshaking on interrupts. */
f73cb695 7218 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
ad950360 7219 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
6246b8a1 7220
3a03eb79
AV
7221 /* Enable ZIO. */
7222 if (!vha->flags.init_done) {
7223 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7224 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7225 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
7226 le16_to_cpu(icb->interrupt_delay_timer): 2;
7227 }
ad950360 7228 icb->firmware_options_2 &= cpu_to_le32(
3a03eb79
AV
7229 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
7230 vha->flags.process_response_queue = 0;
7231 if (ha->zio_mode != QLA_ZIO_DISABLED) {
7232 ha->zio_mode = QLA_ZIO_MODE_6;
7233
7c3df132 7234 ql_log(ql_log_info, vha, 0x0075,
3a03eb79 7235 "ZIO mode %d enabled; timer delay (%d us).\n",
7c3df132
SK
7236 ha->zio_mode,
7237 ha->zio_timer * 100);
3a03eb79
AV
7238
7239 icb->firmware_options_2 |= cpu_to_le32(
7240 (uint32_t)ha->zio_mode);
7241 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
7242 vha->flags.process_response_queue = 1;
7243 }
7244
41dc529a
QT
7245 /* enable RIDA Format2 */
7246 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7247 icb->firmware_options_3 |= BIT_0;
7248
3a03eb79 7249 if (rval) {
7c3df132
SK
7250 ql_log(ql_log_warn, vha, 0x0076,
7251 "NVRAM configuration failed.\n");
3a03eb79
AV
7252 }
7253 return (rval);
7254}
7255
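qla81xx_nvram_config() above validates the cached NVRAM image by summing every little-endian 32-bit word; a good image sums to zero and also carries the "ISP " signature and a minimum version, and anything else triggers the default-parameter fallback that follows. Here is a small self-contained sketch of that additive-checksum check, assuming a little-endian layout for simplicity; the buffer contents and helper names are invented for illustration and are not the driver's.

/* Standalone sketch of the 32-bit additive checksum test used on the
 * NVRAM image: the words of a valid image (including the stored
 * checksum word) sum to zero modulo 2^32.  The driver additionally
 * uses le32_to_cpu() to stay endian-safe; this sketch keeps raw words.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int nvram_image_ok(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0, word;
	size_t i;

	if (len % 4 || len < 8)
		return 0;
	for (i = 0; i < len; i += 4) {
		memcpy(&word, buf + i, 4);	/* avoid unaligned access */
		sum += word;
	}
	/* Signature check mirrors the 'I' 'S' 'P' ' ' test in the driver. */
	return sum == 0 && buf[0] == 'I' && buf[1] == 'S' &&
	       buf[2] == 'P' && buf[3] == ' ';
}

int main(void)
{
	uint8_t img[16] = { 'I', 'S', 'P', ' ' };
	uint32_t sum = 0, fix;
	size_t i;

	/* Patch the last word so the whole image sums to zero. */
	for (i = 0; i < 12; i += 4) {
		uint32_t w;
		memcpy(&w, img + i, 4);
		sum += w;
	}
	fix = 0u - sum;
	memcpy(img + 12, &fix, 4);

	printf("image valid: %d\n", nvram_image_ok(img, sizeof(img)));
	return 0;
}
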
a9083016
GM
7256int
7257qla82xx_restart_isp(scsi_qla_host_t *vha)
7258{
7259 int status, rval;
a9083016
GM
7260 struct qla_hw_data *ha = vha->hw;
7261 struct req_que *req = ha->req_q_map[0];
7262 struct rsp_que *rsp = ha->rsp_q_map[0];
7263 struct scsi_qla_host *vp;
feafb7b1 7264 unsigned long flags;
a9083016
GM
7265
7266 status = qla2x00_init_rings(vha);
7267 if (!status) {
7268 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7269 ha->flags.chip_reset_done = 1;
7270
7271 status = qla2x00_fw_ready(vha);
7272 if (!status) {
a9083016
GM
7273 /* Issue a marker after FW becomes ready. */
7274 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
a9083016 7275 vha->flags.online = 1;
7108b76e 7276 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
a9083016
GM
7277 }
7278
7279 /* if no cable then assume it's good */
7280 if ((vha->device_flags & DFLG_NO_CABLE))
7281 status = 0;
a9083016
GM
7282 }
7283
7284 if (!status) {
7285 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7286
7287 if (!atomic_read(&vha->loop_down_timer)) {
7288 /*
7289 * Issue marker command only when we are going
 7290 * to start the I/O.
7291 */
7292 vha->marker_needed = 1;
7293 }
7294
a9083016
GM
7295 ha->isp_ops->enable_intrs(ha);
7296
7297 ha->isp_abort_cnt = 0;
7298 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7299
53296788 7300 /* Update the firmware version */
3173167f 7301 status = qla82xx_check_md_needed(vha);
53296788 7302
a9083016
GM
7303 if (ha->fce) {
7304 ha->flags.fce_enabled = 1;
7305 memset(ha->fce, 0,
7306 fce_calc_size(ha->fce_bufs));
7307 rval = qla2x00_enable_fce_trace(vha,
7308 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
7309 &ha->fce_bufs);
7310 if (rval) {
cfb0919c 7311 ql_log(ql_log_warn, vha, 0x8001,
7c3df132
SK
7312 "Unable to reinitialize FCE (%d).\n",
7313 rval);
a9083016
GM
7314 ha->flags.fce_enabled = 0;
7315 }
7316 }
7317
7318 if (ha->eft) {
7319 memset(ha->eft, 0, EFT_SIZE);
7320 rval = qla2x00_enable_eft_trace(vha,
7321 ha->eft_dma, EFT_NUM_BUFFERS);
7322 if (rval) {
cfb0919c 7323 ql_log(ql_log_warn, vha, 0x8010,
7c3df132
SK
7324 "Unable to reinitialize EFT (%d).\n",
7325 rval);
a9083016
GM
7326 }
7327 }
a9083016
GM
7328 }
7329
7330 if (!status) {
cfb0919c 7331 ql_dbg(ql_dbg_taskm, vha, 0x8011,
7c3df132 7332 "qla82xx_restart_isp succeeded.\n");
feafb7b1
AE
7333
7334 spin_lock_irqsave(&ha->vport_slock, flags);
7335 list_for_each_entry(vp, &ha->vp_list, list) {
7336 if (vp->vp_idx) {
7337 atomic_inc(&vp->vref_count);
7338 spin_unlock_irqrestore(&ha->vport_slock, flags);
7339
a9083016 7340 qla2x00_vp_abort_isp(vp);
feafb7b1
AE
7341
7342 spin_lock_irqsave(&ha->vport_slock, flags);
7343 atomic_dec(&vp->vref_count);
7344 }
a9083016 7345 }
feafb7b1
AE
7346 spin_unlock_irqrestore(&ha->vport_slock, flags);
7347
a9083016 7348 } else {
cfb0919c 7349 ql_log(ql_log_warn, vha, 0x8016,
7c3df132 7350 "qla82xx_restart_isp **** FAILED ****.\n");
a9083016
GM
7351 }
7352
7353 return status;
7354}
7355
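The vport walk at the end of qla82xx_restart_isp() pins each virtual port with vref_count before dropping vport_slock, calls qla2x00_vp_abort_isp() (which may sleep) outside the lock, then re-takes the lock and drops the reference before advancing. The sketch below models just that pin, unlock, call, re-lock sequence in userspace; the list, lock, and callback names are made up for illustration.

/* Simplified model of the vport-walk pattern: each element is pinned
 * with a reference count before the list lock is released, so it
 * cannot go away while the (potentially sleeping) per-entry work runs,
 * and the lock is re-acquired before advancing the iterator.
 */
#include <pthread.h>
#include <stdio.h>

struct vport {
	int idx;
	int ref;
	struct vport *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void per_vport_work(struct vport *vp)
{
	/* Stands in for qla2x00_vp_abort_isp(); may block in real life. */
	printf("servicing vport %d\n", vp->idx);
}

static void walk_vports(struct vport *head)
{
	struct vport *vp;

	pthread_mutex_lock(&list_lock);
	for (vp = head; vp; vp = vp->next) {
		if (!vp->idx)
			continue;		/* skip the physical port */
		vp->ref++;			/* pin before dropping lock */
		pthread_mutex_unlock(&list_lock);

		per_vport_work(vp);

		pthread_mutex_lock(&list_lock);
		vp->ref--;			/* unpin under the lock */
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct vport c = { 2, 0, NULL }, b = { 1, 0, &c }, a = { 0, 0, &b };

	walk_vports(&a);
	return 0;
}
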
3a03eb79 7356void
ae97c91e 7357qla81xx_update_fw_options(scsi_qla_host_t *vha)
3a03eb79 7358{
ae97c91e
AV
7359 struct qla_hw_data *ha = vha->hw;
7360
f198cafa
HM
7361 /* Hold status IOCBs until ABTS response received. */
7362 if (ql2xfwholdabts)
7363 ha->fw_options[3] |= BIT_12;
7364
088d09d4
GM
7365 /* Set Retry FLOGI in case of P2P connection */
7366 if (ha->operating_mode == P2P) {
7367 ha->fw_options[2] |= BIT_3;
7368 ql_dbg(ql_dbg_disc, vha, 0x2103,
7369 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
7370 __func__, ha->fw_options[2]);
7371 }
7372
41dc529a
QT
7373 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
7374 if (ql2xmvasynctoatio) {
7375 if (qla_tgt_mode_enabled(vha) ||
7376 qla_dual_mode_enabled(vha))
7377 ha->fw_options[2] |= BIT_11;
7378 else
7379 ha->fw_options[2] &= ~BIT_11;
7380 }
7381
f7e761f5 7382 if (qla_tgt_mode_enabled(vha) ||
2da52737
QT
7383 qla_dual_mode_enabled(vha)) {
 7384 /* FW auto-sends SCSI status (BUSY) in these modes */
7385 ha->fw_options[1] |= BIT_8;
7386 ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
7387
 7388 /* FW performs Exchange validation */
f7e761f5 7389 ha->fw_options[2] |= BIT_4;
2da52737
QT
7390 } else {
7391 ha->fw_options[1] &= ~BIT_8;
7392 ha->fw_options[10] &= 0x00ff;
7393
f7e761f5 7394 ha->fw_options[2] &= ~BIT_4;
2da52737 7395 }
f7e761f5 7396
41dc529a
QT
7397 if (ql2xetsenable) {
7398 /* Enable ETS Burst. */
7399 memset(ha->fw_options, 0, sizeof(ha->fw_options));
7400 ha->fw_options[2] |= BIT_9;
7401 }
7402
83548fe2
QT
7403 ql_dbg(ql_dbg_init, vha, 0x00e9,
7404 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
7405 __func__, ha->fw_options[1], ha->fw_options[2],
7406 ha->fw_options[3], vha->host->active_mode);
ae97c91e 7407
ae97c91e 7408 qla2x00_set_fw_options(vha, ha->fw_options);
3a03eb79 7409}
09ff701a
SR
7410
7411/*
7412 * qla24xx_get_fcp_prio
7413 * Gets the fcp cmd priority value for the logged in port.
7414 * Looks for a match of the port descriptors within
7415 * each of the fcp prio config entries. If a match is found,
7416 * the tag (priority) value is returned.
7417 *
7418 * Input:
21090cbe 7419 * vha = scsi host structure pointer.
09ff701a
SR
7420 * fcport = port structure pointer.
7421 *
7422 * Return:
6c452a45 7423 * non-zero (if found)
f28a0a96 7424 * -1 (if not found)
09ff701a
SR
7425 *
7426 * Context:
7427 * Kernel context
7428 */
f28a0a96 7429static int
09ff701a
SR
7430qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
7431{
7432 int i, entries;
7433 uint8_t pid_match, wwn_match;
f28a0a96 7434 int priority;
09ff701a
SR
7435 uint32_t pid1, pid2;
7436 uint64_t wwn1, wwn2;
7437 struct qla_fcp_prio_entry *pri_entry;
7438 struct qla_hw_data *ha = vha->hw;
7439
7440 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
f28a0a96 7441 return -1;
09ff701a 7442
f28a0a96 7443 priority = -1;
09ff701a
SR
7444 entries = ha->fcp_prio_cfg->num_entries;
7445 pri_entry = &ha->fcp_prio_cfg->entry[0];
7446
7447 for (i = 0; i < entries; i++) {
7448 pid_match = wwn_match = 0;
7449
7450 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
7451 pri_entry++;
7452 continue;
7453 }
7454
7455 /* check source pid for a match */
7456 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
7457 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
7458 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
7459 if (pid1 == INVALID_PORT_ID)
7460 pid_match++;
7461 else if (pid1 == pid2)
7462 pid_match++;
7463 }
7464
7465 /* check destination pid for a match */
7466 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
7467 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
7468 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
7469 if (pid1 == INVALID_PORT_ID)
7470 pid_match++;
7471 else if (pid1 == pid2)
7472 pid_match++;
7473 }
7474
7475 /* check source WWN for a match */
7476 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
7477 wwn1 = wwn_to_u64(vha->port_name);
7478 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
7479 if (wwn2 == (uint64_t)-1)
7480 wwn_match++;
7481 else if (wwn1 == wwn2)
7482 wwn_match++;
7483 }
7484
7485 /* check destination WWN for a match */
7486 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
7487 wwn1 = wwn_to_u64(fcport->port_name);
7488 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
7489 if (wwn2 == (uint64_t)-1)
7490 wwn_match++;
7491 else if (wwn1 == wwn2)
7492 wwn_match++;
7493 }
7494
7495 if (pid_match == 2 || wwn_match == 2) {
7496 /* Found a matching entry */
7497 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
7498 priority = pri_entry->tag;
7499 break;
7500 }
7501
7502 pri_entry++;
7503 }
7504
7505 return priority;
7506}
7507
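In qla24xx_get_fcp_prio() above, an all-ones port ID (after masking with INVALID_PORT_ID) or an all-ones WWPN in a config entry acts as a wildcard, and an entry only applies when both of its descriptors match (pid_match == 2 or wwn_match == 2). The toy lookup below isolates that wildcard-pair rule using a single pair of IDs per entry; the structure and values are invented and do not reflect the driver's qla_fcp_prio_entry layout.

/* Toy version of the priority-entry match rule: a field equal to the
 * all-ones sentinel matches anything, and an entry applies only when
 * both the source and destination fields match.
 */
#include <stdint.h>
#include <stdio.h>

#define WILDCARD_ID 0x00ffffffu		/* stands in for INVALID_PORT_ID */

struct prio_entry {
	uint32_t src_id;
	uint32_t dst_id;
	int tag;			/* priority to return on a match */
};

static int field_matches(uint32_t cfg, uint32_t actual)
{
	return cfg == WILDCARD_ID || cfg == actual;
}

static int lookup_priority(const struct prio_entry *tbl, int n,
			   uint32_t local_id, uint32_t remote_id)
{
	int i;

	for (i = 0; i < n; i++) {
		if (field_matches(tbl[i].src_id, local_id) &&
		    field_matches(tbl[i].dst_id, remote_id))
			return tbl[i].tag;	/* first full match wins */
	}
	return -1;				/* no entry applies */
}

int main(void)
{
	const struct prio_entry tbl[] = {
		{ 0x010203, 0x0a0b0c, 5 },	/* exact source and dest */
		{ WILDCARD_ID, 0x0a0b0d, 3 },	/* any source, one dest  */
	};

	printf("%d\n", lookup_priority(tbl, 2, 0x010203, 0x0a0b0c)); /* 5  */
	printf("%d\n", lookup_priority(tbl, 2, 0x999999, 0x0a0b0d)); /* 3  */
	printf("%d\n", lookup_priority(tbl, 2, 0x999999, 0x0a0b0c)); /* -1 */
	return 0;
}
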
7508/*
7509 * qla24xx_update_fcport_fcp_prio
7510 * Activates fcp priority for the logged in fc port
7511 *
7512 * Input:
21090cbe 7513 * vha = scsi host structure pointer.
09ff701a
SR
7514 * fcp = port structure pointer.
7515 *
7516 * Return:
7517 * QLA_SUCCESS or QLA_FUNCTION_FAILED
7518 *
7519 * Context:
7520 * Kernel context.
7521 */
7522int
21090cbe 7523qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
09ff701a
SR
7524{
7525 int ret;
f28a0a96 7526 int priority;
09ff701a
SR
7527 uint16_t mb[5];
7528
21090cbe
MI
7529 if (fcport->port_type != FCT_TARGET ||
7530 fcport->loop_id == FC_NO_LOOP_ID)
09ff701a
SR
7531 return QLA_FUNCTION_FAILED;
7532
21090cbe 7533 priority = qla24xx_get_fcp_prio(vha, fcport);
f28a0a96
AV
7534 if (priority < 0)
7535 return QLA_FUNCTION_FAILED;
7536
7ec0effd 7537 if (IS_P3P_TYPE(vha->hw)) {
a00f6296
SK
7538 fcport->fcp_prio = priority & 0xf;
7539 return QLA_SUCCESS;
7540 }
7541
21090cbe 7542 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
cfb0919c
CD
7543 if (ret == QLA_SUCCESS) {
7544 if (fcport->fcp_prio != priority)
7545 ql_dbg(ql_dbg_user, vha, 0x709e,
7546 "Updated FCP_CMND priority - value=%d loop_id=%d "
7547 "port_id=%02x%02x%02x.\n", priority,
7548 fcport->loop_id, fcport->d_id.b.domain,
7549 fcport->d_id.b.area, fcport->d_id.b.al_pa);
a00f6296 7550 fcport->fcp_prio = priority & 0xf;
cfb0919c 7551 } else
7c3df132 7552 ql_dbg(ql_dbg_user, vha, 0x704f,
cfb0919c
CD
7553 "Unable to update FCP_CMND priority - ret=0x%x for "
7554 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
7555 fcport->d_id.b.domain, fcport->d_id.b.area,
7556 fcport->d_id.b.al_pa);
09ff701a
SR
7557 return ret;
7558}
7559
7560/*
7561 * qla24xx_update_all_fcp_prio
7562 * Activates fcp priority for all the logged in ports
7563 *
7564 * Input:
7565 * ha = adapter block pointer.
7566 *
7567 * Return:
7568 * QLA_SUCCESS or QLA_FUNCTION_FAILED
7569 *
7570 * Context:
7571 * Kernel context.
7572 */
7573int
7574qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
7575{
7576 int ret;
7577 fc_port_t *fcport;
7578
7579 ret = QLA_FUNCTION_FAILED;
7580 /* We need to set priority for all logged in ports */
7581 list_for_each_entry(fcport, &vha->vp_fcports, list)
7582 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
7583
7584 return ret;
7585}
d7459527 7586
82de802a
QT
7587struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
7588 int vp_idx, bool startqp)
d7459527
MH
7589{
7590 int rsp_id = 0;
7591 int req_id = 0;
7592 int i;
7593 struct qla_hw_data *ha = vha->hw;
7594 uint16_t qpair_id = 0;
7595 struct qla_qpair *qpair = NULL;
7596 struct qla_msix_entry *msix;
7597
7598 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
7599 ql_log(ql_log_warn, vha, 0x00181,
7600 "FW/Driver is not multi-queue capable.\n");
7601 return NULL;
7602 }
7603
7604 if (ql2xmqsupport) {
7605 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
7606 if (qpair == NULL) {
7607 ql_log(ql_log_warn, vha, 0x0182,
7608 "Failed to allocate memory for queue pair.\n");
7609 return NULL;
7610 }
7611 memset(qpair, 0, sizeof(struct qla_qpair));
7612
7613 qpair->hw = vha->hw;
25ff6af1 7614 qpair->vha = vha;
82de802a
QT
7615 qpair->qp_lock_ptr = &qpair->qp_lock;
7616 spin_lock_init(&qpair->qp_lock);
d7459527
MH
7617
 7618 /* Assign an available queue pair id */
7619 mutex_lock(&ha->mq_lock);
7620 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
b95b9452 7621 if (ha->num_qpairs >= ha->max_qpairs) {
d7459527
MH
7622 mutex_unlock(&ha->mq_lock);
7623 ql_log(ql_log_warn, vha, 0x0183,
7624 "No resources to create additional q pair.\n");
7625 goto fail_qid_map;
7626 }
b95b9452 7627 ha->num_qpairs++;
d7459527
MH
7628 set_bit(qpair_id, ha->qpair_qid_map);
7629 ha->queue_pair_map[qpair_id] = qpair;
7630 qpair->id = qpair_id;
7631 qpair->vp_idx = vp_idx;
e326d22a 7632 INIT_LIST_HEAD(&qpair->hints_list);
7c3f8fd1
QT
7633 qpair->chip_reset = ha->base_qpair->chip_reset;
7634 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
7635 qpair->enable_explicit_conf =
7636 ha->base_qpair->enable_explicit_conf;
d7459527
MH
7637
7638 for (i = 0; i < ha->msix_count; i++) {
093df737 7639 msix = &ha->msix_entries[i];
d7459527
MH
7640 if (msix->in_use)
7641 continue;
7642 qpair->msix = msix;
83548fe2 7643 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
d7459527
MH
7644 "Vector %x selected for qpair\n", msix->vector);
7645 break;
7646 }
7647 if (!qpair->msix) {
7648 ql_log(ql_log_warn, vha, 0x0184,
7649 "Out of MSI-X vectors!.\n");
7650 goto fail_msix;
7651 }
7652
7653 qpair->msix->in_use = 1;
7654 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
7655
7656 mutex_unlock(&ha->mq_lock);
7657
7658 /* Create response queue first */
82de802a 7659 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
d7459527
MH
7660 if (!rsp_id) {
7661 ql_log(ql_log_warn, vha, 0x0185,
7662 "Failed to create response queue.\n");
7663 goto fail_rsp;
7664 }
7665
7666 qpair->rsp = ha->rsp_q_map[rsp_id];
7667
7668 /* Create request queue */
82de802a
QT
7669 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
7670 startqp);
d7459527
MH
7671 if (!req_id) {
7672 ql_log(ql_log_warn, vha, 0x0186,
7673 "Failed to create request queue.\n");
7674 goto fail_req;
7675 }
7676
7677 qpair->req = ha->req_q_map[req_id];
7678 qpair->rsp->req = qpair->req;
82de802a 7679 qpair->rsp->qpair = qpair;
e326d22a
QT
 7680 /* Init qpair to this CPU. Will adjust at run time. */
7681 qla_cpu_update(qpair, smp_processor_id());
d7459527
MH
7682
7683 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
7684 if (ha->fw_attributes & BIT_4)
7685 qpair->difdix_supported = 1;
7686 }
7687
7688 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
7689 if (!qpair->srb_mempool) {
83548fe2 7690 ql_log(ql_log_warn, vha, 0xd036,
d7459527
MH
7691 "Failed to create srb mempool for qpair %d\n",
7692 qpair->id);
7693 goto fail_mempool;
7694 }
7695
7696 /* Mark as online */
7697 qpair->online = 1;
7698
7699 if (!vha->flags.qpairs_available)
7700 vha->flags.qpairs_available = 1;
7701
7702 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
7703 "Request/Response queue pair created, id %d\n",
7704 qpair->id);
7705 ql_dbg(ql_dbg_init, vha, 0x0187,
7706 "Request/Response queue pair created, id %d\n",
7707 qpair->id);
7708 }
7709 return qpair;
7710
7711fail_mempool:
7712fail_req:
7713 qla25xx_delete_rsp_que(vha, qpair->rsp);
7714fail_rsp:
7715 mutex_lock(&ha->mq_lock);
7716 qpair->msix->in_use = 0;
7717 list_del(&qpair->qp_list_elem);
7718 if (list_empty(&vha->qp_list))
7719 vha->flags.qpairs_available = 0;
7720fail_msix:
7721 ha->queue_pair_map[qpair_id] = NULL;
7722 clear_bit(qpair_id, ha->qpair_qid_map);
b95b9452 7723 ha->num_qpairs--;
d7459527
MH
7724 mutex_unlock(&ha->mq_lock);
7725fail_qid_map:
7726 kfree(qpair);
7727 return NULL;
7728}
7729
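qla2xxx_create_qpair() above acquires its resources in order (a queue-pair ID from the bitmap, an MSI-X vector, a response queue, a request queue, an srb mempool) and, on any failure, unwinds only what was already obtained through the chain of goto labels at the bottom. A generic sketch of that acquire/unwind idiom follows; the resources and the fake acquire/release helpers are purely illustrative.

/* Generic sketch of the goto-based unwind used in
 * qla2xxx_create_qpair(): each acquisition that succeeds has a
 * matching release label, and a failure jumps to the label that frees
 * exactly the resources obtained so far, in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *id_slot;
	void *vector;
	void *queues;
};

static void *acquire(const char *what, int ok)
{
	printf("acquire %s: %s\n", what, ok ? "ok" : "fail");
	return ok ? malloc(1) : NULL;
}

static void release(const char *what, void *p)
{
	printf("release %s\n", what);
	free(p);
}

static struct ctx *ctx_create(int fail_at)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;

	c->id_slot = acquire("id slot", fail_at != 1);
	if (!c->id_slot)
		goto fail_id;
	c->vector = acquire("msix vector", fail_at != 2);
	if (!c->vector)
		goto fail_vector;
	c->queues = acquire("queues", fail_at != 3);
	if (!c->queues)
		goto fail_queues;

	return c;			/* fully constructed */

fail_queues:
	release("msix vector", c->vector);
fail_vector:
	release("id slot", c->id_slot);
fail_id:
	free(c);
	return NULL;
}

int main(void)
{
	struct ctx *c = ctx_create(3);	/* fail while creating queues */

	if (!c)
		printf("creation failed, partial state undone\n");
	free(c);
	return 0;
}
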
7730int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
7731{
d65237c7 7732 int ret = QLA_FUNCTION_FAILED;
d7459527
MH
7733 struct qla_hw_data *ha = qpair->hw;
7734
d65237c7
SC
7735 if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
7736 goto fail;
7737
d7459527
MH
7738 qpair->delete_in_progress = 1;
7739 while (atomic_read(&qpair->ref_count))
7740 msleep(500);
7741
7742 ret = qla25xx_delete_req_que(vha, qpair->req);
7743 if (ret != QLA_SUCCESS)
7744 goto fail;
7745 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
7746 if (ret != QLA_SUCCESS)
7747 goto fail;
7748
7749 mutex_lock(&ha->mq_lock);
7750 ha->queue_pair_map[qpair->id] = NULL;
7751 clear_bit(qpair->id, ha->qpair_qid_map);
b95b9452 7752 ha->num_qpairs--;
d7459527 7753 list_del(&qpair->qp_list_elem);
d65237c7 7754 if (list_empty(&vha->qp_list)) {
d7459527 7755 vha->flags.qpairs_available = 0;
d65237c7
SC
7756 vha->flags.qpairs_req_created = 0;
7757 vha->flags.qpairs_rsp_created = 0;
7758 }
d7459527
MH
7759 mempool_destroy(qpair->srb_mempool);
7760 kfree(qpair);
7761 mutex_unlock(&ha->mq_lock);
7762
7763 return QLA_SUCCESS;
7764fail:
7765 return ret;
7766}
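
qla2xxx_delete_qpair() above first marks the pair as being deleted, then polls until its ref_count drains to zero (the msleep(500) loop) before deleting the request/response queues and freeing the structure. The sketch below shows the same quiesce-then-free sequence with C11 atomics in userspace; the type and helper names are invented for illustration.

/* Sketch of the quiesce-then-free sequence in qla2xxx_delete_qpair():
 * new users are turned away via a flag, the deleter waits for existing
 * references to drain, and teardown happens only once the count hits
 * zero.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct qpair_like {
	atomic_int ref_count;
	atomic_bool delete_in_progress;
};

static int qpair_get(struct qpair_like *qp)
{
	if (atomic_load(&qp->delete_in_progress))
		return 0;			/* refuse new users */
	atomic_fetch_add(&qp->ref_count, 1);
	return 1;
}

static void qpair_put(struct qpair_like *qp)
{
	atomic_fetch_sub(&qp->ref_count, 1);
}

static void qpair_delete(struct qpair_like *qp)
{
	atomic_store(&qp->delete_in_progress, true);
	while (atomic_load(&qp->ref_count))
		usleep(1000);			/* mirrors the msleep() loop */
	/* Safe to tear down queues and free now. */
	free(qp);
	printf("qpair deleted\n");
}

int main(void)
{
	struct qpair_like *qp = calloc(1, sizeof(*qp));

	if (!qp)
		return 1;
	if (qpair_get(qp)) {
		/* ... I/O would run here ... */
		qpair_put(qp);
	}
	qpair_delete(qp);
	return 0;
}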