/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);

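/*
 * Called by the FC transport to terminate outstanding I/O for a remote
 * port: if the node has a SCSI target id, abort all FCP IOCBs queued to
 * that target on the FCP ring.
 */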
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist * ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->vport->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}

	return;
}

/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist * ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct completion devloss_compl;
	struct lpfc_work_evt *evtp;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->scsi_target_id != -1) {
			printk(KERN_ERR "Cannot find remote node"
			       " for rport in dev_loss_tmo_callbk x%x\n",
			       rport->port_id);
		}
		return;
	}

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	init_completion(&devloss_compl);
	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	evtp->evt_arg1 = ndlp;
	evtp->evt_arg2 = &devloss_compl;
	evtp->evt = LPFC_EVT_DEV_LOSS;
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		wake_up(phba->work_wait);

	spin_unlock_irq(&phba->hbalock);

	wait_for_completion(&devloss_compl);

	return;
}

/*
 * This function is called from the worker thread when dev_loss_tmo
 * expires.
 */
void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int warn_on = 0;

	rport = ndlp->rport;

	if (!rport)
		return;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (ndlp->nlp_type & NLP_FABRIC) {
		int put_node;
		int put_rport;

		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	if (vport->load_flag & FC_UNLOADING)
		warn_on = 0;

	if (warn_on) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d (%d):0203 Devloss timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no, vport->vpi,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
				"%d (%d):0204 Devloss timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no, vport->vpi,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	}

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
	else {
		int put_node;
		int put_rport;

		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
	}
}

void
lpfc_worker_wake_up(struct lpfc_hba *phba)
{
	wake_up(phba->work_wait);
	return;
}

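/*
 * Drain phba->work_list and dispatch each queued worker event (dev-loss,
 * ELS retry, online/offline, warm start, kill) to its handler, freeing
 * the event structure unless it is embedded in an ndlp.
 */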
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	int free_evt;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_DEV_LOSS_DELAY:
			free_evt = 0; /* evt is part of ndlp */
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			vport = ndlp->vport;
			if (!vport)
				break;

			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
				"rport devlossdly:did:x%x flg:x%x",
				ndlp->nlp_DID, ndlp->nlp_flag, 0);

			if (!(vport->load_flag & FC_UNLOADING) &&
			    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
			    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
				lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RM);
			}
			break;
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_nlp_get(ndlp);
			lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			complete((struct completion *)(evtp->evt_arg2));
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
					? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}

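/*
 * Main body of the worker thread: process latched host-attention bits
 * (error, mailbox, link attention), per-vport timer events, and any
 * deferred slow-path (ELS) ring events, then complete queued work items.
 */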
void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport *vport;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

		if (!scsi_host_get(shost)) {
			continue;
		}
		spin_unlock_irq(&phba->hbalock);
		work_port_events = vport->work_port_events;

		if (work_port_events & WORKER_DISC_TMO)
			lpfc_disc_timeout_handler(vport);

		if (work_port_events & WORKER_ELS_TMO)
			lpfc_els_timeout_handler(vport);

		if (work_port_events & WORKER_HB_TMO)
			lpfc_hb_timeout_handler(phba);

		if (work_port_events & WORKER_MBOX_TMO)
			lpfc_mbox_timeout_handler(phba);

		if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
			lpfc_unblock_fabric_iocbs(phba);

		if (work_port_events & WORKER_FDMI_TMO)
			lpfc_fdmi_timeout_handler(vport);

		if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
			lpfc_ramp_down_queue_handler(phba);

		if (work_port_events & WORKER_RAMP_UP_QUEUE)
			lpfc_ramp_up_queue_handler(phba);

		spin_lock_irq(&vport->work_port_lock);
		vport->work_port_events &= ~work_port_events;
		spin_unlock_irq(&vport->work_port_lock);
		scsi_host_put(shost);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK)
		|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
		if (pring->flag & LPFC_STOP_IOCB_MASK) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
		} else {
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
		}
		/*
		 * Turn on Ring interrupts
		 */
		spin_lock_irq(&phba->hbalock);
		control = readl(phba->HCregaddr);
		if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
			control |= (HC_R0INT_ENA << LPFC_ELS_RING);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
		}
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_work_list_done(phba);
}

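/*
 * Wait condition for the worker thread: returns 1 when any vport has
 * pending port events, the HBA has host-attention bits or queued work
 * items, a deferred ELS ring event is outstanding, or the thread has
 * been asked to stop.
 */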
static int
check_work_wait_done(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_sli_ring *pring;
	int rc = 0;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->work_port_events) {
			rc = 1;
			goto exit;
		}
	}

	if (phba->work_ha || (!list_empty(&phba->work_list)) ||
	    kthread_should_stop()) {
		rc = 1;
		goto exit;
	}

	pring = &phba->sli.ring[LPFC_ELS_RING];
	if (pring->flag & LPFC_DEFERRED_RING_EVENT)
		rc = 1;
exit:
	if (rc)
		phba->work_found++;
	else
		phba->work_found = 0;

	spin_unlock_irq(&phba->hbalock);
	return rc;
}

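/*
 * Worker thread entry point. Sleeps until check_work_wait_done()
 * indicates there is work, then calls lpfc_work_done(); yields the CPU
 * after LPFC_MAX_WORKER_ITERATION consecutive busy iterations.
 */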
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);

	set_user_nice(current, -20);
	phba->work_wait = &work_waitq;
	phba->work_found = 0;

	while (1) {

		rc = wait_event_interruptible(work_waitq,
					      check_work_wait_done(phba));

		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		lpfc_work_done(phba);

		/* If there is a lot of slow ring work, like during link up
		 * check_work_wait_done() may cause this thread to not give
		 * up the CPU for very long periods of time. This may cause
		 * soft lockups or other problems. To avoid these situations
		 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
		 * consecutive iterations.
		 */
		if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
			phba->work_found = 0;
			schedule();
		}
	}
	phba->work_wait = NULL;
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return 1;
}

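/*
 * Walk the vport node list and run each active node through the
 * discovery state machine with DEVICE_RM (remove) or DEVICE_RECOVERY;
 * fabric nodes are left alone on a plain link down. When the HBA
 * requires a full vport teardown, also unregister RPIs and the VPI.
 */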
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;

		if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if (!remove && ndlp->nlp_type & NLP_FABRIC)
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		lpfc_mbx_unreg_vpi(vport);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	}
}

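/*
 * Handle link down for a single vport: post the transport event, flush
 * RSCN and ELS activity, recover or remove the vport's nodes, and stop
 * the discovery timer.
 */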
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
		/* free any ndlp's in unused state */
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			lpfc_drop_node(vport, ndlp);

	/* Turn off discovery timer if it is running */
	lpfc_can_disctmo(vport);
}

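/*
 * HBA-wide link down handling: mark the link down, notify every vport,
 * clean up firmware default RPIs, and reset the point-to-point flags.
 */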
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport *port_iterator;
	LPFC_MBOXQ_t *mb;

	if (phba->link_state == LPFC_LINK_DOWN) {
		return 0;
	}
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		phba->pport->fc_flag &= ~FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry(port_iterator, &phba->port_list, listentry) {

		/* Issue a LINK DOWN event to all nodes */
		lpfc_linkdown_port(port_iterator);
	}

	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb,
						(MBX_NOWAIT | MBX_STOP_IOCB))
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}

static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;

		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup it is safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

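/*
 * Per-vport link up handling: post the transport event, reset discovery
 * flags and counters, and clean up stale nodes left over from the
 * previous link state.
 */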
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up: top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);

	/* free any ndlp's in unused state */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
				 nlp_listp)
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			lpfc_drop_node(vport, ndlp);
}

static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	list_for_each_entry(vport, &phba->port_list, listentry) {
		lpfc_linkup_port(vport);
	}
	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d (%d):0320 CLEAR_LA mbxStatus error x%x hba "
				"state x%x\n",
				phba->brd_no, vport->vpi, mb->mbxStatus,
				vport->port_state);

		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	return;

	vport->num_disc_nodes = 0;
	/* go thru NPR nodes and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
	}

	vport->port_state = LPFC_VPORT_READY;

out:
	/* Device Discovery completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d (%d):0225 Device Discovery completes\n",
			phba->brd_no, vport->vpi);

	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
	spin_unlock_irq(shost->host_lock);

	del_timer_sync(&phba->fc_estabtmo);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}

static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI) {
		vport->port_state = LPFC_FLOGI;
		lpfc_set_disctmo(vport);
		lpfc_initial_flogi(vport);
	}
	return;

out:
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"%d (%d):0306 CONFIG_LINK mbxStatus error x%x "
			"HBA state x%x\n",
			phba->brd_no, vport->vpi, pmb->mb.mbxStatus,
			vport->port_state);

	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"%d (%d):0200 CONFIG_LINK bad hba state x%x\n",
			phba->brd_no, vport->vpi, vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

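/*
 * Completion handler for the READ_SPARAM mailbox command: copy the
 * returned service parameters into the vport (honoring any soft
 * WWNN/WWPN overrides), or drop the link on error.
 */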
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport *vport = pmb->vport;


	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d (%d):0319 READ_SPARAM mbxStatus error x%x "
				"hba state x%x>\n",
				phba->brd_no, vport->vpi, mb->mbxStatus,
				vport->port_state);

		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(vport->fc_nodename));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(vport->fc_portname));
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

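/*
 * Process a link-up attention event: record link speed and topology,
 * pick up the loop map and assigned ALPA when on a loop, then issue
 * READ_SPARAM and CONFIG_LINK mailbox commands to continue bring-up.
 */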
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(&phba->hbalock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	case LA_8GHZ_LINK:
		phba->fc_linkspeed = LA_8GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* Get Loop Map information */
		if (la->il)
			vport->fc_flag |= FC_LBIT;

		vport->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"%d:1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							phba->brd_no,
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_npiv_enable &&
			    (phba->sli_rev == 3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		vport->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox, 0);
		sparam_mbox->vport = vport;
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			mempool_free(sparam_mbox, phba->mbox_mem_pool);
			if (cfglink_mbox)
				mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	}

	if (cfglink_mbox) {
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc != MBX_NOT_FINISHED)
			return;
		mempool_free(cfglink_mbox, phba->mbox_mem_pool);
	}
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
		phba->brd_no, vport->vpi,
		vport->port_state, sparam_mbox, cfglink_mbox);

	lpfc_issue_clear_la(phba, vport);
	return;
}

static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_linkdown(phba);

	/* turn on Link Attention interrupts - no CLEAR_LA needed */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
}

/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"%d:1307 READ_LA mbox error x%x state x%x\n",
				phba->brd_no, mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (la->pb)
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if (((phba->fc_eventTag + 1) < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;

	if (la->attType == AT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"%d:1306 Link Up Event in loop back mode "
				"x%x received Data: x%x x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				la->granted_AL_PA, la->UlnkSpeed,
				phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1303 Link Up Event x%x received "
				"Data: x%x x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				la->granted_AL_PA, la->UlnkSpeed,
				phba->alpa_map[0]);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else {
		phba->fc_stat.LinkDown++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1305 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		lpfc_mbx_issue_link_down(phba);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_nlp_put(ndlp);

	return;
}

static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
	case 0x9700:
		lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
				"%d (%d):0911 cmpl_unreg_vpi, "
				"mb status = 0x%x\n",
				phba->brd_no, vport->vpi, mb->mbxStatus);
		break;
	}
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if (vport->load_flag & FC_UNLOADING)
		scsi_host_put(shost);
}

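/*
 * Issue an UNREG_VPI mailbox command for this vport; if the command
 * cannot be issued, log the failure and mark unreg_vpi_cmpl as
 * VPORT_ERROR.
 */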
void
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"%d (%d):1800 Could not issue unreg_vpi\n",
				phba->brd_no, vport->vpi);
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
	}
}

static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
				"%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n",
				phba->brd_no, vport->vpi, mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_vport *next_vport;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;
	ndlp = (struct lpfc_nodelist *) pmb->context2;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_nlp_put(ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"%d (%d):0258 Register Fabric login error: 0x%x\n",
			phba->brd_no, vport->vpi, mb->mbxStatus);

		return;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	lpfc_nlp_put(ndlp);	/* Drop the reference from the mbox */

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		list_for_each_entry(next_vport, &phba->port_list, listentry) {
			if (next_vport->port_type == LPFC_PHYSICAL_PORT)
				continue;

			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(next_vport);
			else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
				lpfc_vport_set_state(vport,
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
						"%d (%d):0259 No NPIV Fabric "
						"support\n",
						phba->brd_no, vport->vpi);
			}
		}
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	if (mb->mbxStatus) {
out:
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_drop_node(vport, ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
			"%d (%d):0260 Register NameServer error: 0x%x\n",
			phba->brd_no, vport->vpi, mb->mbxStatus);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

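/*
 * Register (or re-register) an ndlp with the FC transport as a remote
 * port, fill in its static port data and roles, and record the SCSI
 * target id assigned by the transport.
 */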
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port.  But fc_remote_port_add zeros the space to which
	 * rport->dd_data points.  So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
		lpfc_nlp_put(ndlp);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;


	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}

static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	fc_remote_port_delete(rport);

	return;
}

de0c5b32 1502static void
2e0fef85 1503lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
dea3101e 1504{
2e0fef85
JS
1505 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1506
1507 spin_lock_irq(shost->host_lock);
de0c5b32
JS
1508 switch (state) {
1509 case NLP_STE_UNUSED_NODE:
2e0fef85 1510 vport->fc_unused_cnt += count;
de0c5b32
JS
1511 break;
1512 case NLP_STE_PLOGI_ISSUE:
2e0fef85 1513 vport->fc_plogi_cnt += count;
de0c5b32
JS
1514 break;
1515 case NLP_STE_ADISC_ISSUE:
2e0fef85 1516 vport->fc_adisc_cnt += count;
dea3101e 1517 break;
de0c5b32 1518 case NLP_STE_REG_LOGIN_ISSUE:
2e0fef85 1519 vport->fc_reglogin_cnt += count;
de0c5b32
JS
1520 break;
1521 case NLP_STE_PRLI_ISSUE:
2e0fef85 1522 vport->fc_prli_cnt += count;
de0c5b32
JS
1523 break;
1524 case NLP_STE_UNMAPPED_NODE:
2e0fef85 1525 vport->fc_unmap_cnt += count;
de0c5b32
JS
1526 break;
1527 case NLP_STE_MAPPED_NODE:
2e0fef85 1528 vport->fc_map_cnt += count;
de0c5b32
JS
1529 break;
1530 case NLP_STE_NPR_NODE:
2e0fef85 1531 vport->fc_npr_cnt += count;
de0c5b32
JS
1532 break;
1533 }
2e0fef85 1534 spin_unlock_irq(shost->host_lock);
de0c5b32 1535}
66a9ed66 1536
de0c5b32 1537static void
2e0fef85 1538lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
de0c5b32
JS
1539 int old_state, int new_state)
1540{
2e0fef85
JS
1541 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1542
de0c5b32
JS
1543 if (new_state == NLP_STE_UNMAPPED_NODE) {
1544 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1545 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1546 ndlp->nlp_type |= NLP_FC_NODE;
1547 }
1548 if (new_state == NLP_STE_MAPPED_NODE)
1549 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1550 if (new_state == NLP_STE_NPR_NODE)
1551 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
1552
1553 /* Transport interface */
1554 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
1555 old_state == NLP_STE_UNMAPPED_NODE)) {
2e0fef85
JS
1556 vport->phba->nport_event_cnt++;
1557 lpfc_unregister_remote_port(ndlp);
de0c5b32 1558 }
dea3101e 1559
de0c5b32
JS
1560 if (new_state == NLP_STE_MAPPED_NODE ||
1561 new_state == NLP_STE_UNMAPPED_NODE) {
2e0fef85 1562 vport->phba->nport_event_cnt++;
858c9f6c
JS
1563 /*
1564 * Tell the fc transport about the port, if we haven't
1565 * already. If we have, and it's a scsi entity, be
1566 * sure to unblock any attached scsi devices
1567 */
1568 lpfc_register_remote_port(vport, ndlp);
de0c5b32 1569 }
858c9f6c
JS
1570 /*
1571 * if we added to Mapped list, but the remote port
1572 * registration failed or assigned a target id outside
1573 * our presentable range - move the node to the
1574 * Unmapped List
1575 */
de0c5b32
JS
1576 if (new_state == NLP_STE_MAPPED_NODE &&
1577 (!ndlp->rport ||
1578 ndlp->rport->scsi_target_id == -1 ||
1579 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
2e0fef85 1580 spin_lock_irq(shost->host_lock);
de0c5b32 1581 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
2e0fef85
JS
1582 spin_unlock_irq(shost->host_lock);
1583 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
dea3101e 1584 }
de0c5b32
JS
1585}
1586
685f0bf7
JS
1587static char *
1588lpfc_nlp_state_name(char *buffer, size_t size, int state)
1589{
1590 static char *states[] = {
1591 [NLP_STE_UNUSED_NODE] = "UNUSED",
1592 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
1593 [NLP_STE_ADISC_ISSUE] = "ADISC",
1594 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
1595 [NLP_STE_PRLI_ISSUE] = "PRLI",
1596 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
1597 [NLP_STE_MAPPED_NODE] = "MAPPED",
1598 [NLP_STE_NPR_NODE] = "NPR",
1599 };
1600
1601 if (state < ARRAY_SIZE(states) && states[state])
1602 strlcpy(buffer, states[state], size);
1603 else
1604 snprintf(buffer, size, "unknown (%d)", state);
1605 return buffer;
1606}
1607
de0c5b32 1608void
2e0fef85
JS
1609lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1610 int state)
de0c5b32 1611{
2e0fef85 1612 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
de0c5b32 1613 int old_state = ndlp->nlp_state;
685f0bf7 1614 char name1[16], name2[16];
de0c5b32 1615
2e0fef85 1616 lpfc_printf_log(vport->phba, KERN_INFO, LOG_NODE,
858c9f6c
JS
1617 "%d (%d):0904 NPort state transition x%06x, %s -> %s\n",
1618 vport->phba->brd_no, vport->vpi,
685f0bf7
JS
1619 ndlp->nlp_DID,
1620 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
1621 lpfc_nlp_state_name(name2, sizeof(name2), state));
858c9f6c
JS
1622
1623 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
1624 "node statechg did:x%x old:%d ste:%d",
1625 ndlp->nlp_DID, old_state, state);
1626
de0c5b32
JS
1627 if (old_state == NLP_STE_NPR_NODE &&
1628 (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
1629 state != NLP_STE_NPR_NODE)
2e0fef85 1630 lpfc_cancel_retry_delay_tmo(vport, ndlp);
de0c5b32
JS
1631 if (old_state == NLP_STE_UNMAPPED_NODE) {
1632 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1633 ndlp->nlp_type &= ~NLP_FC_NODE;
1634 }
1635
685f0bf7 1636 if (list_empty(&ndlp->nlp_listp)) {
2e0fef85
JS
1637 spin_lock_irq(shost->host_lock);
1638 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
1639 spin_unlock_irq(shost->host_lock);
685f0bf7 1640 } else if (old_state)
2e0fef85 1641 lpfc_nlp_counters(vport, old_state, -1);
de0c5b32
JS
1642
1643 ndlp->nlp_state = state;
2e0fef85
JS
1644 lpfc_nlp_counters(vport, state, 1);
1645 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
de0c5b32
JS
1646}
1647
1648void
2e0fef85 1649lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
de0c5b32 1650{
2e0fef85
JS
1651 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1652
de0c5b32 1653 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
2e0fef85 1654 lpfc_cancel_retry_delay_tmo(vport, ndlp);
de0c5b32 1655 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
2e0fef85
JS
1656 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1657 spin_lock_irq(shost->host_lock);
685f0bf7 1658 list_del_init(&ndlp->nlp_listp);
2e0fef85 1659 spin_unlock_irq(shost->host_lock);
858c9f6c
JS
1660 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1661 NLP_STE_UNUSED_NODE);
de0c5b32
JS
1662}
1663
1664void
2e0fef85 1665lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
de0c5b32 1666{
2e0fef85
JS
1667 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1668
de0c5b32 1669 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
2e0fef85 1670 lpfc_cancel_retry_delay_tmo(vport, ndlp);
de0c5b32 1671 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
2e0fef85
JS
1672 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1673 spin_lock_irq(shost->host_lock);
685f0bf7 1674 list_del_init(&ndlp->nlp_listp);
858c9f6c 1675 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
2e0fef85 1676 spin_unlock_irq(shost->host_lock);
329f9bc7 1677 lpfc_nlp_put(ndlp);
dea3101e 1678}
1679
1680/*
1681 * Start / ReStart rescue timer for Discovery / RSCN handling
1682 */
1683void
2e0fef85 1684lpfc_set_disctmo(struct lpfc_vport *vport)
dea3101e 1685{
2e0fef85
JS
1686 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1687 struct lpfc_hba *phba = vport->phba;
dea3101e 1688 uint32_t tmo;
1689
2e0fef85 1690 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
c9f8735b
JW
1691 /* For FAN, timeout should be greater then edtov */
1692 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1693 } else {
1694 /* Normal discovery timeout should be > then ELS/CT timeout
1695 * FC spec states we need 3 * ratov for CT requests
1696 */
1697 tmo = ((phba->fc_ratov * 3) + 3);
1698 }
dea3101e 1699
858c9f6c
JS
1700
1701 if (!timer_pending(&vport->fc_disctmo)) {
1702 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1703 "set disc timer: tmo:x%x state:x%x flg:x%x",
1704 tmo, vport->port_state, vport->fc_flag);
1705 }
1706
2e0fef85
JS
1707 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
1708 spin_lock_irq(shost->host_lock);
1709 vport->fc_flag |= FC_DISC_TMO;
1710 spin_unlock_irq(shost->host_lock);
dea3101e 1711
1712 /* Start Discovery Timer state <hba_state> */
1713 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
92d7f7b0 1714 "%d (%d):0247 Start Discovery Timer state x%x "
dea3101e 1715 "Data: x%x x%lx x%x x%x\n",
92d7f7b0 1716 phba->brd_no, vport->vpi, vport->port_state, tmo,
2e0fef85
JS
1717 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1718 vport->fc_adisc_cnt);
dea3101e 1719
1720 return;
1721}
1722
1723/*
1724 * Cancel rescue timer for Discovery / RSCN handling
1725 */
1726int
2e0fef85 1727lpfc_can_disctmo(struct lpfc_vport *vport)
dea3101e 1728{
2e0fef85
JS
1729 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1730 struct lpfc_hba *phba = vport->phba;
1731 unsigned long iflags;
1732
858c9f6c
JS
1733 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1734 "can disc timer: state:x%x rtry:x%x flg:x%x",
1735 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
1736
dea3101e 1737 /* Turn off discovery timer if it's running */
2e0fef85
JS
1738 if (vport->fc_flag & FC_DISC_TMO) {
1739 spin_lock_irqsave(shost->host_lock, iflags);
1740 vport->fc_flag &= ~FC_DISC_TMO;
1741 spin_unlock_irqrestore(shost->host_lock, iflags);
1742 del_timer_sync(&vport->fc_disctmo);
1743 spin_lock_irqsave(&vport->work_port_lock, iflags);
1744 vport->work_port_events &= ~WORKER_DISC_TMO;
1745 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
dea3101e 1746 }
1747
1748 /* Cancel Discovery Timer state <hba_state> */
1749 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
92d7f7b0 1750 "%d (%d):0248 Cancel Discovery Timer state x%x "
dea3101e 1751 "Data: x%x x%x x%x\n",
92d7f7b0
JS
1752 phba->brd_no, vport->vpi, vport->port_state,
1753 vport->fc_flag, vport->fc_plogi_cnt,
1754 vport->fc_adisc_cnt);
dea3101e 1755
2fe165b6 1756 return 0;
dea3101e 1757}
1758
1759/*
1760 * Check specified ring for outstanding IOCB on the SLI queue
1761 * Return true if iocb matches the specified nport
1762 */
1763int
2e0fef85
JS
1764lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1765 struct lpfc_sli_ring *pring,
1766 struct lpfc_iocbq *iocb,
1767 struct lpfc_nodelist *ndlp)
dea3101e 1768{
2e0fef85
JS
1769 struct lpfc_sli *psli = &phba->sli;
1770 IOCB_t *icmd = &iocb->iocb;
92d7f7b0
JS
1771 struct lpfc_vport *vport = ndlp->vport;
1772
1773 if (iocb->vport != vport)
1774 return 0;
1775
dea3101e 1776 if (pring->ringno == LPFC_ELS_RING) {
1777 switch (icmd->ulpCommand) {
1778 case CMD_GEN_REQUEST64_CR:
1779 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
2fe165b6 1780 return 1;
dea3101e 1781 case CMD_ELS_REQUEST64_CR:
10d4e957
JS
1782 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
1783 return 1;
dea3101e 1784 case CMD_XMIT_ELS_RSP64_CX:
1785 if (iocb->context1 == (uint8_t *) ndlp)
2fe165b6 1786 return 1;
dea3101e 1787 }
a4bc3379 1788 } else if (pring->ringno == psli->extra_ring) {
dea3101e 1789
1790 } else if (pring->ringno == psli->fcp_ring) {
1791 /* Skip match check if waiting to relogin to FCP target */
1792 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
92d7f7b0 1793 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
2fe165b6 1794 return 0;
dea3101e 1795 }
1796 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
2fe165b6 1797 return 1;
dea3101e 1798 }
1799 } else if (pring->ringno == psli->next_ring) {
1800
1801 }
2fe165b6 1802 return 0;
dea3101e 1803}
1804
1805/*
1806 * Free resources / clean up outstanding I/Os
1807 * associated with nlp_rpi in the LPFC_NODELIST entry.
1808 */
1809static int
2e0fef85 1810lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
dea3101e 1811{
2534ba75 1812 LIST_HEAD(completions);
dea3101e 1813 struct lpfc_sli *psli;
1814 struct lpfc_sli_ring *pring;
1815 struct lpfc_iocbq *iocb, *next_iocb;
1816 IOCB_t *icmd;
1817 uint32_t rpi, i;
1818
92d7f7b0
JS
1819 lpfc_fabric_abort_nport(ndlp);
1820
dea3101e 1821 /*
1822 * Everything that matches on txcmplq will be returned
1823 * by firmware with a no rpi error.
1824 */
1825 psli = &phba->sli;
1826 rpi = ndlp->nlp_rpi;
1827 if (rpi) {
1828 /* Now process each ring */
1829 for (i = 0; i < psli->num_rings; i++) {
1830 pring = &psli->ring[i];
1831
2e0fef85 1832 spin_lock_irq(&phba->hbalock);
dea3101e 1833 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
2e0fef85 1834 list) {
dea3101e 1835 /*
1836 * Check to see if iocb matches the nport we are
1837 * looking for
1838 */
92d7f7b0
JS
1839 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
1840 ndlp))) {
dea3101e 1841 /* It matches, so dequeue it and call the
 1842 completion handler with an error */
2534ba75
JS
1843 list_move_tail(&iocb->list,
1844 &completions);
dea3101e 1845 pring->txq_cnt--;
dea3101e 1846 }
1847 }
2e0fef85 1848 spin_unlock_irq(&phba->hbalock);
dea3101e 1849 }
1850 }
2534ba75
JS
1851
1852 while (!list_empty(&completions)) {
1853 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
92d7f7b0 1854 list_del_init(&iocb->list);
2534ba75 1855
2e0fef85
JS
1856 if (!iocb->iocb_cmpl)
1857 lpfc_sli_release_iocbq(phba, iocb);
1858 else {
2534ba75
JS
1859 icmd = &iocb->iocb;
1860 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1861 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2e0fef85
JS
1862 (iocb->iocb_cmpl)(phba, iocb, iocb);
1863 }
2534ba75
JS
1864 }
1865
2fe165b6 1866 return 0;
dea3101e 1867}
1868
1869/*
1870 * Free rpi associated with LPFC_NODELIST entry.
 1871 * This routine is called from lpfc_cleanup_node(), when we are removing
1872 * a LPFC_NODELIST entry. It is also called if the driver initiates a
1873 * LOGO that completes successfully, and we are waiting to PLOGI back
1874 * to the remote NPort. In addition, it is called after we receive
 1875 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
1876 * we are waiting to PLOGI back to the remote NPort.
1877 */
1878int
2e0fef85 1879lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
dea3101e 1880{
2e0fef85
JS
1881 struct lpfc_hba *phba = vport->phba;
1882 LPFC_MBOXQ_t *mbox;
dea3101e 1883 int rc;
1884
1885 if (ndlp->nlp_rpi) {
2e0fef85
JS
1886 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1887 if (mbox) {
92d7f7b0 1888 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
ed957684 1889 mbox->vport = vport;
92d7f7b0
JS
1890 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1891 rc = lpfc_sli_issue_mbox(phba, mbox,
1892 (MBX_NOWAIT | MBX_STOP_IOCB));
dea3101e 1893 if (rc == MBX_NOT_FINISHED)
2e0fef85 1894 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e 1895 }
dea3101e 1896 lpfc_no_rpi(phba, ndlp);
1897 ndlp->nlp_rpi = 0;
1898 return 1;
1899 }
1900 return 0;
1901}
1902
92d7f7b0
JS
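/* Unregister every RPI on this vport with a single UNREG_LOGIN (rpi 0xffff). */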
1903void
1904lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1905{
1906 struct lpfc_hba *phba = vport->phba;
1907 LPFC_MBOXQ_t *mbox;
1908 int rc;
1909
1910 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1911 if (mbox) {
1912 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1913 mbox->vport = vport;
1914 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1915 rc = lpfc_sli_issue_mbox(phba, mbox,
1916 (MBX_NOWAIT | MBX_STOP_IOCB));
1917 if (rc == MBX_NOT_FINISHED) {
1918 mempool_free(mbox, phba->mbox_mem_pool);
1919 }
1920 }
1921}
1922
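/* Unregister the default RPIs for this vport via UNREG_DID (did 0xffffffff). */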
1923void
1924lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1925{
1926 struct lpfc_hba *phba = vport->phba;
1927 LPFC_MBOXQ_t *mbox;
1928 int rc;
1929
1930 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1931 if (mbox) {
1932 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1933 mbox->vport = vport;
1934 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1935 rc = lpfc_sli_issue_mbox(phba, mbox,
1936 (MBX_NOWAIT | MBX_STOP_IOCB));
1937 if (rc == MBX_NOT_FINISHED) {
1938 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1939 "%d (%d):1815 Could not issue "
1940 "unreg_did (default rpis)\n",
1941 phba->brd_no, vport->vpi);
1942 mempool_free(mbox, phba->mbox_mem_pool);
1943 }
1944 }
1945}
1946
dea3101e 1947/*
1948 * Free resources associated with LPFC_NODELIST entry
1949 * so it can be freed.
1950 */
1951static int
2e0fef85 1952lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
dea3101e 1953{
2e0fef85
JS
1954 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1955 struct lpfc_hba *phba = vport->phba;
1956 LPFC_MBOXQ_t *mb, *nextmb;
dea3101e 1957 struct lpfc_dmabuf *mp;
dea3101e 1958
1959 /* Cleanup node for NPort <nlp_DID> */
1960 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
92d7f7b0 1961 "%d (%d):0900 Cleanup node for NPort x%x "
dea3101e 1962 "Data: x%x x%x x%x\n",
92d7f7b0 1963 phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag,
dea3101e 1964 ndlp->nlp_state, ndlp->nlp_rpi);
1965
2e0fef85 1966 lpfc_dequeue_node(vport, ndlp);
dea3101e 1967
dea3101e 1968 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1969 if ((mb = phba->sli.mbox_active)) {
1970 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1971 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1972 mb->context2 = NULL;
1973 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1974 }
1975 }
33ccf8d1 1976
2e0fef85 1977 spin_lock_irq(&phba->hbalock);
dea3101e 1978 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1979 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
92d7f7b0 1980 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
dea3101e 1981 mp = (struct lpfc_dmabuf *) (mb->context1);
1982 if (mp) {
2e0fef85 1983 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
dea3101e 1984 kfree(mp);
1985 }
1986 list_del(&mb->list);
1987 mempool_free(mb, phba->mbox_mem_pool);
329f9bc7 1988 lpfc_nlp_put(ndlp);
dea3101e 1989 }
1990 }
2e0fef85 1991 spin_unlock_irq(&phba->hbalock);
dea3101e 1992
07951076 1993 lpfc_els_abort(phba,ndlp);
2e0fef85 1994 spin_lock_irq(shost->host_lock);
c01f3208 1995 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 1996 spin_unlock_irq(shost->host_lock);
dea3101e 1997
5024ab17 1998 ndlp->nlp_last_elscmd = 0;
dea3101e 1999 del_timer_sync(&ndlp->nlp_delayfunc);
2000
dea3101e 2001 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
2002 list_del_init(&ndlp->els_retry_evt.evt_listp);
92d7f7b0
JS
2003 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
2004 list_del_init(&ndlp->dev_loss_evt.evt_listp);
dea3101e 2005
858c9f6c
JS
2006 if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) {
2007 list_del_init(&ndlp->dev_loss_evt.evt_listp);
2008 complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2));
2009 }
2010
2e0fef85 2011 lpfc_unreg_rpi(vport, ndlp);
dea3101e 2012
2fe165b6 2013 return 0;
dea3101e 2014}
2015
2016/*
2017 * Check to see if we can free the nlp back to the freelist.
2018 * If we are in the middle of using the nlp in the discovery state
2019 * machine, defer the free till we reach the end of the state machine.
2020 */
329f9bc7 2021static void
2e0fef85 2022lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
dea3101e 2023{
1dcb58e5 2024 struct lpfc_rport_data *rdata;
dea3101e 2025
2026 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
2e0fef85 2027 lpfc_cancel_retry_delay_tmo(vport, ndlp);
dea3101e 2028 }
2029
2e0fef85 2030 lpfc_cleanup_node(vport, ndlp);
1dcb58e5 2031
2e0fef85 2032 /*
92d7f7b0
JS
2033 * We can get here with a non-NULL ndlp->rport because when we
 2034 * unregister an rport we don't break the rport/node linkage. If the
 2035 * rport is still linked, make sure we don't leave any dangling pointers behind.
2e0fef85 2036 */
92d7f7b0 2037 if (ndlp->rport) {
329f9bc7
JS
2038 rdata = ndlp->rport->dd_data;
2039 rdata->pnode = NULL;
2040 ndlp->rport = NULL;
dea3101e 2041 }
dea3101e 2042}
2043
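/*
 * DID match helper for node lookup: accept a direct match, or a match on
 * the low-order ID when one side of the comparison was recorded with a
 * zero area/domain (e.g. an ALPA-only address).
 */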
2044static int
2e0fef85
JS
2045lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2046 uint32_t did)
dea3101e 2047{
2e0fef85 2048 D_ID mydid, ndlpdid, matchdid;
dea3101e 2049
2050 if (did == Bcast_DID)
2fe165b6 2051 return 0;
dea3101e 2052
2053 if (ndlp->nlp_DID == 0) {
2fe165b6 2054 return 0;
dea3101e 2055 }
2056
2057 /* First check for Direct match */
2058 if (ndlp->nlp_DID == did)
2fe165b6 2059 return 1;
dea3101e 2060
2061 /* Next check for area/domain identically equals 0 match */
2e0fef85 2062 mydid.un.word = vport->fc_myDID;
dea3101e 2063 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
2fe165b6 2064 return 0;
dea3101e 2065 }
2066
2067 matchdid.un.word = did;
2068 ndlpdid.un.word = ndlp->nlp_DID;
2069 if (matchdid.un.b.id == ndlpdid.un.b.id) {
2070 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
2071 (mydid.un.b.area == matchdid.un.b.area)) {
2072 if ((ndlpdid.un.b.domain == 0) &&
2073 (ndlpdid.un.b.area == 0)) {
2074 if (ndlpdid.un.b.id)
2fe165b6 2075 return 1;
dea3101e 2076 }
2fe165b6 2077 return 0;
dea3101e 2078 }
2079
2080 matchdid.un.word = ndlp->nlp_DID;
2081 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
2082 (mydid.un.b.area == ndlpdid.un.b.area)) {
2083 if ((matchdid.un.b.domain == 0) &&
2084 (matchdid.un.b.area == 0)) {
2085 if (matchdid.un.b.id)
2fe165b6 2086 return 1;
dea3101e 2087 }
2088 }
2089 }
2fe165b6 2090 return 0;
dea3101e 2091}
2092
685f0bf7 2093/* Search for a nodelist entry */
2e0fef85
JS
2094static struct lpfc_nodelist *
2095__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
dea3101e 2096{
2e0fef85 2097 struct lpfc_hba *phba = vport->phba;
2fb9bd8b 2098 struct lpfc_nodelist *ndlp;
dea3101e 2099 uint32_t data1;
2100
2e0fef85
JS
2101 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2102 if (lpfc_matchdid(vport, ndlp, did)) {
685f0bf7
JS
2103 data1 = (((uint32_t) ndlp->nlp_state << 24) |
2104 ((uint32_t) ndlp->nlp_xri << 16) |
2105 ((uint32_t) ndlp->nlp_type << 8) |
2106 ((uint32_t) ndlp->nlp_rpi & 0xff));
2107 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
92d7f7b0 2108 "%d (%d):0929 FIND node DID "
685f0bf7 2109 " Data: x%p x%x x%x x%x\n",
92d7f7b0 2110 phba->brd_no, vport->vpi,
685f0bf7
JS
2111 ndlp, ndlp->nlp_DID,
2112 ndlp->nlp_flag, data1);
685f0bf7 2113 return ndlp;
dea3101e 2114 }
2115 }
66a9ed66 2116
dea3101e 2117 /* FIND node did <did> NOT FOUND */
2fb9bd8b 2118 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
92d7f7b0
JS
2119 "%d (%d):0932 FIND node did x%x NOT FOUND.\n",
2120 phba->brd_no, vport->vpi, did);
dea3101e 2121 return NULL;
2122}
2123
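/* Locked wrapper: take the host lock around __lpfc_findnode_did(). */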
2124struct lpfc_nodelist *
2e0fef85
JS
2125lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2126{
2127 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2128 struct lpfc_nodelist *ndlp;
2129
2130 spin_lock_irq(shost->host_lock);
2131 ndlp = __lpfc_findnode_did(vport, did);
2132 spin_unlock_irq(shost->host_lock);
2133 return ndlp;
2134}
2135
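/*
 * Find or allocate the node for this DID and mark it for discovery
 * (NLP_NPR_2B_DISC).  In RSCN mode the DID must match the RSCN payload;
 * returns NULL when the node should not be (re)discovered, e.g. it is
 * already in PLOGI or ADISC issue state.
 */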
2136struct lpfc_nodelist *
2137lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
dea3101e 2138{
2e0fef85 2139 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 2140 struct lpfc_nodelist *ndlp;
dea3101e 2141
2e0fef85 2142 ndlp = lpfc_findnode_did(vport, did);
c9f8735b 2143 if (!ndlp) {
2e0fef85
JS
2144 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
2145 lpfc_rscn_payload_check(vport, did) == 0)
dea3101e 2146 return NULL;
2147 ndlp = (struct lpfc_nodelist *)
2e0fef85 2148 mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
dea3101e 2149 if (!ndlp)
2150 return NULL;
2e0fef85
JS
2151 lpfc_nlp_init(vport, ndlp, did);
2152 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2153 spin_lock_irq(shost->host_lock);
dea3101e 2154 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 2155 spin_unlock_irq(shost->host_lock);
dea3101e 2156 return ndlp;
2157 }
2e0fef85
JS
2158 if (vport->fc_flag & FC_RSCN_MODE) {
2159 if (lpfc_rscn_payload_check(vport, did)) {
2160 spin_lock_irq(shost->host_lock);
dea3101e 2161 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 2162 spin_unlock_irq(shost->host_lock);
c9f8735b
JW
2163
2164 /* Since this node is marked for discovery,
2165 * delay timeout is not needed.
2166 */
fdcebe28 2167 if (ndlp->nlp_flag & NLP_DELAY_TMO)
2e0fef85 2168 lpfc_cancel_retry_delay_tmo(vport, ndlp);
071fbd3d 2169 } else
dea3101e 2170 ndlp = NULL;
2fe165b6 2171 } else {
685f0bf7
JS
2172 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
2173 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
dea3101e 2174 return NULL;
2e0fef85
JS
2175 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2176 spin_lock_irq(shost->host_lock);
dea3101e 2177 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 2178 spin_unlock_irq(shost->host_lock);
dea3101e 2179 }
2180 return ndlp;
2181}
2182
2183/* Build a list of nodes to discover based on the loopmap */
2184void
2e0fef85 2185lpfc_disc_list_loopmap(struct lpfc_vport *vport)
dea3101e 2186{
2e0fef85 2187 struct lpfc_hba *phba = vport->phba;
dea3101e 2188 int j;
2189 uint32_t alpa, index;
2190
2e0fef85 2191 if (!lpfc_is_link_up(phba))
dea3101e 2192 return;
2e0fef85
JS
2193
2194 if (phba->fc_topology != TOPOLOGY_LOOP)
dea3101e 2195 return;
dea3101e 2196
2197 /* Check for loop map present or not */
2198 if (phba->alpa_map[0]) {
2199 for (j = 1; j <= phba->alpa_map[0]; j++) {
2200 alpa = phba->alpa_map[j];
2e0fef85 2201 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
dea3101e 2202 continue;
2e0fef85 2203 lpfc_setup_disc_node(vport, alpa);
dea3101e 2204 }
2205 } else {
2206 /* No alpamap, so try all alpa's */
2207 for (j = 0; j < FC_MAXLOOP; j++) {
2208 /* If cfg_scan_down is set, start from highest
2209 * ALPA (0xef) to lowest (0x1).
2210 */
2211 if (phba->cfg_scan_down)
2212 index = j;
2213 else
2214 index = FC_MAXLOOP - j - 1;
2215 alpa = lpfcAlpaArray[index];
2e0fef85 2216 if ((vport->fc_myDID & 0xff) == alpa)
dea3101e 2217 continue;
2e0fef85 2218 lpfc_setup_disc_node(vport, alpa);
dea3101e 2219 }
2220 }
2221 return;
2222}
2223
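/*
 * Issue the CLEAR_LA mailbox to start link-up discovery.  Only the
 * physical port sends it, and only if it has not already been sent
 * (link_state below LPFC_CLEAR_LA); on a mailbox failure the discovery
 * list is flushed and the HBA is placed in the error state.
 */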
dea3101e 2224void
2e0fef85 2225lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
dea3101e 2226{
dea3101e 2227 LPFC_MBOXQ_t *mbox;
2e0fef85
JS
2228 struct lpfc_sli *psli = &phba->sli;
2229 struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
2230 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
2231 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
2232 int rc;
2233
92d7f7b0
JS
2234 /*
2235 * if it's not a physical port or if we already send
2236 * clear_la then don't send it.
2237 */
2238 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2239 (vport->port_type != LPFC_PHYSICAL_PORT))
2240 return;
2241
2e0fef85
JS
2242 /* Link up discovery */
2243 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
2244 phba->link_state = LPFC_CLEAR_LA;
2245 lpfc_clear_la(phba, mbox);
2246 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2247 mbox->vport = vport;
2248 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT |
2249 MBX_STOP_IOCB));
2250 if (rc == MBX_NOT_FINISHED) {
2251 mempool_free(mbox, phba->mbox_mem_pool);
2252 lpfc_disc_flush_list(vport);
2253 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2254 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2255 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
92d7f7b0
JS
2256 phba->link_state = LPFC_HBA_ERROR;
2257 }
2258 }
2259}
2260
2261/* Reg_vpi to tell firmware to resume normal operations */
2262void
2263lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2264{
2265 LPFC_MBOXQ_t *regvpimbox;
2266
2267 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2268 if (regvpimbox) {
2269 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2270 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2271 regvpimbox->vport = vport;
2272 if (lpfc_sli_issue_mbox(phba, regvpimbox,
2273 (MBX_NOWAIT | MBX_STOP_IOCB))
2274 == MBX_NOT_FINISHED) {
2275 mempool_free(regvpimbox, phba->mbox_mem_pool);
2e0fef85
JS
2276 }
2277 }
2278}
2279
2280/* Start Link up / RSCN discovery on NPR nodes */
2281void
2282lpfc_disc_start(struct lpfc_vport *vport)
2283{
2284 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2285 struct lpfc_hba *phba = vport->phba;
685f0bf7 2286 uint32_t num_sent;
dea3101e 2287 uint32_t clear_la_pending;
685f0bf7 2288 int did_changed;
dea3101e 2289
2e0fef85 2290 if (!lpfc_is_link_up(phba))
dea3101e 2291 return;
2e0fef85
JS
2292
2293 if (phba->link_state == LPFC_CLEAR_LA)
dea3101e 2294 clear_la_pending = 1;
2295 else
2296 clear_la_pending = 0;
2297
2e0fef85
JS
2298 if (vport->port_state < LPFC_VPORT_READY)
2299 vport->port_state = LPFC_DISC_AUTH;
dea3101e 2300
2e0fef85
JS
2301 lpfc_set_disctmo(vport);
2302
2303 if (vport->fc_prevDID == vport->fc_myDID)
dea3101e 2304 did_changed = 0;
2e0fef85 2305 else
dea3101e 2306 did_changed = 1;
2e0fef85
JS
2307
2308 vport->fc_prevDID = vport->fc_myDID;
2309 vport->num_disc_nodes = 0;
dea3101e 2310
2311 /* Start Discovery state <hba_state> */
2312 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
92d7f7b0 2313 "%d (%d):0202 Start Discovery hba state x%x "
dea3101e 2314 "Data: x%x x%x x%x\n",
92d7f7b0
JS
2315 phba->brd_no, vport->vpi, vport->port_state,
2316 vport->fc_flag, vport->fc_plogi_cnt,
2317 vport->fc_adisc_cnt);
dea3101e 2318
2319 /* First do ADISCs - if any */
2e0fef85 2320 num_sent = lpfc_els_disc_adisc(vport);
dea3101e 2321
2322 if (num_sent)
2323 return;
2324
92d7f7b0
JS
2325 /*
2326 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
2327 * continue discovery.
2328 */
2329 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2330 !(vport->fc_flag & FC_RSCN_MODE)) {
2331 lpfc_issue_reg_vpi(phba, vport);
2332 return;
2333 }
2334
2335 /*
2336 * For SLI2, we need to set port_state to READY and continue
2337 * discovery.
2338 */
2e0fef85 2339 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
dea3101e 2340 /* If we get here, there is nothing to ADISC */
92d7f7b0 2341 if (vport->port_type == LPFC_PHYSICAL_PORT)
2e0fef85 2342 lpfc_issue_clear_la(phba, vport);
2e0fef85 2343
92d7f7b0 2344 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2e0fef85
JS
2345 vport->num_disc_nodes = 0;
2346 /* go thru NPR nodes and issue ELS PLOGIs */
2347 if (vport->fc_npr_cnt)
2348 lpfc_els_disc_plogi(vport);
2349
2350 if (!vport->num_disc_nodes) {
2351 spin_lock_irq(shost->host_lock);
2352 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2353 spin_unlock_irq(shost->host_lock);
92d7f7b0 2354 lpfc_can_disctmo(vport);
dea3101e 2355 }
2356 }
92d7f7b0 2357 vport->port_state = LPFC_VPORT_READY;
dea3101e 2358 } else {
2359 /* Next do PLOGIs - if any */
2e0fef85 2360 num_sent = lpfc_els_disc_plogi(vport);
dea3101e 2361
2362 if (num_sent)
2363 return;
2364
2e0fef85 2365 if (vport->fc_flag & FC_RSCN_MODE) {
dea3101e 2366 /* Check to see if more RSCNs came in while we
2367 * were processing this one.
2368 */
2e0fef85
JS
2369 if ((vport->fc_rscn_id_cnt == 0) &&
2370 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
2371 spin_lock_irq(shost->host_lock);
2372 vport->fc_flag &= ~FC_RSCN_MODE;
2373 spin_unlock_irq(shost->host_lock);
92d7f7b0 2374 lpfc_can_disctmo(vport);
2fe165b6 2375 } else
2e0fef85 2376 lpfc_els_handle_rscn(vport);
dea3101e 2377 }
2378 }
2379 return;
2380}
2381
2382/*
 2383 * Ignore completions for all IOCBs on the tx and txcmpl queues of the ELS
 2384 * ring that match the specified nodelist.
2385 */
2386static void
2e0fef85 2387lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
dea3101e 2388{
2534ba75 2389 LIST_HEAD(completions);
dea3101e 2390 struct lpfc_sli *psli;
2391 IOCB_t *icmd;
2392 struct lpfc_iocbq *iocb, *next_iocb;
2393 struct lpfc_sli_ring *pring;
dea3101e 2394
2395 psli = &phba->sli;
2396 pring = &psli->ring[LPFC_ELS_RING];
2397
2398 /* Error matching iocb on txq or txcmplq
2399 * First check the txq.
2400 */
2e0fef85 2401 spin_lock_irq(&phba->hbalock);
dea3101e 2402 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2403 if (iocb->context1 != ndlp) {
2404 continue;
2405 }
2406 icmd = &iocb->iocb;
2407 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2408 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2409
2534ba75 2410 list_move_tail(&iocb->list, &completions);
dea3101e 2411 pring->txq_cnt--;
dea3101e 2412 }
2413 }
2414
2415 /* Next check the txcmplq */
2416 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2417 if (iocb->context1 != ndlp) {
2418 continue;
2419 }
2420 icmd = &iocb->iocb;
2e0fef85
JS
2421 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
2422 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
2534ba75
JS
2423 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
2424 }
2425 }
2e0fef85 2426 spin_unlock_irq(&phba->hbalock);
dea3101e 2427
2534ba75
JS
2428 while (!list_empty(&completions)) {
2429 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
92d7f7b0 2430 list_del_init(&iocb->list);
dea3101e 2431
2e0fef85
JS
2432 if (!iocb->iocb_cmpl)
2433 lpfc_sli_release_iocbq(phba, iocb);
2434 else {
2534ba75
JS
2435 icmd = &iocb->iocb;
2436 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2437 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2438 (iocb->iocb_cmpl) (phba, iocb, iocb);
2e0fef85 2439 }
dea3101e 2440 }
dea3101e 2441}
2442
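/*
 * For every node still in PLOGI or ADISC issue state, fail its queued ELS
 * traffic and drop the discovery reference on the node.
 */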
2443void
2e0fef85 2444lpfc_disc_flush_list(struct lpfc_vport *vport)
dea3101e 2445{
2446 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 2447 struct lpfc_hba *phba = vport->phba;
dea3101e 2448
2e0fef85
JS
2449 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
2450 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
685f0bf7
JS
2451 nlp_listp) {
2452 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2453 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
2454 lpfc_free_tx(phba, ndlp);
2455 lpfc_nlp_put(ndlp);
2456 }
dea3101e 2457 }
2458 }
dea3101e 2459}
2460
92d7f7b0
JS
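/* Flush pending RSCNs, outstanding ELS commands, and the discovery list. */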
2461void
2462lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
2463{
2464 lpfc_els_flush_rscn(vport);
2465 lpfc_els_flush_cmd(vport);
2466 lpfc_disc_flush_list(vport);
2467}
2468
dea3101e 2469/*****************************************************************************/
2470/*
2471 * NAME: lpfc_disc_timeout
2472 *
2473 * FUNCTION: Fibre Channel driver discovery timeout routine.
2474 *
2475 * EXECUTION ENVIRONMENT: interrupt only
2476 *
2477 * CALLED FROM:
2478 * Timer function
2479 *
2480 * RETURNS:
2481 * none
2482 */
2483/*****************************************************************************/
2484void
2485lpfc_disc_timeout(unsigned long ptr)
2486{
2e0fef85
JS
2487 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
2488 struct lpfc_hba *phba = vport->phba;
dea3101e 2489 unsigned long flags = 0;
2490
2491 if (unlikely(!phba))
2492 return;
2493
2e0fef85
JS
2494 if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
2495 spin_lock_irqsave(&vport->work_port_lock, flags);
2496 vport->work_port_events |= WORKER_DISC_TMO;
2497 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2498
92d7f7b0 2499 spin_lock_irqsave(&phba->hbalock, flags);
dea3101e 2500 if (phba->work_wait)
92d7f7b0
JS
2501 lpfc_worker_wake_up(phba);
2502 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 2503 }
dea3101e 2504 return;
2505}
2506
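/*
 * Worker-thread handler for the discovery timer.  Recovery depends on
 * vport->port_state: restart FLOGI after a FAN timeout, fall back to
 * loop-map discovery after a FLOGI/FDISC timeout, retry or abandon
 * NameServer queries, and flush discovery state on authentication, RSCN,
 * or CLEAR_LA timeouts.
 */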
2507static void
2e0fef85 2508lpfc_disc_timeout_handler(struct lpfc_vport *vport)
dea3101e 2509{
2e0fef85
JS
2510 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2511 struct lpfc_hba *phba = vport->phba;
2512 struct lpfc_sli *psli = &phba->sli;
c9f8735b 2513 struct lpfc_nodelist *ndlp, *next_ndlp;
92d7f7b0 2514 LPFC_MBOXQ_t *initlinkmbox;
dea3101e 2515 int rc, clrlaerr = 0;
2516
2e0fef85 2517 if (!(vport->fc_flag & FC_DISC_TMO))
dea3101e 2518 return;
2519
2e0fef85
JS
2520 spin_lock_irq(shost->host_lock);
2521 vport->fc_flag &= ~FC_DISC_TMO;
2522 spin_unlock_irq(shost->host_lock);
dea3101e 2523
858c9f6c
JS
2524 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2525 "disc timeout: state:x%x rtry:x%x flg:x%x",
2526 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
2527
2e0fef85 2528 switch (vport->port_state) {
dea3101e 2529
2530 case LPFC_LOCAL_CFG_LINK:
2e0fef85
JS
2531 /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
2532 * FAN
2533 */
2534 /* FAN timeout */
ed957684 2535 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
92d7f7b0
JS
2536 "%d (%d):0221 FAN timeout\n",
2537 phba->brd_no, vport->vpi);
dea3101e 2538
c9f8735b 2539 /* Start discovery by sending FLOGI, clean up old rpis */
2e0fef85 2540 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
685f0bf7
JS
2541 nlp_listp) {
2542 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
2543 continue;
c9f8735b
JW
2544 if (ndlp->nlp_type & NLP_FABRIC) {
2545 /* Clean up the ndlp on Fabric connections */
2e0fef85 2546 lpfc_drop_node(vport, ndlp);
2fe165b6 2547 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
c9f8735b
JW
2548 /* Fail outstanding IO now since device
2549 * is marked for PLOGI.
2550 */
2e0fef85 2551 lpfc_unreg_rpi(vport, ndlp);
c9f8735b
JW
2552 }
2553 }
92d7f7b0
JS
2554 if (vport->port_state != LPFC_FLOGI) {
2555 vport->port_state = LPFC_FLOGI;
2556 lpfc_set_disctmo(vport);
2557 lpfc_initial_flogi(vport);
2558 }
dea3101e 2559 break;
2560
92d7f7b0 2561 case LPFC_FDISC:
dea3101e 2562 case LPFC_FLOGI:
2e0fef85 2563 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
dea3101e 2564 /* Initial FLOGI timeout */
ed957684 2565 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92d7f7b0
JS
2566 "%d (%d):0222 Initial %s timeout\n",
2567 phba->brd_no, vport->vpi,
 2568 vport->vpi ? "FDISC" : "FLOGI");
dea3101e 2569
2570 /* Assume no Fabric and go on with discovery.
2571 * Check for outstanding ELS FLOGI to abort.
2572 */
2573
2574 /* FLOGI failed, so just use loop map to make discovery list */
2e0fef85 2575 lpfc_disc_list_loopmap(vport);
dea3101e 2576
2577 /* Start discovery */
2e0fef85 2578 lpfc_disc_start(vport);
dea3101e 2579 break;
2580
2581 case LPFC_FABRIC_CFG_LINK:
 2582 /* port_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2583 NameServer login */
2584 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92d7f7b0
JS
2585 "%d (%d):0223 Timeout while waiting for "
2586 "NameServer login\n",
2587 phba->brd_no, vport->vpi);
dea3101e 2588
2589 /* Next look for NameServer ndlp */
2e0fef85 2590 ndlp = lpfc_findnode_did(vport, NameServer_DID);
dea3101e 2591 if (ndlp)
329f9bc7 2592 lpfc_nlp_put(ndlp);
dea3101e 2593 /* Start discovery */
2e0fef85 2594 lpfc_disc_start(vport);
dea3101e 2595 break;
2596
2597 case LPFC_NS_QRY:
2598 /* Check for wait for NameServer Rsp timeout */
2599 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92d7f7b0 2600 "%d (%d):0224 NameServer Query timeout "
dea3101e 2601 "Data: x%x x%x\n",
92d7f7b0 2602 phba->brd_no, vport->vpi,
2e0fef85 2603 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
dea3101e 2604
92d7f7b0
JS
2605 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2606 /* Try it one more time */
2607 vport->fc_ns_retry++;
2608 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
2609 vport->fc_ns_retry, 0);
2610 if (rc == 0)
2611 break;
dea3101e 2612 }
92d7f7b0 2613 vport->fc_ns_retry = 0;
dea3101e 2614
92d7f7b0
JS
2615 /*
2616 * Discovery is over.
2617 * set port_state to PORT_READY if SLI2.
2618 * cmpl_reg_vpi will set port_state to READY for SLI3.
2619 */
2620 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2621 lpfc_issue_reg_vpi(phba, vport);
2622 else { /* NPIV Not enabled */
2623 lpfc_issue_clear_la(phba, vport);
2624 vport->port_state = LPFC_VPORT_READY;
dea3101e 2625 }
2626
2627 /* Setup and issue mailbox INITIALIZE LINK command */
2628 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2629 if (!initlinkmbox) {
2630 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92d7f7b0 2631 "%d (%d):0206 Device Discovery "
dea3101e 2632 "completion error\n",
92d7f7b0 2633 phba->brd_no, vport->vpi);
2e0fef85 2634 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2635 break;
2636 }
2637
2638 lpfc_linkdown(phba);
2639 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2640 phba->cfg_link_speed);
2641 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
ed957684 2642 initlinkmbox->vport = vport;
92d7f7b0 2643 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
dea3101e 2644 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2645 (MBX_NOWAIT | MBX_STOP_IOCB));
5b8bd0c9 2646 lpfc_set_loopback_flag(phba);
dea3101e 2647 if (rc == MBX_NOT_FINISHED)
2648 mempool_free(initlinkmbox, phba->mbox_mem_pool);
2649
2650 break;
2651
2652 case LPFC_DISC_AUTH:
2653 /* Node Authentication timeout */
ed957684 2654 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92d7f7b0
JS
2655 "%d (%d):0227 Node Authentication timeout\n",
2656 phba->brd_no, vport->vpi);
2e0fef85
JS
2657 lpfc_disc_flush_list(vport);
2658
92d7f7b0
JS
2659 /*
2660 * set port_state to PORT_READY if SLI2.
2661 * cmpl_reg_vpi will set port_state to READY for SLI3.
2662 */
2663 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2664 lpfc_issue_reg_vpi(phba, vport);
2665 else { /* NPIV Not enabled */
2666 lpfc_issue_clear_la(phba, vport);
2667 vport->port_state = LPFC_VPORT_READY;
dea3101e 2668 }
2669 break;
2670
2e0fef85
JS
2671 case LPFC_VPORT_READY:
2672 if (vport->fc_flag & FC_RSCN_MODE) {
ed957684 2673 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92d7f7b0
JS
2674 "%d (%d):0231 RSCN timeout Data: x%x "
2675 "x%x\n",
2676 phba->brd_no, vport->vpi,
2e0fef85 2677 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
dea3101e 2678
2679 /* Cleanup any outstanding ELS commands */
2e0fef85 2680 lpfc_els_flush_cmd(vport);
dea3101e 2681
2e0fef85
JS
2682 lpfc_els_flush_rscn(vport);
2683 lpfc_disc_flush_list(vport);
dea3101e 2684 }
2685 break;
2e0fef85 2686
92d7f7b0 2687 default:
ed957684 2688 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92d7f7b0
JS
2689 "%d (%d):0229 Unexpected discovery timeout, "
2690 "vport State x%x\n",
2691 phba->brd_no, vport->vpi, vport->port_state);
2e0fef85
JS
2692
2693 break;
2694 }
2695
2696 switch (phba->link_state) {
2697 case LPFC_CLEAR_LA:
92d7f7b0 2698 /* CLEAR LA timeout */
ed957684 2699 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92d7f7b0
JS
2700 "%d (%d):0228 CLEAR LA timeout\n",
2701 phba->brd_no, vport->vpi);
2e0fef85
JS
2702 clrlaerr = 1;
2703 break;
2704
2705 case LPFC_LINK_UNKNOWN:
2706 case LPFC_WARM_START:
2707 case LPFC_INIT_START:
2708 case LPFC_INIT_MBX_CMDS:
2709 case LPFC_LINK_DOWN:
2710 case LPFC_LINK_UP:
2711 case LPFC_HBA_ERROR:
ed957684 2712 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92d7f7b0 2713 "%d (%d):0230 Unexpected timeout, hba link "
2e0fef85 2714 "state x%x\n",
92d7f7b0 2715 phba->brd_no, vport->vpi, phba->link_state);
2e0fef85
JS
2716 clrlaerr = 1;
2717 break;
92d7f7b0
JS
2718
2719 case LPFC_HBA_READY:
2720 break;
dea3101e 2721 }
2722
2723 if (clrlaerr) {
2e0fef85 2724 lpfc_disc_flush_list(vport);
a4bc3379 2725 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
dea3101e 2726 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2727 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2e0fef85 2728 vport->port_state = LPFC_VPORT_READY;
dea3101e 2729 }
2730
2731 return;
2732}
2733
dea3101e 2734/*
 2735 * This routine handles processing an FDMI REG_LOGIN mailbox
2736 * command upon completion. It is setup in the LPFC_MBOXQ
2737 * as the completion routine when the command is
2738 * handed off to the SLI layer.
2739 */
2740void
2e0fef85 2741lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 2742{
2e0fef85
JS
2743 MAILBOX_t *mb = &pmb->mb;
2744 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2745 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2746 struct lpfc_vport *vport = pmb->vport;
dea3101e 2747
2748 pmb->context1 = NULL;
2749
dea3101e 2750 ndlp->nlp_rpi = mb->un.varWords[0];
dea3101e 2751 ndlp->nlp_type |= NLP_FABRIC;
2e0fef85 2752 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
dea3101e 2753
2e0fef85
JS
2754 /*
2755 * Start issuing Fabric-Device Management Interface (FDMI) command to
 2756 * 0xfffffa (the FDMI well-known port), or delay issuing the FDMI command if
 2757 * fdmi-on=2 (supporting RPA/hostname)
dea3101e 2758 */
2e0fef85
JS
2759
2760 if (phba->cfg_fdmi_on == 1)
2761 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
2762 else
2763 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
dea3101e 2764
329f9bc7
JS
2765 /* Mailbox took a reference to the node */
2766 lpfc_nlp_put(ndlp);
dea3101e 2767 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2768 kfree(mp);
329f9bc7 2769 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 2770
2771 return;
2772}
2773
685f0bf7
JS
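/* node_filter callbacks for __lpfc_find_node(): match by RPI or by WWPN. */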
2774static int
2775lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
2776{
2777 uint16_t *rpi = param;
2778
2779 return ndlp->nlp_rpi == *rpi;
2780}
2781
2782static int
2783lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
2784{
2785 return memcmp(&ndlp->nlp_portname, param,
2786 sizeof(ndlp->nlp_portname)) == 0;
2787}
2788
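/*
 * Walk vport->fc_nodes and return the first node that is not in the
 * UNUSED state and is accepted by the filter; caller must hold host_lock.
 */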
dea3101e 2789struct lpfc_nodelist *
2e0fef85 2790__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
dea3101e 2791{
21568f53 2792 struct lpfc_nodelist *ndlp;
dea3101e 2793
2e0fef85 2794 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
685f0bf7
JS
2795 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
2796 filter(ndlp, param))
2797 return ndlp;
2798 }
21568f53 2799 return NULL;
dea3101e 2800}
2801
685f0bf7
JS
2802/*
2803 * Search node lists for a remote port matching filter criteria
92d7f7b0 2804 * This wrapper takes host_lock itself; use __lpfc_find_node() if the lock is already held.
685f0bf7
JS
2805 */
2806struct lpfc_nodelist *
2e0fef85 2807lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
685f0bf7 2808{
2e0fef85 2809 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
685f0bf7
JS
2810 struct lpfc_nodelist *ndlp;
2811
2e0fef85
JS
2812 spin_lock_irq(shost->host_lock);
2813 ndlp = __lpfc_find_node(vport, filter, param);
2814 spin_unlock_irq(shost->host_lock);
685f0bf7
JS
2815 return ndlp;
2816}
2817
2818/*
2819 * This routine looks up the ndlp lists for the given RPI. If rpi found it
2e0fef85 2820 * returns the node list element pointer else return NULL.
685f0bf7
JS
2821 */
2822struct lpfc_nodelist *
2e0fef85 2823__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
685f0bf7 2824{
2e0fef85 2825 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
685f0bf7
JS
2826}
2827
2534ba75 2828struct lpfc_nodelist *
2e0fef85 2829lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2534ba75 2830{
2e0fef85 2831 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2534ba75
JS
2832 struct lpfc_nodelist *ndlp;
2833
2e0fef85
JS
2834 spin_lock_irq(shost->host_lock);
2835 ndlp = __lpfc_findnode_rpi(vport, rpi);
2836 spin_unlock_irq(shost->host_lock);
2534ba75
JS
2837 return ndlp;
2838}
2839
488d1469 2840/*
685f0bf7 2841 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
2e0fef85 2842 * returns the node element list pointer else return NULL.
488d1469
JS
2843 */
2844struct lpfc_nodelist *
2e0fef85 2845lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
488d1469 2846{
2e0fef85 2847 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
488d1469 2848 struct lpfc_nodelist *ndlp;
488d1469 2849
2e0fef85
JS
2850 spin_lock_irq(shost->host_lock);
2851 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
2852 spin_unlock_irq(shost->host_lock);
858c9f6c 2853 return ndlp;
488d1469
JS
2854}
2855
92d7f7b0
JS
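/*
 * Timer callback that defers dev-loss handling: queue an
 * LPFC_EVT_DEV_LOSS_DELAY work event for the node (unless one is already
 * queued) and wake the worker thread.
 */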
2856void
2857lpfc_dev_loss_delay(unsigned long ptr)
2858{
2859 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2860 struct lpfc_vport *vport = ndlp->vport;
2861 struct lpfc_hba *phba = vport->phba;
2862 struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt;
2863 unsigned long flags;
2864
2865 evtp = &ndlp->dev_loss_evt;
2866
2867 spin_lock_irqsave(&phba->hbalock, flags);
2868 if (!list_empty(&evtp->evt_listp)) {
2869 spin_unlock_irqrestore(&phba->hbalock, flags);
2870 return;
2871 }
2872
2873 evtp->evt_arg1 = ndlp;
858c9f6c 2874 evtp->evt = LPFC_EVT_DEV_LOSS_DELAY;
92d7f7b0
JS
2875 list_add_tail(&evtp->evt_listp, &phba->work_list);
2876 if (phba->work_wait)
2877 lpfc_worker_wake_up(phba);
2878 spin_unlock_irqrestore(&phba->hbalock, flags);
2879 return;
2880}
2881
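/*
 * Initialize a newly allocated nodelist entry: zero it, set up the ELS
 * retry and dev-loss event lists, arm the delayed-retry timer, record the
 * DID and owning vport, and take the initial kref.
 */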
dea3101e 2882void
2e0fef85
JS
2883lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2884 uint32_t did)
dea3101e 2885{
2886 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
dea3101e 2887 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
92d7f7b0 2888 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
dea3101e 2889 init_timer(&ndlp->nlp_delayfunc);
2890 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2891 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2892 ndlp->nlp_DID = did;
2e0fef85 2893 ndlp->vport = vport;
dea3101e 2894 ndlp->nlp_sid = NLP_NO_SID;
685f0bf7 2895 INIT_LIST_HEAD(&ndlp->nlp_listp);
329f9bc7 2896 kref_init(&ndlp->kref);
858c9f6c
JS
2897
2898 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2899 "node init: did:x%x",
2900 ndlp->nlp_DID, 0, 0);
2901
dea3101e 2902 return;
2903}
329f9bc7
JS
2904
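/*
 * kref release callback, invoked when the last reference to the node is
 * dropped: clean the node up and free it back to the nlp mempool.
 */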
2905void
2906lpfc_nlp_release(struct kref *kref)
2907{
2908 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
2909 kref);
858c9f6c
JS
2910
2911 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2912 "node release: did:x%x flg:x%x type:x%x",
2913 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2914
2e0fef85
JS
2915 lpfc_nlp_remove(ndlp->vport, ndlp);
2916 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
329f9bc7
JS
2917}
2918
2919struct lpfc_nodelist *
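/*
 * Reference helpers: lpfc_nlp_get() takes a reference on the node;
 * lpfc_nlp_put() drops one and returns nonzero if lpfc_nlp_release() ran.
 */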
2920lpfc_nlp_get(struct lpfc_nodelist *ndlp)
2921{
2922 if (ndlp)
2923 kref_get(&ndlp->kref);
2924 return ndlp;
2925}
2926
2927int
2928lpfc_nlp_put(struct lpfc_nodelist *ndlp)
2929{
2930 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
2931}