/* [SCSI] lpfc 8.2.8 : Update driver for new SLI-3 features
 * drivers/scsi/lpfc/lpfc_hbadisc.c
 */
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
21
dea3101e 22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/kthread.h>
25#include <linux/interrupt.h>
26
91886523 27#include <scsi/scsi.h>
dea3101e 28#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h>
31
32#include "lpfc_hw.h"
33#include "lpfc_disc.h"
34#include "lpfc_sli.h"
35#include "lpfc_scsi.h"
36#include "lpfc.h"
37#include "lpfc_logmsg.h"
38#include "lpfc_crtn.h"
92d7f7b0 39#include "lpfc_vport.h"
858c9f6c 40#include "lpfc_debugfs.h"
dea3101e 41
/* AlpaArray for assignment of scsid for scan-down and bind_method */
/* Hard AL_PA address table in loop-priority order (highest-priority
 * arbitrated-loop physical address first).  Used to map granted AL_PAs
 * to SCSI target ids on a private loop.
 */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

/* Forward declarations for handlers defined later in this file. */
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
dea3101e 61
/* FC transport callback: abort all outstanding FCP I/O to the remote
 * port @rport.  If the rport has no active driver node, only log an
 * error (for FCP targets) and return; otherwise abort every IOCB on
 * the FCP ring addressed to the node's SCSI id.
 */
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist * ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	/* No node, or node no longer active: nothing to abort against */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			" to terminate I/O Data x%x\n",
			rport->port_id);
		return;
	}

	phba  = ndlp->vport->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	/* Only abort if the node was ever mapped to a SCSI id */
	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
92
/*
 * This function will be called when dev_loss_tmo fire.
 */
/* FC transport dev_loss_tmo callback.  Normally defers the real work to
 * the worker thread by queuing an LPFC_EVT_DEV_LOSS event (holding a
 * node reference for the lifetime of the queued work).  During unload
 * the node/rport cross-references are severed inline instead.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist * ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	struct lpfc_work_evt *evtp;
	int  put_node;
	int  put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba  = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		/* Drop the node<->rport cross references exactly once */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	/* Node came back into the mapped state: nothing to do */
	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	evtp = &ndlp->dev_loss_evt;

	/* Event already queued for this node */
	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
157
/*
 * This function is called from the worker thread when dev_loss_tmo
 * expire.
 */
/* Worker-thread half of dev_loss_tmo handling for node @ndlp: abort
 * outstanding FCP I/O where appropriate, log the devloss (error level
 * only for nodes that had a SCSI id), sever the node<->rport links, and
 * finally push non-recovering nodes into NLP_EVT_DEVICE_RM.  The caller
 * (lpfc_work_list_done) holds the queued-work node reference until this
 * returns, so the final ndlp dereferences below remain valid.
 */
static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport   *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	uint8_t *name;
	int  put_node;
	int  put_rport;
	int warn_on = 0;

	rport = ndlp->rport;

	if (!rport)
		return;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba  = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	/* Node recovered before the timer fired: leave it alone */
	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
	if (vport->load_flag & FC_UNLOADING)
		warn_on = 0;

	/* Same message at two severities: KERN_ERR when I/O was aborted */
	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	/* ndlp is still valid here: the work-list reference taken at queue
	 * time is released by lpfc_work_list_done after we return.
	 */
	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
}
c01f3208 270
/* Drain the HBA worker event list.  Each lpfc_work_evt is popped under
 * hbalock, the lock is dropped while the event is dispatched, then
 * reacquired for the next iteration.  Events embedded in an ndlp
 * (ELS_RETRY, DEV_LOSS) are not kfree'd (free_evt = 0); the node
 * reference taken when the event was queued is dropped here.
 */
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			/* Bring the port online only if the link is down */
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			/* Skip the board kill if it was already stopped */
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				        ? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}
350
/* Main worker-thread dispatch.  Snapshots and clears the pending host
 * attention bits (work_ha) under hbalock, handles error/mailbox/link
 * attention, runs each vport's timer-driven work events, services or
 * defers ELS slow-ring events, re-enables the ELS ring interrupt, and
 * finally drains the event list via lpfc_work_list_done().
 */
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	/* Atomically take ownership of the pending HA bits */
	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			/* Snapshot and clear this vport's pending events */
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* ELS slow ring: service now, or defer if IOCBs are stopped */
	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK)
		|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
		}
		/*
		 * Turn on Ring interrupts
		 */
		spin_lock_irq(&phba->hbalock);
		control = readl(phba->HCregaddr);
		if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
			lpfc_debugfs_slow_ring_trc(phba,
				"WRK Enable ring: cntl:x%x hacopy:x%x",
				control, ha_copy, 0);

			control |= (HC_R0INT_ENA << LPFC_ELS_RING);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
		}
		else {
			lpfc_debugfs_slow_ring_trc(phba,
				"WRK Ring ok: cntl:x%x hacopy:x%x",
				control, ha_copy, 0);
		}
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_work_list_done(phba);
}
447
dea3101e 448int
449lpfc_do_work(void *p)
450{
451 struct lpfc_hba *phba = p;
452 int rc;
dea3101e 453
454 set_user_nice(current, -20);
5e9d9b82 455 phba->data_flags = 0;
dea3101e 456
457 while (1) {
5e9d9b82
JS
458 /* wait and check worker queue activities */
459 rc = wait_event_interruptible(phba->work_waitq,
460 (test_and_clear_bit(LPFC_DATA_READY,
461 &phba->data_flags)
462 || kthread_should_stop()));
dea3101e 463 BUG_ON(rc);
464
465 if (kthread_should_stop())
466 break;
467
5e9d9b82 468 /* Attend pending lpfc data processing */
dea3101e 469 lpfc_work_done(phba);
dea3101e 470 }
dea3101e 471 return 0;
472}
473
474/*
475 * This is only called to handle FC worker events. Since this a rare
476 * occurance, we allocate a struct lpfc_work_evt structure here instead of
477 * embedding it in the IOCB.
478 */
479int
2e0fef85 480lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
dea3101e 481 uint32_t evt)
482{
483 struct lpfc_work_evt *evtp;
ed957684 484 unsigned long flags;
dea3101e 485
486 /*
487 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
488 * be queued to worker thread for processing
489 */
92d7f7b0 490 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
dea3101e 491 if (!evtp)
492 return 0;
493
494 evtp->evt_arg1 = arg1;
495 evtp->evt_arg2 = arg2;
496 evtp->evt = evt;
497
ed957684 498 spin_lock_irqsave(&phba->hbalock, flags);
071fbd3d 499 list_add_tail(&evtp->evt_listp, &phba->work_list);
ed957684 500 spin_unlock_irqrestore(&phba->hbalock, flags);
dea3101e 501
5e9d9b82
JS
502 lpfc_worker_wake_up(phba);
503
dea3101e 504 return 1;
505}
506
/* Walk every node on @vport and either recover it (remove == 0) or
 * remove it (remove != 0) via the discovery state machine.  Fabric
 * nodes are left alone on plain link down.  RPIs are unregistered for
 * vport-teardown mode and for an NPIV port's NameServer node; in
 * teardown mode the VPI itself is also unregistered and flagged for
 * re-registration.
 */
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int  rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
			((vport->port_type == LPFC_NPIV_PORT) &&
			(ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if (!remove && ndlp->nlp_type & NLP_FABRIC)
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
540
/* Common per-vport cleanup after link failure: flush RSCN state and
 * outstanding ELS commands, recover (not remove) the vport's nodes,
 * and cancel the discovery timer.
 */
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	/* remove == 0: recover nodes rather than delete them */
	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}
555
/* Per-vport link-down handling: notify the FC transport of the link
 * down event, trace it, then run the common link-failure cleanup.
 */
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

}
570
/* HBA-wide link-down processing.  Transitions link_state to
 * LPFC_LINK_DOWN, issues a link-down to every vport, unregisters any
 * firmware default RPIs, and (in pt2pt mode) clears myDID and the
 * point-to-point flags while re-issuing CONFIG_LINK.  Mailboxes are
 * issued MBX_NOWAIT and freed here only if they fail to issue.
 * Always returns 0.
 */
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t          *mb;
	int i;

	/* Already down: nothing to do */
	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		phba->pport->fc_flag &= ~FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
dea3101e 627
/* On link up, sanitize @vport's node list: fabric nodes (except the
 * Fabric controller itself) get their RPI unregistered and are moved
 * to NPR; non-fabric nodes not marked for ADISC have their RPI
 * unregistered so outstanding I/O fails before the PLOGI-based
 * rediscovery.
 */
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
653
/* Per-vport link-up handling: post the FC transport link-up event,
 * reset the discovery-related fc_flag bits, and (when the loop bit is
 * set) clean up the vport's node list.  Skipped while unloading, and
 * for non-physical ports when NPIV is not enabled.
 */
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up: top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	/* Restart discovery state under the host lock */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);

}
685
/* HBA-wide link-up processing: mark the link up, unblock any blocked
 * fabric IOCBs, run per-vport link-up handling, and in NPIV mode issue
 * CLEAR_LA on the physical port.  Always returns 0.
 */
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}
708
709/*
710 * This routine handles processing a CLEAR_LA mailbox
711 * command upon completion. It is setup in the LPFC_MBOXQ
712 * as the completion routine when the command is
713 * handed off to the SLI layer.
714 */
a6ababd2 715static void
2e0fef85 716lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 717{
2e0fef85
JS
718 struct lpfc_vport *vport = pmb->vport;
719 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
720 struct lpfc_sli *psli = &phba->sli;
721 MAILBOX_t *mb = &pmb->mb;
dea3101e 722 uint32_t control;
723
dea3101e 724 /* Since we don't do discovery right now, turn these off here */
a4bc3379 725 psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
dea3101e 726 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
727 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
728
729 /* Check for error */
730 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
92d7f7b0 731 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
e8b62011
JS
732 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
733 "0320 CLEAR_LA mbxStatus error x%x hba "
734 "state x%x\n",
735 mb->mbxStatus, vport->port_state);
2e0fef85 736 phba->link_state = LPFC_HBA_ERROR;
dea3101e 737 goto out;
738 }
739
92d7f7b0
JS
740 if (vport->port_type == LPFC_PHYSICAL_PORT)
741 phba->link_state = LPFC_HBA_READY;
742
743 spin_lock_irq(&phba->hbalock);
744 psli->sli_flag |= LPFC_PROCESS_LA;
745 control = readl(phba->HCregaddr);
746 control |= HC_LAINT_ENA;
747 writel(control, phba->HCregaddr);
748 readl(phba->HCregaddr); /* flush */
749 spin_unlock_irq(&phba->hbalock);
1b32f6aa 750 mempool_free(pmb, phba->mbox_mem_pool);
92d7f7b0 751 return;
dea3101e 752
dea3101e 753out:
754 /* Device Discovery completes */
e8b62011
JS
755 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
756 "0225 Device Discovery completes\n");
2e0fef85 757 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 758
2e0fef85 759 spin_lock_irq(shost->host_lock);
58da1ffb 760 vport->fc_flag &= ~FC_ABORT_DISCOVERY;
2e0fef85 761 spin_unlock_irq(shost->host_lock);
dea3101e 762
2e0fef85 763 lpfc_can_disctmo(vport);
dea3101e 764
765 /* turn on Link Attention interrupts */
2e0fef85
JS
766
767 spin_lock_irq(&phba->hbalock);
dea3101e 768 psli->sli_flag |= LPFC_PROCESS_LA;
769 control = readl(phba->HCregaddr);
770 control |= HC_LAINT_ENA;
771 writel(control, phba->HCregaddr);
772 readl(phba->HCregaddr); /* flush */
2e0fef85 773 spin_unlock_irq(&phba->hbalock);
dea3101e 774
775 return;
776}
777
2e0fef85 778
/* CONFIG_LINK mailbox completion handler.  On success: if on a public
 * loop without the loop bit, wait for FAN using the discovery timer;
 * otherwise start discovery with an initial FLOGI.  On mailbox error:
 * log, take the link down, and issue CLEAR_LA.  The mailbox is freed
 * on every path.
 */
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI) {
		lpfc_initial_flogi(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}
824
825static void
2e0fef85 826lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 827{
dea3101e 828 MAILBOX_t *mb = &pmb->mb;
829 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
2e0fef85 830 struct lpfc_vport *vport = pmb->vport;
dea3101e 831
832
833 /* Check for error */
834 if (mb->mbxStatus) {
835 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
e8b62011
JS
836 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
837 "0319 READ_SPARAM mbxStatus error x%x "
838 "hba state x%x>\n",
839 mb->mbxStatus, vport->port_state);
dea3101e 840 lpfc_linkdown(phba);
dea3101e 841 goto out;
842 }
843
2e0fef85 844 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
dea3101e 845 sizeof (struct serv_parm));
a12e07bc 846 if (phba->cfg_soft_wwnn)
2e0fef85
JS
847 u64_to_wwn(phba->cfg_soft_wwnn,
848 vport->fc_sparam.nodeName.u.wwn);
c3f28afa 849 if (phba->cfg_soft_wwpn)
2e0fef85
JS
850 u64_to_wwn(phba->cfg_soft_wwpn,
851 vport->fc_sparam.portName.u.wwn);
92d7f7b0
JS
852 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
853 sizeof(vport->fc_nodename));
854 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
855 sizeof(vport->fc_portname));
856 if (vport->port_type == LPFC_PHYSICAL_PORT) {
857 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
858 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
859 }
860
dea3101e 861 lpfc_mbuf_free(phba, mp->virt, mp->phys);
862 kfree(mp);
2e0fef85 863 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 864 return;
865
866out:
867 pmb->context1 = NULL;
868 lpfc_mbuf_free(phba, mp->virt, mp->phys);
869 kfree(mp);
92d7f7b0
JS
870 lpfc_issue_clear_la(phba, vport);
871 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 872 return;
873}
874
/* Process a READ_LA link-up attention.  Records link speed and
 * topology under hbalock; for loop topology disables NPIV, adopts the
 * granted AL_PA as myDID and optionally dumps the ALPA map; otherwise
 * enables NPIV when supported and uses the preferred DID.  Then brings
 * the link up and issues READ_SPARAM and CONFIG_LINK mailboxes; any
 * issue failure falls through to 'out', which fails the vport and
 * issues CLEAR_LA.
 */
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(&phba->hbalock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	case LA_8GHZ_LINK:
		phba->fc_linkspeed = LA_8GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		/* NPIV is not supported on an arbitrated loop */
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		if (phba->cfg_enable_npiv)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1309 Link Up Event npiv not supported in loop "
				"topology\n");
		/* Get Loop Map information */
		if (la->il)
			vport->fc_flag |= FC_LBIT;

		vport->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			/* Verbose logging: dump the ALPA map 16 bytes at
			 * a time (alpa_map[0] holds the entry count).
			 */
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		/* Non-loop: enable NPIV when configured and SLI-3 capable */
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			   (phba->sli_rev == 3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		vport->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox, 0);
		sparam_mbox->vport = vport;
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			/* Free the DMA buffer attached by lpfc_read_sparam */
			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			mempool_free(sparam_mbox, phba->mbox_mem_pool);
			if (cfglink_mbox)
				mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	}

	if (cfglink_mbox) {
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
		mempool_free(cfglink_mbox, phba->mbox_mem_pool);
	}
out:
	/* Either mailbox could not be allocated or issued: fail the vport */
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}
1006
/*
 * Force a link-down transition: tear down login/discovery state via
 * lpfc_linkdown() and re-enable Link Attention interrupts so the next
 * link event is delivered.  Called from READ_LA completion on error or
 * on a link-down attention.
 */
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_linkdown(phba);

	/* turn on Link Attention interrupts - no CLEAR_LA needed */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	/* read-modify-write of the Host Control register under hbalock */
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
}
1024
1025/*
1026 * This routine handles processing a READ_LA mailbox
1027 * command upon completion. It is setup in the LPFC_MBOXQ
1028 * as the completion routine when the command is
1029 * handed off to the SLI layer.
1030 */
1031void
2e0fef85 1032lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 1033{
2e0fef85
JS
1034 struct lpfc_vport *vport = pmb->vport;
1035 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 1036 READ_LA_VAR *la;
1037 MAILBOX_t *mb = &pmb->mb;
1038 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1039
0d2b6b83
JS
1040 /* Unblock ELS traffic */
1041 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
dea3101e 1042 /* Check for error */
1043 if (mb->mbxStatus) {
ed957684 1044 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
e8b62011
JS
1045 "1307 READ_LA mbox error x%x state x%x\n",
1046 mb->mbxStatus, vport->port_state);
dea3101e 1047 lpfc_mbx_issue_link_down(phba);
2e0fef85 1048 phba->link_state = LPFC_HBA_ERROR;
dea3101e 1049 goto lpfc_mbx_cmpl_read_la_free_mbuf;
1050 }
1051
1052 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
1053
1054 memcpy(&phba->alpa_map[0], mp->virt, 128);
1055
2e0fef85 1056 spin_lock_irq(shost->host_lock);
c9f8735b 1057 if (la->pb)
2e0fef85 1058 vport->fc_flag |= FC_BYPASSED_MODE;
c9f8735b 1059 else
2e0fef85
JS
1060 vport->fc_flag &= ~FC_BYPASSED_MODE;
1061 spin_unlock_irq(shost->host_lock);
c9f8735b 1062
dea3101e 1063 if (((phba->fc_eventTag + 1) < la->eventTag) ||
92d7f7b0 1064 (phba->fc_eventTag == la->eventTag)) {
dea3101e 1065 phba->fc_stat.LinkMultiEvent++;
2e0fef85 1066 if (la->attType == AT_LINK_UP)
dea3101e 1067 if (phba->fc_eventTag != 0)
1068 lpfc_linkdown(phba);
92d7f7b0 1069 }
dea3101e 1070
1071 phba->fc_eventTag = la->eventTag;
1072
1073 if (la->attType == AT_LINK_UP) {
1074 phba->fc_stat.LinkUp++;
2e0fef85 1075 if (phba->link_flag & LS_LOOPBACK_MODE) {
3163f725 1076 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
e8b62011
JS
1077 "1306 Link Up Event in loop back mode "
1078 "x%x received Data: x%x x%x x%x x%x\n",
1079 la->eventTag, phba->fc_eventTag,
1080 la->granted_AL_PA, la->UlnkSpeed,
1081 phba->alpa_map[0]);
5b8bd0c9
JS
1082 } else {
1083 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
e8b62011
JS
1084 "1303 Link Up Event x%x received "
1085 "Data: x%x x%x x%x x%x\n",
1086 la->eventTag, phba->fc_eventTag,
1087 la->granted_AL_PA, la->UlnkSpeed,
1088 phba->alpa_map[0]);
5b8bd0c9 1089 }
92d7f7b0 1090 lpfc_mbx_process_link_up(phba, la);
dea3101e 1091 } else {
1092 phba->fc_stat.LinkDown++;
3163f725
JS
1093 if (phba->link_flag & LS_LOOPBACK_MODE) {
1094 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1095 "1308 Link Down Event in loop back mode "
1096 "x%x received "
1097 "Data: x%x x%x x%x\n",
1098 la->eventTag, phba->fc_eventTag,
1099 phba->pport->port_state, vport->fc_flag);
1100 }
1101 else {
1102 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
e8b62011 1103 "1305 Link Down Event x%x received "
dea3101e 1104 "Data: x%x x%x x%x\n",
e8b62011 1105 la->eventTag, phba->fc_eventTag,
2e0fef85 1106 phba->pport->port_state, vport->fc_flag);
3163f725 1107 }
dea3101e 1108 lpfc_mbx_issue_link_down(phba);
1109 }
1110
1111lpfc_mbx_cmpl_read_la_free_mbuf:
1112 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1113 kfree(mp);
1114 mempool_free(pmb, phba->mbox_mem_pool);
1115 return;
1116}
1117
1118/*
1119 * This routine handles processing a REG_LOGIN mailbox
1120 * command upon completion. It is setup in the LPFC_MBOXQ
1121 * as the completion routine when the command is
1122 * handed off to the SLI layer.
1123 */
1124void
2e0fef85 1125lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 1126{
2e0fef85 1127 struct lpfc_vport *vport = pmb->vport;
92d7f7b0 1128 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2e0fef85 1129 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
dea3101e 1130
dea3101e 1131 pmb->context1 = NULL;
1132
1133 /* Good status, call state machine */
2e0fef85 1134 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
dea3101e 1135 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1136 kfree(mp);
2e0fef85 1137 mempool_free(pmb, phba->mbox_mem_pool);
fa4066b6
JS
1138 /* decrement the node reference count held for this callback
1139 * function.
1140 */
329f9bc7 1141 lpfc_nlp_put(ndlp);
dea3101e 1142
1143 return;
1144}
1145
92d7f7b0
JS
1146static void
1147lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1148{
1149 MAILBOX_t *mb = &pmb->mb;
1150 struct lpfc_vport *vport = pmb->vport;
1151 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1152
1153 switch (mb->mbxStatus) {
1154 case 0x0011:
1155 case 0x0020:
1156 case 0x9700:
e8b62011
JS
1157 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1158 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
1159 mb->mbxStatus);
92d7f7b0 1160 break;
92d7f7b0
JS
1161 }
1162 vport->unreg_vpi_cmpl = VPORT_OK;
1163 mempool_free(pmb, phba->mbox_mem_pool);
1164 /*
1165 * This shost reference might have been taken at the beginning of
1166 * lpfc_vport_delete()
1167 */
1168 if (vport->load_flag & FC_UNLOADING)
1169 scsi_host_put(shost);
1170}
1171
1172void
1173lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1174{
1175 struct lpfc_hba *phba = vport->phba;
1176 LPFC_MBOXQ_t *mbox;
1177 int rc;
1178
1179 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1180 if (!mbox)
1181 return;
1182
1183 lpfc_unreg_vpi(phba, vport->vpi, mbox);
1184 mbox->vport = vport;
1185 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
0b727fea 1186 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
92d7f7b0 1187 if (rc == MBX_NOT_FINISHED) {
e8b62011
JS
1188 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1189 "1800 Could not issue unreg_vpi\n");
92d7f7b0
JS
1190 mempool_free(mbox, phba->mbox_mem_pool);
1191 vport->unreg_vpi_cmpl = VPORT_ERROR;
1192 }
1193}
1194
1195static void
1196lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1197{
1198 struct lpfc_vport *vport = pmb->vport;
1199 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1200 MAILBOX_t *mb = &pmb->mb;
1201
1202 switch (mb->mbxStatus) {
1203 case 0x0011:
1204 case 0x9601:
1205 case 0x9602:
e8b62011
JS
1206 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1207 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
1208 mb->mbxStatus);
92d7f7b0
JS
1209 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1210 spin_lock_irq(shost->host_lock);
1211 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1212 spin_unlock_irq(shost->host_lock);
1213 vport->fc_myDID = 0;
1214 goto out;
1215 }
92d7f7b0
JS
1216
1217 vport->num_disc_nodes = 0;
1218 /* go thru NPR list and issue ELS PLOGIs */
1219 if (vport->fc_npr_cnt)
1220 lpfc_els_disc_plogi(vport);
1221
1222 if (!vport->num_disc_nodes) {
1223 spin_lock_irq(shost->host_lock);
1224 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1225 spin_unlock_irq(shost->host_lock);
1226 lpfc_can_disctmo(vport);
1227 }
1228 vport->port_state = LPFC_VPORT_READY;
1229
1230out:
1231 mempool_free(pmb, phba->mbox_mem_pool);
1232 return;
1233}
1234
dea3101e 1235/*
1236 * This routine handles processing a Fabric REG_LOGIN mailbox
1237 * command upon completion. It is setup in the LPFC_MBOXQ
1238 * as the completion routine when the command is
1239 * handed off to the SLI layer.
1240 */
1241void
2e0fef85 1242lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 1243{
92d7f7b0 1244 struct lpfc_vport *vport = pmb->vport;
2e0fef85
JS
1245 MAILBOX_t *mb = &pmb->mb;
1246 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
92d7f7b0 1247 struct lpfc_nodelist *ndlp;
549e55cd
JS
1248 struct lpfc_vport **vports;
1249 int i;
dea3101e 1250
549e55cd 1251 ndlp = (struct lpfc_nodelist *) pmb->context2;
329f9bc7
JS
1252 pmb->context1 = NULL;
1253 pmb->context2 = NULL;
dea3101e 1254 if (mb->mbxStatus) {
1255 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1256 kfree(mp);
329f9bc7 1257 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 1258
92d7f7b0
JS
1259 if (phba->fc_topology == TOPOLOGY_LOOP) {
1260 /* FLOGI failed, use loop map to make discovery list */
1261 lpfc_disc_list_loopmap(vport);
1262
1263 /* Start discovery */
1264 lpfc_disc_start(vport);
e47c9093
JS
1265 /* Decrement the reference count to ndlp after the
1266 * reference to the ndlp are done.
1267 */
1268 lpfc_nlp_put(ndlp);
92d7f7b0
JS
1269 return;
1270 }
1271
1272 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
1273 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1274 "0258 Register Fabric login error: 0x%x\n",
1275 mb->mbxStatus);
e47c9093
JS
1276 /* Decrement the reference count to ndlp after the reference
1277 * to the ndlp are done.
1278 */
1279 lpfc_nlp_put(ndlp);
dea3101e 1280 return;
1281 }
1282
dea3101e 1283 ndlp->nlp_rpi = mb->un.varWords[0];
dea3101e 1284 ndlp->nlp_type |= NLP_FABRIC;
2e0fef85 1285 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
dea3101e 1286
2e0fef85 1287 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
549e55cd
JS
1288 vports = lpfc_create_vport_work_array(phba);
1289 if (vports != NULL)
1290 for(i = 0;
09372820 1291 i <= phba->max_vpi && vports[i] != NULL;
549e55cd
JS
1292 i++) {
1293 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1294 continue;
58da1ffb
JS
1295 if (phba->fc_topology == TOPOLOGY_LOOP) {
1296 lpfc_vport_set_state(vports[i],
1297 FC_VPORT_LINKDOWN);
1298 continue;
1299 }
549e55cd
JS
1300 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1301 lpfc_initial_fdisc(vports[i]);
58da1ffb 1302 else {
549e55cd
JS
1303 lpfc_vport_set_state(vports[i],
1304 FC_VPORT_NO_FABRIC_SUPP);
e8b62011
JS
1305 lpfc_printf_vlog(vport, KERN_ERR,
1306 LOG_ELS,
1307 "0259 No NPIV "
1308 "Fabric support\n");
549e55cd 1309 }
dea3101e 1310 }
09372820 1311 lpfc_destroy_vport_work_array(phba, vports);
92d7f7b0 1312 lpfc_do_scr_ns_plogi(phba, vport);
dea3101e 1313 }
1314
1315 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1316 kfree(mp);
329f9bc7 1317 mempool_free(pmb, phba->mbox_mem_pool);
e47c9093
JS
1318
1319 /* Drop the reference count from the mbox at the end after
1320 * all the current reference to the ndlp have been done.
1321 */
1322 lpfc_nlp_put(ndlp);
dea3101e 1323 return;
1324}
1325
1326/*
1327 * This routine handles processing a NameServer REG_LOGIN mailbox
1328 * command upon completion. It is setup in the LPFC_MBOXQ
1329 * as the completion routine when the command is
1330 * handed off to the SLI layer.
1331 */
1332void
2e0fef85 1333lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 1334{
2e0fef85
JS
1335 MAILBOX_t *mb = &pmb->mb;
1336 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1337 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1338 struct lpfc_vport *vport = pmb->vport;
dea3101e 1339
1340 if (mb->mbxStatus) {
92d7f7b0 1341out:
fa4066b6
JS
1342 /* decrement the node reference count held for this
1343 * callback function.
1344 */
329f9bc7 1345 lpfc_nlp_put(ndlp);
dea3101e 1346 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1347 kfree(mp);
de0c5b32 1348 mempool_free(pmb, phba->mbox_mem_pool);
87af33fe
JS
1349
1350 /* If no other thread is using the ndlp, free it */
1351 lpfc_nlp_not_used(ndlp);
dea3101e 1352
92d7f7b0
JS
1353 if (phba->fc_topology == TOPOLOGY_LOOP) {
1354 /*
1355 * RegLogin failed, use loop map to make discovery
1356 * list
1357 */
1358 lpfc_disc_list_loopmap(vport);
dea3101e 1359
92d7f7b0
JS
1360 /* Start discovery */
1361 lpfc_disc_start(vport);
1362 return;
1363 }
1364 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e8b62011
JS
1365 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1366 "0260 Register NameServer error: 0x%x\n",
1367 mb->mbxStatus);
dea3101e 1368 return;
1369 }
1370
1371 pmb->context1 = NULL;
1372
dea3101e 1373 ndlp->nlp_rpi = mb->un.varWords[0];
dea3101e 1374 ndlp->nlp_type |= NLP_FABRIC;
2e0fef85 1375 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
dea3101e 1376
2e0fef85
JS
1377 if (vport->port_state < LPFC_VPORT_READY) {
1378 /* Link up discovery requires Fabric registration. */
92d7f7b0
JS
1379 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
1380 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
1381 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
1382 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
1383 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
1384
1385 /* Issue SCR just before NameServer GID_FT Query */
1386 lpfc_issue_els_scr(vport, SCR_DID, 0);
dea3101e 1387 }
1388
2e0fef85 1389 vport->fc_ns_retry = 0;
dea3101e 1390 /* Good status, issue CT Request to NameServer */
92d7f7b0 1391 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
dea3101e 1392 /* Cannot issue NameServer Query, so finish up discovery */
92d7f7b0 1393 goto out;
dea3101e 1394 }
1395
fa4066b6
JS
1396 /* decrement the node reference count held for this
1397 * callback function.
1398 */
329f9bc7 1399 lpfc_nlp_put(ndlp);
dea3101e 1400 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1401 kfree(mp);
2e0fef85 1402 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 1403
1404 return;
1405}
1406
/*
 * Register (or re-register) an ndlp with the FC transport as a remote
 * port, then propagate its FCP role bits and cache its SCSI target id.
 */
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port. But fc_remote_port_add zeros the space to which
	 * rport->dd_data points. So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
		lpfc_nlp_put(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	/* rport holds a node reference until it is unregistered */
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;


	/* Roles are reported to the transport after the add */
	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	/* Only cache target ids within the driver's presentable range */
	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
1465
1466static void
2e0fef85 1467lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
19a7b4ae
JSEC
1468{
1469 struct fc_rport *rport = ndlp->rport;
c01f3208 1470
858c9f6c
JS
1471 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
1472 "rport delete: did:x%x flg:x%x type x%x",
1473 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1474
19a7b4ae 1475 fc_remote_port_delete(rport);
dea3101e 1476
1477 return;
1478}
1479
de0c5b32 1480static void
2e0fef85 1481lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
dea3101e 1482{
2e0fef85
JS
1483 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1484
1485 spin_lock_irq(shost->host_lock);
de0c5b32
JS
1486 switch (state) {
1487 case NLP_STE_UNUSED_NODE:
2e0fef85 1488 vport->fc_unused_cnt += count;
de0c5b32
JS
1489 break;
1490 case NLP_STE_PLOGI_ISSUE:
2e0fef85 1491 vport->fc_plogi_cnt += count;
de0c5b32
JS
1492 break;
1493 case NLP_STE_ADISC_ISSUE:
2e0fef85 1494 vport->fc_adisc_cnt += count;
dea3101e 1495 break;
de0c5b32 1496 case NLP_STE_REG_LOGIN_ISSUE:
2e0fef85 1497 vport->fc_reglogin_cnt += count;
de0c5b32
JS
1498 break;
1499 case NLP_STE_PRLI_ISSUE:
2e0fef85 1500 vport->fc_prli_cnt += count;
de0c5b32
JS
1501 break;
1502 case NLP_STE_UNMAPPED_NODE:
2e0fef85 1503 vport->fc_unmap_cnt += count;
de0c5b32
JS
1504 break;
1505 case NLP_STE_MAPPED_NODE:
2e0fef85 1506 vport->fc_map_cnt += count;
de0c5b32
JS
1507 break;
1508 case NLP_STE_NPR_NODE:
2e0fef85 1509 vport->fc_npr_cnt += count;
de0c5b32
JS
1510 break;
1511 }
2e0fef85 1512 spin_unlock_irq(shost->host_lock);
de0c5b32 1513}
66a9ed66 1514
de0c5b32 1515static void
2e0fef85 1516lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
de0c5b32
JS
1517 int old_state, int new_state)
1518{
2e0fef85
JS
1519 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1520
de0c5b32
JS
1521 if (new_state == NLP_STE_UNMAPPED_NODE) {
1522 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1523 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1524 ndlp->nlp_type |= NLP_FC_NODE;
1525 }
1526 if (new_state == NLP_STE_MAPPED_NODE)
1527 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1528 if (new_state == NLP_STE_NPR_NODE)
1529 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
1530
1531 /* Transport interface */
1532 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
1533 old_state == NLP_STE_UNMAPPED_NODE)) {
2e0fef85
JS
1534 vport->phba->nport_event_cnt++;
1535 lpfc_unregister_remote_port(ndlp);
de0c5b32 1536 }
dea3101e 1537
de0c5b32
JS
1538 if (new_state == NLP_STE_MAPPED_NODE ||
1539 new_state == NLP_STE_UNMAPPED_NODE) {
2e0fef85 1540 vport->phba->nport_event_cnt++;
858c9f6c
JS
1541 /*
1542 * Tell the fc transport about the port, if we haven't
1543 * already. If we have, and it's a scsi entity, be
1544 * sure to unblock any attached scsi devices
1545 */
1546 lpfc_register_remote_port(vport, ndlp);
de0c5b32 1547 }
858c9f6c
JS
1548 /*
1549 * if we added to Mapped list, but the remote port
1550 * registration failed or assigned a target id outside
1551 * our presentable range - move the node to the
1552 * Unmapped List
1553 */
de0c5b32
JS
1554 if (new_state == NLP_STE_MAPPED_NODE &&
1555 (!ndlp->rport ||
1556 ndlp->rport->scsi_target_id == -1 ||
1557 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
2e0fef85 1558 spin_lock_irq(shost->host_lock);
de0c5b32 1559 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
2e0fef85
JS
1560 spin_unlock_irq(shost->host_lock);
1561 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
dea3101e 1562 }
de0c5b32
JS
1563}
1564
685f0bf7
JS
1565static char *
1566lpfc_nlp_state_name(char *buffer, size_t size, int state)
1567{
1568 static char *states[] = {
1569 [NLP_STE_UNUSED_NODE] = "UNUSED",
1570 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
1571 [NLP_STE_ADISC_ISSUE] = "ADISC",
1572 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
1573 [NLP_STE_PRLI_ISSUE] = "PRLI",
1574 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
1575 [NLP_STE_MAPPED_NODE] = "MAPPED",
1576 [NLP_STE_NPR_NODE] = "NPR",
1577 };
1578
311464ec 1579 if (state < NLP_STE_MAX_STATE && states[state])
685f0bf7
JS
1580 strlcpy(buffer, states[state], size);
1581 else
1582 snprintf(buffer, size, "unknown (%d)", state);
1583 return buffer;
1584}
1585
de0c5b32 1586void
2e0fef85
JS
1587lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1588 int state)
de0c5b32 1589{
2e0fef85 1590 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
de0c5b32 1591 int old_state = ndlp->nlp_state;
685f0bf7 1592 char name1[16], name2[16];
de0c5b32 1593
e8b62011
JS
1594 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1595 "0904 NPort state transition x%06x, %s -> %s\n",
1596 ndlp->nlp_DID,
1597 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
1598 lpfc_nlp_state_name(name2, sizeof(name2), state));
858c9f6c
JS
1599
1600 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
1601 "node statechg did:x%x old:%d ste:%d",
1602 ndlp->nlp_DID, old_state, state);
1603
de0c5b32 1604 if (old_state == NLP_STE_NPR_NODE &&
de0c5b32 1605 state != NLP_STE_NPR_NODE)
2e0fef85 1606 lpfc_cancel_retry_delay_tmo(vport, ndlp);
de0c5b32
JS
1607 if (old_state == NLP_STE_UNMAPPED_NODE) {
1608 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1609 ndlp->nlp_type &= ~NLP_FC_NODE;
1610 }
1611
685f0bf7 1612 if (list_empty(&ndlp->nlp_listp)) {
2e0fef85
JS
1613 spin_lock_irq(shost->host_lock);
1614 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
1615 spin_unlock_irq(shost->host_lock);
685f0bf7 1616 } else if (old_state)
2e0fef85 1617 lpfc_nlp_counters(vport, old_state, -1);
de0c5b32
JS
1618
1619 ndlp->nlp_state = state;
2e0fef85
JS
1620 lpfc_nlp_counters(vport, state, 1);
1621 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
de0c5b32
JS
1622}
1623
e47c9093
JS
1624void
1625lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1626{
1627 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1628
1629 if (list_empty(&ndlp->nlp_listp)) {
1630 spin_lock_irq(shost->host_lock);
1631 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
1632 spin_unlock_irq(shost->host_lock);
1633 }
1634}
1635
de0c5b32 1636void
2e0fef85 1637lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
de0c5b32 1638{
2e0fef85
JS
1639 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1640
0d2b6b83 1641 lpfc_cancel_retry_delay_tmo(vport, ndlp);
de0c5b32 1642 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
2e0fef85
JS
1643 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1644 spin_lock_irq(shost->host_lock);
685f0bf7 1645 list_del_init(&ndlp->nlp_listp);
2e0fef85 1646 spin_unlock_irq(shost->host_lock);
858c9f6c 1647 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
e47c9093
JS
1648 NLP_STE_UNUSED_NODE);
1649}
1650
4d9db01e 1651static void
e47c9093
JS
1652lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1653{
0d2b6b83 1654 lpfc_cancel_retry_delay_tmo(vport, ndlp);
e47c9093
JS
1655 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1656 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1657 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1658 NLP_STE_UNUSED_NODE);
1659}
1660
/*
 * Reactivate an inactive ndlp: verify under ndlp_lock that it is
 * neither pending free nor already active, wipe and re-initialize it
 * while preserving its list linkage and DID, reset its kref, mark it
 * active, and optionally move it straight to 'state'.  Returns the
 * ndlp on success or NULL if it could not be (re)activated.
 */
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;

	/* re-initialize ndlp except of ndlp linked list pointer
	 * (nlp_listp is the first member, so the memset starts just
	 * past it — relies on the struct layout)
	 */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->nlp_sid = NLP_NO_SID;
	/* ndlp management re-initialize */
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable: did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}
1722
1723void
2e0fef85 1724lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
de0c5b32 1725{
87af33fe 1726 /*
fa4066b6 1727 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
87af33fe 1728 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
fa4066b6
JS
1729 * the ndlp from the vport. The ndlp marked as UNUSED on the list
1730 * until ALL other outstanding threads have completed. We check
1731 * that the ndlp not already in the UNUSED state before we proceed.
87af33fe 1732 */
fa4066b6
JS
1733 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1734 return;
51ef4c26 1735 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
87af33fe 1736 lpfc_nlp_put(ndlp);
98c9ea5c 1737 return;
dea3101e 1738}
1739
1740/*
1741 * Start / ReStart rescue timer for Discovery / RSCN handling
1742 */
1743void
2e0fef85 1744lpfc_set_disctmo(struct lpfc_vport *vport)
dea3101e 1745{
2e0fef85
JS
1746 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1747 struct lpfc_hba *phba = vport->phba;
dea3101e 1748 uint32_t tmo;
1749
2e0fef85 1750 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
c9f8735b
JW
1751 /* For FAN, timeout should be greater then edtov */
1752 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1753 } else {
1754 /* Normal discovery timeout should be > then ELS/CT timeout
1755 * FC spec states we need 3 * ratov for CT requests
1756 */
1757 tmo = ((phba->fc_ratov * 3) + 3);
1758 }
dea3101e 1759
858c9f6c
JS
1760
1761 if (!timer_pending(&vport->fc_disctmo)) {
1762 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1763 "set disc timer: tmo:x%x state:x%x flg:x%x",
1764 tmo, vport->port_state, vport->fc_flag);
1765 }
1766
2e0fef85
JS
1767 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
1768 spin_lock_irq(shost->host_lock);
1769 vport->fc_flag |= FC_DISC_TMO;
1770 spin_unlock_irq(shost->host_lock);
dea3101e 1771
1772 /* Start Discovery Timer state <hba_state> */
e8b62011
JS
1773 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1774 "0247 Start Discovery Timer state x%x "
1775 "Data: x%x x%lx x%x x%x\n",
1776 vport->port_state, tmo,
1777 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1778 vport->fc_adisc_cnt);
dea3101e 1779
1780 return;
1781}
1782
1783/*
1784 * Cancel rescue timer for Discovery / RSCN handling
1785 */
1786int
2e0fef85 1787lpfc_can_disctmo(struct lpfc_vport *vport)
dea3101e 1788{
2e0fef85 1789 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2e0fef85
JS
1790 unsigned long iflags;
1791
858c9f6c
JS
1792 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1793 "can disc timer: state:x%x rtry:x%x flg:x%x",
1794 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
1795
dea3101e 1796 /* Turn off discovery timer if its running */
2e0fef85
JS
1797 if (vport->fc_flag & FC_DISC_TMO) {
1798 spin_lock_irqsave(shost->host_lock, iflags);
1799 vport->fc_flag &= ~FC_DISC_TMO;
1800 spin_unlock_irqrestore(shost->host_lock, iflags);
1801 del_timer_sync(&vport->fc_disctmo);
1802 spin_lock_irqsave(&vport->work_port_lock, iflags);
1803 vport->work_port_events &= ~WORKER_DISC_TMO;
1804 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
dea3101e 1805 }
1806
1807 /* Cancel Discovery Timer state <hba_state> */
e8b62011
JS
1808 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1809 "0248 Cancel Discovery Timer state x%x "
1810 "Data: x%x x%x x%x\n",
1811 vport->port_state, vport->fc_flag,
1812 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
2fe165b6 1813 return 0;
dea3101e 1814}
1815
1816/*
1817 * Check specified ring for outstanding IOCB on the SLI queue
1818 * Return true if iocb matches the specified nport
1819 */
1820int
2e0fef85
JS
1821lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1822 struct lpfc_sli_ring *pring,
1823 struct lpfc_iocbq *iocb,
1824 struct lpfc_nodelist *ndlp)
dea3101e 1825{
2e0fef85
JS
1826 struct lpfc_sli *psli = &phba->sli;
1827 IOCB_t *icmd = &iocb->iocb;
92d7f7b0
JS
1828 struct lpfc_vport *vport = ndlp->vport;
1829
1830 if (iocb->vport != vport)
1831 return 0;
1832
dea3101e 1833 if (pring->ringno == LPFC_ELS_RING) {
1834 switch (icmd->ulpCommand) {
1835 case CMD_GEN_REQUEST64_CR:
1836 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
2fe165b6 1837 return 1;
dea3101e 1838 case CMD_ELS_REQUEST64_CR:
10d4e957
JS
1839 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
1840 return 1;
dea3101e 1841 case CMD_XMIT_ELS_RSP64_CX:
1842 if (iocb->context1 == (uint8_t *) ndlp)
2fe165b6 1843 return 1;
dea3101e 1844 }
a4bc3379 1845 } else if (pring->ringno == psli->extra_ring) {
dea3101e 1846
1847 } else if (pring->ringno == psli->fcp_ring) {
1848 /* Skip match check if waiting to relogin to FCP target */
1849 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
92d7f7b0 1850 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
2fe165b6 1851 return 0;
dea3101e 1852 }
1853 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
2fe165b6 1854 return 1;
dea3101e 1855 }
1856 } else if (pring->ringno == psli->next_ring) {
1857
1858 }
2fe165b6 1859 return 0;
dea3101e 1860}
1861
1862/*
1863 * Free resources / clean up outstanding I/Os
1864 * associated with nlp_rpi in the LPFC_NODELIST entry.
1865 */
1866static int
2e0fef85 1867lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
dea3101e 1868{
2534ba75 1869 LIST_HEAD(completions);
dea3101e 1870 struct lpfc_sli *psli;
1871 struct lpfc_sli_ring *pring;
1872 struct lpfc_iocbq *iocb, *next_iocb;
1873 IOCB_t *icmd;
1874 uint32_t rpi, i;
1875
92d7f7b0
JS
1876 lpfc_fabric_abort_nport(ndlp);
1877
dea3101e 1878 /*
1879 * Everything that matches on txcmplq will be returned
1880 * by firmware with a no rpi error.
1881 */
1882 psli = &phba->sli;
1883 rpi = ndlp->nlp_rpi;
1884 if (rpi) {
1885 /* Now process each ring */
1886 for (i = 0; i < psli->num_rings; i++) {
1887 pring = &psli->ring[i];
1888
2e0fef85 1889 spin_lock_irq(&phba->hbalock);
dea3101e 1890 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
2e0fef85 1891 list) {
dea3101e 1892 /*
1893 * Check to see if iocb matches the nport we are
1894 * looking for
1895 */
92d7f7b0
JS
1896 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
1897 ndlp))) {
dea3101e 1898 /* It matches, so deque and call compl
1899 with an error */
2534ba75
JS
1900 list_move_tail(&iocb->list,
1901 &completions);
dea3101e 1902 pring->txq_cnt--;
dea3101e 1903 }
1904 }
2e0fef85 1905 spin_unlock_irq(&phba->hbalock);
dea3101e 1906 }
1907 }
2534ba75
JS
1908
1909 while (!list_empty(&completions)) {
1910 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
92d7f7b0 1911 list_del_init(&iocb->list);
2534ba75 1912
2e0fef85
JS
1913 if (!iocb->iocb_cmpl)
1914 lpfc_sli_release_iocbq(phba, iocb);
1915 else {
2534ba75
JS
1916 icmd = &iocb->iocb;
1917 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1918 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2e0fef85
JS
1919 (iocb->iocb_cmpl)(phba, iocb, iocb);
1920 }
2534ba75
JS
1921 }
1922
2fe165b6 1923 return 0;
dea3101e 1924}
1925
1926/*
1927 * Free rpi associated with LPFC_NODELIST entry.
1928 * This routine is called from lpfc_freenode(), when we are removing
1929 * a LPFC_NODELIST entry. It is also called if the driver initiates a
1930 * LOGO that completes successfully, and we are waiting to PLOGI back
1931 * to the remote NPort. In addition, it is called after we receive
1932 * and unsolicated ELS cmd, send back a rsp, the rsp completes and
1933 * we are waiting to PLOGI back to the remote NPort.
1934 */
1935int
2e0fef85 1936lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
dea3101e 1937{
2e0fef85
JS
1938 struct lpfc_hba *phba = vport->phba;
1939 LPFC_MBOXQ_t *mbox;
dea3101e 1940 int rc;
1941
1942 if (ndlp->nlp_rpi) {
2e0fef85
JS
1943 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1944 if (mbox) {
92d7f7b0 1945 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
ed957684 1946 mbox->vport = vport;
92d7f7b0 1947 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0b727fea 1948 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
dea3101e 1949 if (rc == MBX_NOT_FINISHED)
2e0fef85 1950 mempool_free(mbox, phba->mbox_mem_pool);
dea3101e 1951 }
dea3101e 1952 lpfc_no_rpi(phba, ndlp);
1953 ndlp->nlp_rpi = 0;
1954 return 1;
1955 }
1956 return 0;
1957}
1958
92d7f7b0
JS
1959void
1960lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1961{
1962 struct lpfc_hba *phba = vport->phba;
1963 LPFC_MBOXQ_t *mbox;
1964 int rc;
1965
1966 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1967 if (mbox) {
1968 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1969 mbox->vport = vport;
1970 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
09372820
JS
1971 mbox->context1 = NULL;
1972 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
92d7f7b0
JS
1973 if (rc == MBX_NOT_FINISHED) {
1974 mempool_free(mbox, phba->mbox_mem_pool);
1975 }
1976 }
1977}
1978
1979void
1980lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1981{
1982 struct lpfc_hba *phba = vport->phba;
1983 LPFC_MBOXQ_t *mbox;
1984 int rc;
1985
1986 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1987 if (mbox) {
1988 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1989 mbox->vport = vport;
1990 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
09372820
JS
1991 mbox->context1 = NULL;
1992 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
92d7f7b0 1993 if (rc == MBX_NOT_FINISHED) {
e8b62011
JS
1994 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1995 "1815 Could not issue "
1996 "unreg_did (default rpis)\n");
92d7f7b0
JS
1997 mempool_free(mbox, phba->mbox_mem_pool);
1998 }
1999 }
2000}
2001
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	/* A node with a pending free request is dequeued outright;
	 * otherwise it is only disabled (kept around, marked inactive).
	 */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			/* The command is already in flight: just detach the
			 * node and let the default completion run.
			 */
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	/* Commands still queued (not yet issued) can be freed entirely,
	 * including the sparam buffer hung off context1.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Abort any outstanding ELS traffic to this node */
	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Stop the retry-delay machinery and detach pending events */
	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);

	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
2080
2081/*
2082 * Check to see if we can free the nlp back to the freelist.
2083 * If we are in the middle of using the nlp in the discovery state
2084 * machine, defer the free till we reach the end of the state machine.
2085 */
329f9bc7 2086static void
2e0fef85 2087lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
dea3101e 2088{
a8adb832 2089 struct lpfc_hba *phba = vport->phba;
1dcb58e5 2090 struct lpfc_rport_data *rdata;
a8adb832
JS
2091 LPFC_MBOXQ_t *mbox;
2092 int rc;
dea3101e 2093
0d2b6b83 2094 lpfc_cancel_retry_delay_tmo(vport, ndlp);
a8adb832
JS
2095 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
2096 /* For this case we need to cleanup the default rpi
2097 * allocated by the firmware.
2098 */
2099 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
2100 != NULL) {
2101 rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID,
2102 (uint8_t *) &vport->fc_sparam, mbox, 0);
2103 if (rc) {
2104 mempool_free(mbox, phba->mbox_mem_pool);
2105 }
2106 else {
2107 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
2108 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
2109 mbox->vport = vport;
09372820 2110 mbox->context2 = NULL;
a8adb832
JS
2111 rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2112 if (rc == MBX_NOT_FINISHED) {
2113 mempool_free(mbox, phba->mbox_mem_pool);
2114 }
2115 }
2116 }
2117 }
2e0fef85 2118 lpfc_cleanup_node(vport, ndlp);
1dcb58e5 2119
2e0fef85 2120 /*
92d7f7b0
JS
2121 * We can get here with a non-NULL ndlp->rport because when we
2122 * unregister a rport we don't break the rport/node linkage. So if we
2123 * do, make sure we don't leaving any dangling pointers behind.
2e0fef85 2124 */
92d7f7b0 2125 if (ndlp->rport) {
329f9bc7
JS
2126 rdata = ndlp->rport->dd_data;
2127 rdata->pnode = NULL;
2128 ndlp->rport = NULL;
dea3101e 2129 }
dea3101e 2130}
2131
2132static int
2e0fef85
JS
2133lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2134 uint32_t did)
dea3101e 2135{
2e0fef85 2136 D_ID mydid, ndlpdid, matchdid;
dea3101e 2137
2138 if (did == Bcast_DID)
2fe165b6 2139 return 0;
dea3101e 2140
dea3101e 2141 /* First check for Direct match */
2142 if (ndlp->nlp_DID == did)
2fe165b6 2143 return 1;
dea3101e 2144
2145 /* Next check for area/domain identically equals 0 match */
2e0fef85 2146 mydid.un.word = vport->fc_myDID;
dea3101e 2147 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
2fe165b6 2148 return 0;
dea3101e 2149 }
2150
2151 matchdid.un.word = did;
2152 ndlpdid.un.word = ndlp->nlp_DID;
2153 if (matchdid.un.b.id == ndlpdid.un.b.id) {
2154 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
2155 (mydid.un.b.area == matchdid.un.b.area)) {
2156 if ((ndlpdid.un.b.domain == 0) &&
2157 (ndlpdid.un.b.area == 0)) {
2158 if (ndlpdid.un.b.id)
2fe165b6 2159 return 1;
dea3101e 2160 }
2fe165b6 2161 return 0;
dea3101e 2162 }
2163
2164 matchdid.un.word = ndlp->nlp_DID;
2165 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
2166 (mydid.un.b.area == ndlpdid.un.b.area)) {
2167 if ((matchdid.un.b.domain == 0) &&
2168 (matchdid.un.b.area == 0)) {
2169 if (matchdid.un.b.id)
2fe165b6 2170 return 1;
dea3101e 2171 }
2172 }
2173 }
2fe165b6 2174 return 0;
dea3101e 2175}
2176
685f0bf7 2177/* Search for a nodelist entry */
2e0fef85
JS
2178static struct lpfc_nodelist *
2179__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
dea3101e 2180{
2fb9bd8b 2181 struct lpfc_nodelist *ndlp;
dea3101e 2182 uint32_t data1;
2183
2e0fef85
JS
2184 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2185 if (lpfc_matchdid(vport, ndlp, did)) {
685f0bf7
JS
2186 data1 = (((uint32_t) ndlp->nlp_state << 24) |
2187 ((uint32_t) ndlp->nlp_xri << 16) |
2188 ((uint32_t) ndlp->nlp_type << 8) |
2189 ((uint32_t) ndlp->nlp_rpi & 0xff));
e8b62011
JS
2190 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2191 "0929 FIND node DID "
2192 "Data: x%p x%x x%x x%x\n",
2193 ndlp, ndlp->nlp_DID,
2194 ndlp->nlp_flag, data1);
685f0bf7 2195 return ndlp;
dea3101e 2196 }
2197 }
66a9ed66 2198
dea3101e 2199 /* FIND node did <did> NOT FOUND */
e8b62011
JS
2200 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2201 "0932 FIND node did x%x NOT FOUND.\n", did);
dea3101e 2202 return NULL;
2203}
2204
2205struct lpfc_nodelist *
2e0fef85
JS
2206lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2207{
2208 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2209 struct lpfc_nodelist *ndlp;
2210
2211 spin_lock_irq(shost->host_lock);
2212 ndlp = __lpfc_findnode_did(vport, did);
2213 spin_unlock_irq(shost->host_lock);
2214 return ndlp;
2215}
2216
/*
 * Find or create the node for DID and mark it for discovery
 * (NLP_NPR_2B_DISC).  Returns the node, or NULL when the DID should not
 * be discovered (filtered by RSCN payload, already discovered, or
 * allocation failure).
 */
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		/* In RSCN mode only DIDs named in the RSCN payload are
		 * worth creating a node for.
		 */
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* Node exists but is inactive: re-enable it in NPR state */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
		} else
			/* DID not in the RSCN payload: nothing to discover */
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
2283
2284/* Build a list of nodes to discover based on the loopmap */
2285void
2e0fef85 2286lpfc_disc_list_loopmap(struct lpfc_vport *vport)
dea3101e 2287{
2e0fef85 2288 struct lpfc_hba *phba = vport->phba;
dea3101e 2289 int j;
2290 uint32_t alpa, index;
2291
2e0fef85 2292 if (!lpfc_is_link_up(phba))
dea3101e 2293 return;
2e0fef85
JS
2294
2295 if (phba->fc_topology != TOPOLOGY_LOOP)
dea3101e 2296 return;
dea3101e 2297
2298 /* Check for loop map present or not */
2299 if (phba->alpa_map[0]) {
2300 for (j = 1; j <= phba->alpa_map[0]; j++) {
2301 alpa = phba->alpa_map[j];
2e0fef85 2302 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
dea3101e 2303 continue;
2e0fef85 2304 lpfc_setup_disc_node(vport, alpa);
dea3101e 2305 }
2306 } else {
2307 /* No alpamap, so try all alpa's */
2308 for (j = 0; j < FC_MAXLOOP; j++) {
2309 /* If cfg_scan_down is set, start from highest
2310 * ALPA (0xef) to lowest (0x1).
2311 */
3de2a653 2312 if (vport->cfg_scan_down)
dea3101e 2313 index = j;
2314 else
2315 index = FC_MAXLOOP - j - 1;
2316 alpa = lpfcAlpaArray[index];
2e0fef85 2317 if ((vport->fc_myDID & 0xff) == alpa)
dea3101e 2318 continue;
2e0fef85 2319 lpfc_setup_disc_node(vport, alpa);
dea3101e 2320 }
2321 }
2322 return;
2323}
2324
dea3101e 2325void
2e0fef85 2326lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
dea3101e 2327{
dea3101e 2328 LPFC_MBOXQ_t *mbox;
2e0fef85
JS
2329 struct lpfc_sli *psli = &phba->sli;
2330 struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
2331 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
2332 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
2333 int rc;
2334
92d7f7b0
JS
2335 /*
2336 * if it's not a physical port or if we already send
2337 * clear_la then don't send it.
2338 */
2339 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2340 (vport->port_type != LPFC_PHYSICAL_PORT))
2341 return;
2342
2e0fef85
JS
2343 /* Link up discovery */
2344 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
2345 phba->link_state = LPFC_CLEAR_LA;
2346 lpfc_clear_la(phba, mbox);
2347 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2348 mbox->vport = vport;
0b727fea 2349 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2e0fef85
JS
2350 if (rc == MBX_NOT_FINISHED) {
2351 mempool_free(mbox, phba->mbox_mem_pool);
2352 lpfc_disc_flush_list(vport);
2353 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2354 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2355 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
92d7f7b0
JS
2356 phba->link_state = LPFC_HBA_ERROR;
2357 }
2358 }
2359}
2360
2361/* Reg_vpi to tell firmware to resume normal operations */
2362void
2363lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2364{
2365 LPFC_MBOXQ_t *regvpimbox;
2366
2367 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2368 if (regvpimbox) {
2369 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2370 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2371 regvpimbox->vport = vport;
0b727fea 2372 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
92d7f7b0
JS
2373 == MBX_NOT_FINISHED) {
2374 mempool_free(regvpimbox, phba->mbox_mem_pool);
2e0fef85
JS
2375 }
2376 }
2377}
2378
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	/* (Re)arm the discovery timeout for this pass */
	lpfc_set_disctmo(vport);

	/* NOTE(review): did_changed is computed but not consulted in this
	 * function — presumably informational/leftover; confirm before
	 * removing.
	 */
	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/*
	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
	 * continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			/* Nothing left to discover: end the discovery pass */
			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
2480
/*
 * Ignore completion for all IOCBs on the tx and txcmpl queues of the
 * ELS ring that match the specified nodelist: txq entries are completed
 * locally with an aborted status, txcmplq entries are aborted via
 * ABORT_IOTAG (their completions arrive later through the firmware).
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		/* Only ELS requests and ELS responses are matched */
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Complete the collected txq iocbs outside the lock */
	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del_init(&iocb->list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		}
	}
}
2541
a6ababd2 2542static void
2e0fef85 2543lpfc_disc_flush_list(struct lpfc_vport *vport)
dea3101e 2544{
2545 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 2546 struct lpfc_hba *phba = vport->phba;
dea3101e 2547
2e0fef85
JS
2548 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
2549 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
685f0bf7 2550 nlp_listp) {
e47c9093
JS
2551 if (!NLP_CHK_NODE_ACT(ndlp))
2552 continue;
685f0bf7
JS
2553 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2554 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
2555 lpfc_free_tx(phba, ndlp);
685f0bf7 2556 }
dea3101e 2557 }
2558 }
dea3101e 2559}
2560
92d7f7b0
JS
/* Tear down all discovery state on a vport: pending RSCNs, outstanding
 * ELS commands, and nodes still in PLOGI/ADISC (in that order).
 */
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
2568
dea3101e 2569/*****************************************************************************/
2570/*
2571 * NAME: lpfc_disc_timeout
2572 *
2573 * FUNCTION: Fibre Channel driver discovery timeout routine.
2574 *
2575 * EXECUTION ENVIRONMENT: interrupt only
2576 *
2577 * CALLED FROM:
2578 * Timer function
2579 *
2580 * RETURNS:
2581 * none
2582 */
2583/*****************************************************************************/
2584void
2585lpfc_disc_timeout(unsigned long ptr)
2586{
2e0fef85
JS
2587 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
2588 struct lpfc_hba *phba = vport->phba;
5e9d9b82 2589 uint32_t tmo_posted;
dea3101e 2590 unsigned long flags = 0;
2591
2592 if (unlikely(!phba))
2593 return;
2594
5e9d9b82
JS
2595 spin_lock_irqsave(&vport->work_port_lock, flags);
2596 tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
2597 if (!tmo_posted)
2e0fef85 2598 vport->work_port_events |= WORKER_DISC_TMO;
5e9d9b82 2599 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2e0fef85 2600
5e9d9b82
JS
2601 if (!tmo_posted)
2602 lpfc_worker_wake_up(phba);
dea3101e 2603 return;
2604}
2605
/* Worker-thread handler for the discovery timeout.  Dispatches on the
 * vport discovery state (first switch) and then sanity-checks the HBA
 * link state (second switch), flushing discovery and forcing the vport
 * READY when the timeout hit an unexpected link state.
 */
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_sli  *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
	 * FAN
	 */
				/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			lpfc_initial_flogi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
		/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
			lpfc_issue_reg_vpi(phba, vport);
		else  {	/* NPIV Not enabled */
			lpfc_issue_clear_la(phba, vport);
			vport->port_state = LPFC_VPORT_READY;
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
			lpfc_issue_reg_vpi(phba, vport);
		else {	/* NPIV Not enabled */
			lpfc_issue_clear_la(phba, vport);
			vport->port_state = LPFC_VPORT_READY;
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0229 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	/* Second pass: validate the HBA link state the timeout fired in */
	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
				/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	/* On any unexpected state, abandon discovery and force READY */
	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}
2823
/*
 * This routine handles processing the FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	pmb->context1 = NULL;

	/* Record the RPI the firmware assigned and mark the node as a
	 * fabric entity now that the login is registered.
	 */
	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */

	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		/* Defer the first FDMI command by 60 seconds. */
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	/* Release the mailbox DMA buffer, its descriptor, and the
	 * mailbox object itself.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
2865
685f0bf7
JS
2866static int
2867lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
2868{
2869 uint16_t *rpi = param;
2870
2871 return ndlp->nlp_rpi == *rpi;
2872}
2873
2874static int
2875lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
2876{
2877 return memcmp(&ndlp->nlp_portname, param,
2878 sizeof(ndlp->nlp_portname)) == 0;
2879}
2880
a6ababd2 2881static struct lpfc_nodelist *
2e0fef85 2882__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
dea3101e 2883{
21568f53 2884 struct lpfc_nodelist *ndlp;
dea3101e 2885
2e0fef85 2886 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
87af33fe 2887 if (filter(ndlp, param))
685f0bf7
JS
2888 return ndlp;
2889 }
21568f53 2890 return NULL;
dea3101e 2891}
2892
685f0bf7
JS
2893/*
2894 * This routine looks up the ndlp lists for the given RPI. If rpi found it
2e0fef85 2895 * returns the node list element pointer else return NULL.
685f0bf7
JS
2896 */
2897struct lpfc_nodelist *
2e0fef85 2898__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
685f0bf7 2899{
2e0fef85 2900 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
685f0bf7
JS
2901}
2902
488d1469 2903/*
685f0bf7 2904 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
2e0fef85 2905 * returns the node element list pointer else return NULL.
488d1469
JS
2906 */
2907struct lpfc_nodelist *
2e0fef85 2908lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
488d1469 2909{
2e0fef85 2910 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
488d1469 2911 struct lpfc_nodelist *ndlp;
488d1469 2912
2e0fef85
JS
2913 spin_lock_irq(shost->host_lock);
2914 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
2915 spin_unlock_irq(shost->host_lock);
858c9f6c 2916 return ndlp;
488d1469
JS
2917}
2918
dea3101e 2919void
2e0fef85
JS
2920lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2921 uint32_t did)
dea3101e 2922{
2923 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
dea3101e 2924 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
92d7f7b0 2925 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
dea3101e 2926 init_timer(&ndlp->nlp_delayfunc);
2927 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2928 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2929 ndlp->nlp_DID = did;
2e0fef85 2930 ndlp->vport = vport;
dea3101e 2931 ndlp->nlp_sid = NLP_NO_SID;
685f0bf7 2932 INIT_LIST_HEAD(&ndlp->nlp_listp);
329f9bc7 2933 kref_init(&ndlp->kref);
e47c9093 2934 NLP_INT_NODE_ACT(ndlp);
858c9f6c
JS
2935
2936 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2937 "node init: did:x%x",
2938 ndlp->nlp_DID, 0, 0);
2939
dea3101e 2940 return;
2941}
329f9bc7 2942
98c9ea5c
JS
2943/* This routine releases all resources associated with a specifc NPort's ndlp
2944 * and mempool_free's the nodelist.
2945 */
311464ec 2946static void
329f9bc7
JS
2947lpfc_nlp_release(struct kref *kref)
2948{
e47c9093
JS
2949 struct lpfc_hba *phba;
2950 unsigned long flags;
329f9bc7
JS
2951 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
2952 kref);
858c9f6c
JS
2953
2954 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2955 "node release: did:x%x flg:x%x type:x%x",
2956 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2957
e47c9093
JS
2958 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
2959 "0279 lpfc_nlp_release: ndlp:x%p "
2960 "usgmap:x%x refcnt:%d\n",
2961 (void *)ndlp, ndlp->nlp_usg_map,
2962 atomic_read(&ndlp->kref.refcount));
2963
2964 /* remove ndlp from action. */
2e0fef85 2965 lpfc_nlp_remove(ndlp->vport, ndlp);
e47c9093
JS
2966
2967 /* clear the ndlp active flag for all release cases */
2968 phba = ndlp->vport->phba;
2969 spin_lock_irqsave(&phba->ndlp_lock, flags);
2970 NLP_CLR_NODE_ACT(ndlp);
2971 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
2972
2973 /* free ndlp memory for final ndlp release */
2974 if (NLP_CHK_FREE_REQ(ndlp))
2975 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
329f9bc7
JS
2976}
2977
98c9ea5c
JS
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it. Returns the ndlp on success; returns NULL (and takes no
 * reference) when @ndlp is NULL, inactive, or already being released.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get: did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released. The usage flags and the kref_get are inspected
		 * under phba->ndlp_lock so the decision and the increment
		 * are atomic with respect to lpfc_nlp_put/lpfc_nlp_release.
		 */
		phba = ndlp->vport->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			/* Node is inactive or its free has been
			 * acknowledged: refuse the reference.
			 */
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
3013
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	/* A NULL ndlp is treated as already released. */
	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
	"node put: did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->vport->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp. The flags are set while still holding
	 * phba->ndlp_lock, so lpfc_nlp_get observes them atomically.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note, the kref_put returns 1 when decrementing a reference
	 * count that was 1, it invokes the release callback function,
	 * but it still left the reference count as 1 (not actually
	 * performs the last decrementation). Otherwise, it actually
	 * decrements the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
98c9ea5c
JS
3082
3083/* This routine free's the specified nodelist if it is not in use
e47c9093
JS
3084 * by any other discovery thread. This routine returns 1 if the
3085 * ndlp has been freed. A return value of 0 indicates the ndlp is
3086 * not yet been released.
98c9ea5c
JS
3087 */
3088int
3089lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
3090{
3091 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
3092 "node not used: did:x%x flg:x%x refcnt:x%x",
3093 ndlp->nlp_DID, ndlp->nlp_flag,
3094 atomic_read(&ndlp->kref.refcount));
e47c9093
JS
3095 if (atomic_read(&ndlp->kref.refcount) == 1)
3096 if (lpfc_nlp_put(ndlp))
3097 return 1;
98c9ea5c
JS
3098 return 0;
3099}