scsi: lpfc: Add blk_io_poll support for latency improvement
[linux-block.git] / drivers / scsi / lpfc / lpfc_hbadisc.c
CommitLineData
dea3101e 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
c44ce173 3 * Fibre Channel Host Bus Adapters. *
145e5a8a 4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
4ae2ebde 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
50611577 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
c44ce173 7 * EMULEX and SLI are trademarks of Emulex. *
d080abe0 8 * www.broadcom.com *
c44ce173 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea3101e 10 * *
11 * This program is free software; you can redistribute it and/or *
c44ce173
JSEC
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
dea3101e 22 *******************************************************************/
23
dea3101e 24#include <linux/blkdev.h>
a93ff37a 25#include <linux/delay.h>
5a0e3ad6 26#include <linux/slab.h>
dea3101e 27#include <linux/pci.h>
28#include <linux/kthread.h>
29#include <linux/interrupt.h>
1c2ba475 30#include <linux/lockdep.h>
e3ba04c9 31#include <linux/utsname.h>
dea3101e 32
91886523 33#include <scsi/scsi.h>
dea3101e 34#include <scsi/scsi_device.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi_transport_fc.h>
a0f2d3ef
JS
37#include <scsi/fc/fc_fs.h>
38
da0436e9 39#include "lpfc_hw4.h"
dea3101e 40#include "lpfc_hw.h"
ea2151b4 41#include "lpfc_nl.h"
dea3101e 42#include "lpfc_disc.h"
43#include "lpfc_sli.h"
da0436e9 44#include "lpfc_sli4.h"
dea3101e 45#include "lpfc.h"
a0f2d3ef
JS
46#include "lpfc_scsi.h"
47#include "lpfc_nvme.h"
dea3101e 48#include "lpfc_logmsg.h"
49#include "lpfc_crtn.h"
92d7f7b0 50#include "lpfc_vport.h"
858c9f6c 51#include "lpfc_debugfs.h"
dea3101e 52
53/* AlpaArray for assignment of scsid for scan-down and bind_method */
54static uint8_t lpfcAlpaArray[] = {
55 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
56 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
57 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
58 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
59 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
60 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
61 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
62 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
63 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
64 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
65 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
66 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
67 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
68};
69
2e0fef85 70static void lpfc_disc_timeout_handler(struct lpfc_vport *);
a6ababd2 71static void lpfc_disc_flush_list(struct lpfc_vport *vport);
32b9793f 72static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
a93ff37a 73static int lpfc_fcf_inuse(struct lpfc_hba *);
dea3101e 74
c01f3208
JS
/**
 * lpfc_terminate_rport_io - Abort outstanding FCP I/O for a remote port
 * @rport: fc transport remote port whose I/O must be terminated.
 *
 * Called by the FC transport layer to kill all outstanding I/O queued to
 * the node behind @rport. If no active node is attached to the rport,
 * nothing can be aborted; a message is logged only for target-role ports.
 */
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	/* No usable node attached to this rport: nothing to abort. */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
			      "rport terminate: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	/* Abort every FCP iocb addressed to this target's SCSI id. */
	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
				    &phba->sli.sli3_ring[LPFC_FCP_RING],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
105
/*
 * This function will be called when dev_loss_tmo fire.
 *
 * It does not handle the timeout itself; it validates the node, then
 * queues an LPFC_EVT_DEV_LOSS work item for the worker thread, holding a
 * node reference until that work is processed.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;
	unsigned long iflags;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosscb: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		/* Drop the references the node and rport held on each other */
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	/* Node came back to MAPPED before the timer handler ran: ignore. */
	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	/* NOTE(review): mismatch here suggests rport/ndlp got out of sync */
	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				 "6789 rport name %llx != node port name %llx",
				 rport->port_name,
				 wwn_to_u64(ndlp->nlp_portname.u.wwn));

	evtp = &ndlp->dev_loss_evt;

	/* A devloss work item for this node is already queued: do not
	 * queue it twice (evt_listp is embedded in the ndlp).
	 */
	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				 "6790 rport name %llx dev_loss_evt pending",
				 rport->port_name);
		return;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(shost->host_lock, iflags);

	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return;
}
192
a93ff37a
JS
/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when devloss timeout timer
 * expires. For SLI4 host, this routine shall return 1 when at least one
 * remote node, including this @ndlp, is still in use of FCF; otherwise, this
 * routine shall return 0 when there is no remote node is still in use of FCF
 * when devloss timeout happened to this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct Scsi_Host *shost;
	uint8_t *name;
	int put_node;
	int warn_on = 0;
	int fcf_inuse = 0;
	unsigned long iflags;

	rport = ndlp->rport;
	vport = ndlp->vport;
	shost = lpfc_shost_from_vport(vport);

	/* The queued devloss work is now being handled: clear the flag set
	 * by lpfc_dev_loss_tmo_callbk under the same host lock.
	 */
	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(shost->host_lock, iflags);

	if (!rport)
		return fcf_inuse;

	/* WWPN bytes, used below for human-readable log messages */
	name = (uint8_t *) &ndlp->nlp_portname;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosstmo:did:x%x type:x%x id:x%x",
			      ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/*
	 * lpfc_nlp_remove if reached with dangling rport drops the
	 * reference. To make sure that does not happen clear rport
	 * pointer in ndlp before lpfc_nlp_put.
	 */
	rdata = rport->dd_data;

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					    &phba->sli.sli3_ring[LPFC_FCP_RING],
					    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		put_device(&rport->dev);

		return fcf_inuse;
	}

	/* Node re-entered MAPPED state before the timeout was handled. */
	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	/* Break the ndlp <-> rport linkage and drop the held references. */
	put_node = rdata->pnode != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	put_device(&rport->dev);

	/* Fabric nodes are not removed on devloss timeout. */
	if (ndlp->nlp_type & NLP_FABRIC)
		return fcf_inuse;

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	/* Same message either way; KERN_ERR only if I/O had to be aborted. */
	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	/* Remove the node unless it is still being recovered/discovered. */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}
326
/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for SLI4 host. For the devloss
 * timeout of the last remote node which had been in use of FCF, when this
 * routine is invoked, it shall be guaranteed that none of the remote nodes
 * are in-use of FCF. When devloss timeout to the last remote using the FCF,
 * if the FIP engine is neither in FCF table scan process nor roundrobin
 * failover process, the in-use FCF shall be unregistered. If the FIP
 * engine is in FCF discovery process, the devloss timeout state shall
 * be set for either the FCF table scan process or roundrobin failover
 * process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/* Only flag devloss once per discovery cycle. */
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}
c01f3208 394
/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates data
 * structure required for posting event. It also keeps track of
 * number of events pending and prevent event storm when there are
 * too many events.
 *
 * Return: new event structure, or NULL on allocation failure or when
 * the pending-event cap (LPFC_MAX_EVT_COUNT) has been reached.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are lot of fast event do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	/* GFP_ATOMIC: callers may be in interrupt context */
	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
422
/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which need to be freed.
 *
 * This function frees the data structure required for posting
 * events. It also decrements the pending-event counter that
 * lpfc_alloc_fast_evt() incremented.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}
438
/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from worker thread, when the interrupt
 * context need to post an event. This function posts the event
 * to fc transport netlink interface. Unrecognized categories or
 * subcategories are dropped (the event is freed without posting).
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);

	/* Select the payload and its size based on category/subcategory. */
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	/* NVMe-only ports have no FC transport host to post events to. */
	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_vendor_event(shost,
			fc_get_event_number(),
			evt_data_size,
			evt_data,
			LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}
514
/**
 * lpfc_work_list_done - Drain and process the hba work_list
 * @phba: Pointer to hba context object.
 *
 * Runs in the worker thread. Pops each queued lpfc_work_evt off
 * phba->work_list (under hbalock, dropped while each event is handled)
 * and dispatches on the event type. Events embedded in other objects
 * (ELS retry, dev-loss, fastpath) are not kfree'd here.
 */
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		/* Drop the lock while the event is being processed. */
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0; /* freed by lpfc_send_fastpath_evt */
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}
609
/**
 * lpfc_work_done - Main body of work performed by the worker thread
 * @phba: Pointer to hba context object.
 *
 * Snapshots and clears phba->work_ha, then services: host attention
 * events (error/mailbox/link), SLI4 async events, per-vport timer
 * events, slow-path ELS ring events, and finally the queued work list.
 */
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	/* Atomically take the pending host-attention bits. */
	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	/* Service per-vport timer-driven events. */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Slow-path (ELS ring) receive processing. */
	pring = lpfc_phba_elsring(phba);
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if (pring && (status & HA_RXMASK ||
		      pring->flag & LPFC_DEFERRED_RING_EVENT ||
		      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Preserve legacy behavior. */
			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
				set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			/* Driver could have abort request completed in queue
			 * when link goes down. Allow for this transition.
			 */
			if (phba->link_state >= LPFC_LINK_DOWN ||
			    phba->link_flag & LS_MDS_LOOPBACK) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}
739
/**
 * lpfc_do_work - lpfc worker kthread main loop
 * @p: Pointer to the lpfc_hba context object (passed as void * by kthread).
 *
 * Sleeps until LPFC_DATA_READY is set on phba->data_flags (or the thread
 * is asked to stop), then runs lpfc_work_done(). A signal wakeup
 * terminates the thread. Always returns 0.
 */
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
771
/*
 * This is only called to handle FC worker events. Since this a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 *
 * Returns 1 on success, 0 if the event structure could not be allocated.
 * The worker thread frees the event after processing it.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
804
92d7f7b0
JS
/**
 * lpfc_cleanup_rpis - Recover or remove all nodes of a vport on link failure
 * @vport: Pointer to the virtual port object.
 * @remove: Nonzero to remove nodes (NLP_EVT_DEVICE_RM), zero to put them
 *          into recovery (NLP_EVT_DEVICE_RECOVERY).
 *
 * Walks vport->fc_nodes, unregistering RPIs where required, and drives
 * each active node through the discovery state machine. If the vport is
 * being torn down (LPFC_SLI3_VPORT_TEARDOWN), also unregisters the VPI
 * and marks it as needing re-registration.
 */
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;

		/* Notify transport of connectivity loss to trigger cleanup. */
		if (phba->nvmet_support &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_nvmet_invalidate_host(phba, ndlp);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					remove
					? NLP_EVT_DEVICE_RM
					: NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
846
/**
 * lpfc_port_link_failure - Clean up a vport after the physical link failed
 * @vport: Pointer to the virtual port object.
 *
 * Marks the vport as link-down for the transport, flushes outstanding
 * receive buffers, RSCN activity and ELS commands, puts the vport's
 * nodes into recovery (lpfc_cleanup_rpis with remove=0), and stops the
 * discovery timer.
 */
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}
866
/**
 * lpfc_linkdown_port - Issue link-down processing for one vport
 * @vport: Pointer to the virtual port object.
 *
 * Posts an FCH_EVT_LINKDOWN event to the FC transport (unless the port
 * is NVMe-only), performs the common link-failure cleanup, and cancels
 * any delayed NPort discovery.
 */
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}
887}
888
dea3101e 889int
685f0bf7 890lpfc_linkdown(struct lpfc_hba *phba)
dea3101e 891{
2e0fef85
JS
892 struct lpfc_vport *vport = phba->pport;
893 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
549e55cd 894 struct lpfc_vport **vports;
685f0bf7 895 LPFC_MBOXQ_t *mb;
549e55cd 896 int i;
dea3101e 897
19193ff3 898 if (phba->link_state == LPFC_LINK_DOWN)
2e0fef85 899 return 0;
19193ff3 900
aacc20e3
JS
901 /* Block all SCSI stack I/Os */
902 lpfc_scsi_dev_block(phba);
903
0a9e9687
JS
904 phba->defer_flogi_acc_flag = false;
905
2e0fef85 906 spin_lock_irq(&phba->hbalock);
ecfd03c6 907 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
72100cc4 908 spin_unlock_irq(&phba->hbalock);
92d7f7b0 909 if (phba->link_state > LPFC_LINK_DOWN) {
2e0fef85 910 phba->link_state = LPFC_LINK_DOWN;
2977a095
JS
911 if (phba->sli4_hba.conf_trunk) {
912 phba->trunk_link.link0.state = 0;
913 phba->trunk_link.link1.state = 0;
914 phba->trunk_link.link2.state = 0;
915 phba->trunk_link.link3.state = 0;
845d0327
JS
916 phba->sli4_hba.link_state.logical_speed =
917 LPFC_LINK_SPEED_UNKNOWN;
2977a095 918 }
72100cc4 919 spin_lock_irq(shost->host_lock);
92d7f7b0 920 phba->pport->fc_flag &= ~FC_LBIT;
72100cc4 921 spin_unlock_irq(shost->host_lock);
92d7f7b0 922 }
549e55cd 923 vports = lpfc_create_vport_work_array(phba);
895427bd 924 if (vports != NULL) {
6fb120a7 925 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd
JS
926 /* Issue a LINK DOWN event to all nodes */
927 lpfc_linkdown_port(vports[i]);
895427bd
JS
928
929 vports[i]->fc_myDID = 0;
930
f6e84790
JS
931 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
932 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
d613b6a7
JS
933 if (phba->nvmet_support)
934 lpfc_nvmet_update_targetport(phba);
935 else
8c258641 936 lpfc_nvme_update_localport(vports[i]);
8c258641 937 }
549e55cd 938 }
895427bd 939 }
09372820 940 lpfc_destroy_vport_work_array(phba, vports);
c95a3b4b
JS
941
942 /* Clean up any SLI3 firmware default rpi's */
943 if (phba->sli_rev > LPFC_SLI_REV3)
944 goto skip_unreg_did;
945
2e0fef85
JS
946 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
947 if (mb) {
6d368e53 948 lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
ed957684 949 mb->vport = vport;
2e0fef85 950 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0b727fea 951 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
dea3101e 952 == MBX_NOT_FINISHED) {
2e0fef85 953 mempool_free(mb, phba->mbox_mem_pool);
dea3101e 954 }
955 }
956
c95a3b4b 957 skip_unreg_did:
dea3101e 958 /* Setup myDID for link up if we are in pt2pt mode */
92d7f7b0 959 if (phba->pport->fc_flag & FC_PT2PT) {
2e0fef85
JS
960 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
961 if (mb) {
dea3101e 962 lpfc_config_link(phba, mb);
92d7f7b0 963 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
ed957684 964 mb->vport = vport;
0b727fea 965 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
dea3101e 966 == MBX_NOT_FINISHED) {
2e0fef85 967 mempool_free(mb, phba->mbox_mem_pool);
dea3101e 968 }
969 }
2e0fef85 970 spin_lock_irq(shost->host_lock);
92d7f7b0 971 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
d496b9a7 972 phba->pport->rcv_flogi_cnt = 0;
2e0fef85 973 spin_unlock_irq(shost->host_lock);
dea3101e 974 }
92d7f7b0
JS
975 return 0;
976}
dea3101e 977
92d7f7b0
JS
978static void
979lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
980{
981 struct lpfc_nodelist *ndlp;
dea3101e 982
92d7f7b0 983 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
a3da825b 984 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
e47c9093
JS
985 if (!NLP_CHK_NODE_ACT(ndlp))
986 continue;
92d7f7b0
JS
987 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
988 continue;
92d7f7b0 989 if (ndlp->nlp_type & NLP_FABRIC) {
e47c9093
JS
990 /* On Linkup its safe to clean up the ndlp
991 * from Fabric connections.
992 */
92d7f7b0
JS
993 if (ndlp->nlp_DID != Fabric_DID)
994 lpfc_unreg_rpi(vport, ndlp);
995 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
996 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
e47c9093
JS
997 /* Fail outstanding IO now since device is
998 * marked for PLOGI.
999 */
92d7f7b0
JS
1000 lpfc_unreg_rpi(vport, ndlp);
1001 }
1002 }
dea3101e 1003}
1004
92d7f7b0
JS
1005static void
1006lpfc_linkup_port(struct lpfc_vport *vport)
dea3101e 1007{
92d7f7b0 1008 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
92d7f7b0
JS
1009 struct lpfc_hba *phba = vport->phba;
1010
1011 if ((vport->load_flag & FC_UNLOADING) != 0)
1012 return;
1013
858c9f6c
JS
1014 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1015 "Link Up: top:x%x speed:x%x flg:x%x",
1016 phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
1017
92d7f7b0
JS
1018 /* If NPIV is not enabled, only bring the physical port up */
1019 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1020 (vport != phba->pport))
1021 return;
dea3101e 1022
f6e84790 1023 if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
a0f2d3ef
JS
1024 fc_host_post_event(shost, fc_get_event_number(),
1025 FCH_EVT_LINKUP, 0);
d2873e4c 1026
2e0fef85 1027 spin_lock_irq(shost->host_lock);
2e0fef85
JS
1028 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
1029 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
1030 vport->fc_flag |= FC_NDISC_ACTIVE;
1031 vport->fc_ns_retry = 0;
1032 spin_unlock_irq(shost->host_lock);
dea3101e 1033
92d7f7b0
JS
1034 if (vport->fc_flag & FC_LBIT)
1035 lpfc_linkup_cleanup_nodes(vport);
dea3101e 1036
92d7f7b0
JS
1037}
1038
1039static int
1040lpfc_linkup(struct lpfc_hba *phba)
1041{
549e55cd
JS
1042 struct lpfc_vport **vports;
1043 int i;
d496b9a7 1044 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
92d7f7b0
JS
1045
1046 phba->link_state = LPFC_LINK_UP;
1047
1048 /* Unblock fabric iocbs if they are blocked */
1049 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
1050 del_timer_sync(&phba->fabric_block_timer);
1051
549e55cd
JS
1052 vports = lpfc_create_vport_work_array(phba);
1053 if (vports != NULL)
6fb120a7 1054 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
549e55cd 1055 lpfc_linkup_port(vports[i]);
09372820 1056 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 1057
d496b9a7
JS
1058 /* Clear the pport flogi counter in case the link down was
1059 * absorbed without an ACQE. No lock here - in worker thread
1060 * and discovery is synchronized.
1061 */
1062 spin_lock_irq(shost->host_lock);
1063 phba->pport->rcv_flogi_cnt = 0;
1064 spin_unlock_irq(shost->host_lock);
0a9e9687
JS
1065
1066 /* reinitialize initial FLOGI flag */
1067 phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
1068 phba->defer_flogi_acc_flag = false;
1069
dea3101e 1070 return 0;
1071}
1072
1073/*
1074 * This routine handles processing a CLEAR_LA mailbox
1075 * command upon completion. It is setup in the LPFC_MBOXQ
1076 * as the completion routine when the command is
895427bd 1077 * handed off to the SLI layer. SLI3 only.
dea3101e 1078 */
a6ababd2 1079static void
2e0fef85 1080lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 1081{
2e0fef85
JS
1082 struct lpfc_vport *vport = pmb->vport;
1083 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1084 struct lpfc_sli *psli = &phba->sli;
04c68496 1085 MAILBOX_t *mb = &pmb->u.mb;
dea3101e 1086 uint32_t control;
1087
dea3101e 1088 /* Since we don't do discovery right now, turn these off here */
895427bd
JS
1089 psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1090 psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
dea3101e 1091
1092 /* Check for error */
1093 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
92d7f7b0 1094 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
e8b62011
JS
1095 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1096 "0320 CLEAR_LA mbxStatus error x%x hba "
1097 "state x%x\n",
1098 mb->mbxStatus, vport->port_state);
2e0fef85 1099 phba->link_state = LPFC_HBA_ERROR;
dea3101e 1100 goto out;
1101 }
1102
92d7f7b0
JS
1103 if (vport->port_type == LPFC_PHYSICAL_PORT)
1104 phba->link_state = LPFC_HBA_READY;
1105
1106 spin_lock_irq(&phba->hbalock);
1107 psli->sli_flag |= LPFC_PROCESS_LA;
1108 control = readl(phba->HCregaddr);
1109 control |= HC_LAINT_ENA;
1110 writel(control, phba->HCregaddr);
1111 readl(phba->HCregaddr); /* flush */
1112 spin_unlock_irq(&phba->hbalock);
1b32f6aa 1113 mempool_free(pmb, phba->mbox_mem_pool);
92d7f7b0 1114 return;
dea3101e 1115
dea3101e 1116out:
1117 /* Device Discovery completes */
e8b62011
JS
1118 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1119 "0225 Device Discovery completes\n");
2e0fef85 1120 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 1121
2e0fef85 1122 spin_lock_irq(shost->host_lock);
58da1ffb 1123 vport->fc_flag &= ~FC_ABORT_DISCOVERY;
2e0fef85 1124 spin_unlock_irq(shost->host_lock);
dea3101e 1125
2e0fef85 1126 lpfc_can_disctmo(vport);
dea3101e 1127
1128 /* turn on Link Attention interrupts */
2e0fef85
JS
1129
1130 spin_lock_irq(&phba->hbalock);
dea3101e 1131 psli->sli_flag |= LPFC_PROCESS_LA;
1132 control = readl(phba->HCregaddr);
1133 control |= HC_LAINT_ENA;
1134 writel(control, phba->HCregaddr);
1135 readl(phba->HCregaddr); /* flush */
2e0fef85 1136 spin_unlock_irq(&phba->hbalock);
dea3101e 1137
1138 return;
1139}
1140
2e0fef85 1141
d6de08cc 1142void
25594c6b 1143lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 1144{
2e0fef85 1145 struct lpfc_vport *vport = pmb->vport;
dea3101e 1146
04c68496 1147 if (pmb->u.mb.mbxStatus)
dea3101e 1148 goto out;
dea3101e 1149
25594c6b
JW
1150 mempool_free(pmb, phba->mbox_mem_pool);
1151
1b51197d
JS
1152 /* don't perform discovery for SLI4 loopback diagnostic test */
1153 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1154 !(phba->hba_flag & HBA_FCOE_MODE) &&
1155 (phba->link_flag & LS_LOOPBACK_MODE))
1156 return;
1157
76a95d75 1158 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
2e0fef85
JS
1159 vport->fc_flag & FC_PUBLIC_LOOP &&
1160 !(vport->fc_flag & FC_LBIT)) {
25594c6b 1161 /* Need to wait for FAN - use discovery timer
2e0fef85 1162 * for timeout. port_state is identically
25594c6b
JW
1163 * LPFC_LOCAL_CFG_LINK while waiting for FAN
1164 */
2e0fef85 1165 lpfc_set_disctmo(vport);
25594c6b 1166 return;
92d7f7b0 1167 }
dea3101e 1168
2e0fef85 1169 /* Start discovery by sending a FLOGI. port_state is identically
835214f5
JS
1170 * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending
1171 * the FLOGI is being deferred till after MBX_READ_SPARAM completes.
25594c6b 1172 */
835214f5
JS
1173 if (vport->port_state != LPFC_FLOGI) {
1174 if (!(phba->hba_flag & HBA_DEFER_FLOGI))
1175 lpfc_initial_flogi(vport);
1176 } else {
1177 if (vport->fc_flag & FC_PT2PT)
1178 lpfc_disc_start(vport);
1179 }
25594c6b 1180 return;
dea3101e 1181
1182out:
e8b62011
JS
1183 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1184 "0306 CONFIG_LINK mbxStatus error x%x "
1185 "HBA state x%x\n",
04c68496 1186 pmb->u.mb.mbxStatus, vport->port_state);
92d7f7b0 1187 mempool_free(pmb, phba->mbox_mem_pool);
25594c6b 1188
92d7f7b0 1189 lpfc_linkdown(phba);
25594c6b 1190
e8b62011
JS
1191 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1192 "0200 CONFIG_LINK bad hba state x%x\n",
1193 vport->port_state);
dea3101e 1194
92d7f7b0 1195 lpfc_issue_clear_la(phba, vport);
dea3101e 1196 return;
1197}
1198
7d791df7
JS
1199/**
1200 * lpfc_sli4_clear_fcf_rr_bmask
1201 * @phba pointer to the struct lpfc_hba for this port.
1202 * This fucnction resets the round robin bit mask and clears the
1203 * fcf priority list. The list deletions are done while holding the
1204 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
1205 * from the lpfc_fcf_pri record.
1206 **/
1207void
1208lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
1209{
1210 struct lpfc_fcf_pri *fcf_pri;
1211 struct lpfc_fcf_pri *next_fcf_pri;
1212 memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
1213 spin_lock_irq(&phba->hbalock);
1214 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
1215 &phba->fcf.fcf_pri_list, list) {
1216 list_del_init(&fcf_pri->list);
1217 fcf_pri->fcf_rec.flag = 0;
1218 }
1219 spin_unlock_irq(&phba->hbalock);
1220}
6fb120a7
JS
1221static void
1222lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1223{
1224 struct lpfc_vport *vport = mboxq->vport;
6fb120a7
JS
1225
1226 if (mboxq->u.mb.mbxStatus) {
1227 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1228 "2017 REG_FCFI mbxStatus error x%x "
1229 "HBA state x%x\n",
1230 mboxq->u.mb.mbxStatus, vport->port_state);
a93ff37a 1231 goto fail_out;
6fb120a7
JS
1232 }
1233
1234 /* Start FCoE discovery by sending a FLOGI. */
1235 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1236 /* Set the FCFI registered flag */
d439d286 1237 spin_lock_irq(&phba->hbalock);
6fb120a7 1238 phba->fcf.fcf_flag |= FCF_REGISTERED;
d439d286 1239 spin_unlock_irq(&phba->hbalock);
a93ff37a 1240
32b9793f 1241 /* If there is a pending FCoE event, restart FCF table scan. */
7d791df7
JS
1242 if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
1243 lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
a93ff37a
JS
1244 goto fail_out;
1245
1246 /* Mark successful completion of FCF table scan */
d439d286 1247 spin_lock_irq(&phba->hbalock);
ecfd03c6 1248 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
a93ff37a
JS
1249 phba->hba_flag &= ~FCF_TS_INPROG;
1250 if (vport->port_state != LPFC_FLOGI) {
1251 phba->hba_flag |= FCF_RR_INPROG;
1252 spin_unlock_irq(&phba->hbalock);
76a95d75 1253 lpfc_issue_init_vfi(vport);
a93ff37a
JS
1254 goto out;
1255 }
1256 spin_unlock_irq(&phba->hbalock);
1257 goto out;
6fb120a7 1258
a93ff37a
JS
1259fail_out:
1260 spin_lock_irq(&phba->hbalock);
1261 phba->hba_flag &= ~FCF_RR_INPROG;
1262 spin_unlock_irq(&phba->hbalock);
1263out:
6fb120a7 1264 mempool_free(mboxq, phba->mbox_mem_pool);
6fb120a7
JS
1265}
1266
1267/**
1268 * lpfc_fab_name_match - Check if the fcf fabric name match.
1269 * @fab_name: pointer to fabric name.
1270 * @new_fcf_record: pointer to fcf record.
1271 *
1272 * This routine compare the fcf record's fabric name with provided
1273 * fabric name. If the fabric name are identical this function
1274 * returns 1 else return 0.
1275 **/
1276static uint32_t
1277lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1278{
ecfd03c6
JS
1279 if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
1280 return 0;
1281 if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
1282 return 0;
1283 if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
1284 return 0;
1285 if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
1286 return 0;
1287 if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
1288 return 0;
1289 if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
6fb120a7 1290 return 0;
ecfd03c6
JS
1291 if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
1292 return 0;
1293 if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
1294 return 0;
1295 return 1;
6fb120a7
JS
1296}
1297
8fa38513
JS
1298/**
1299 * lpfc_sw_name_match - Check if the fcf switch name match.
1300 * @fab_name: pointer to fabric name.
1301 * @new_fcf_record: pointer to fcf record.
1302 *
1303 * This routine compare the fcf record's switch name with provided
1304 * switch name. If the switch name are identical this function
1305 * returns 1 else return 0.
1306 **/
1307static uint32_t
1308lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1309{
ecfd03c6
JS
1310 if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
1311 return 0;
1312 if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
1313 return 0;
1314 if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
8fa38513 1315 return 0;
ecfd03c6
JS
1316 if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
1317 return 0;
1318 if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
1319 return 0;
1320 if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
1321 return 0;
1322 if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
1323 return 0;
1324 if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
1325 return 0;
1326 return 1;
8fa38513
JS
1327}
1328
6fb120a7
JS
1329/**
1330 * lpfc_mac_addr_match - Check if the fcf mac address match.
ecfd03c6 1331 * @mac_addr: pointer to mac address.
6fb120a7
JS
1332 * @new_fcf_record: pointer to fcf record.
1333 *
1334 * This routine compare the fcf record's mac address with HBA's
1335 * FCF mac address. If the mac addresses are identical this function
1336 * returns 1 else return 0.
1337 **/
1338static uint32_t
ecfd03c6 1339lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
6fb120a7 1340{
ecfd03c6
JS
1341 if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
1342 return 0;
1343 if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
1344 return 0;
1345 if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
1346 return 0;
1347 if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
1348 return 0;
1349 if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
1350 return 0;
1351 if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
6fb120a7 1352 return 0;
ecfd03c6
JS
1353 return 1;
1354}
1355
/* Return true when the current and new VLAN ids are the same. */
static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return curr_vlan_id == new_vlan_id;
}
1361
7d791df7 1362/**
7d791df7
JS
1363 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
1364 * @phba: pointer to lpfc hba data structure.
1365 * @fcf_index: Index for the lpfc_fcf_record.
1366 * @new_fcf_record: pointer to hba fcf record.
1367 *
1368 * This routine updates the driver FCF priority record from the new HBA FCF
88acb4d9
DK
1369 * record. The hbalock is asserted held in the code path calling this
1370 * routine.
7d791df7
JS
1371 **/
1372static void
1373__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
1374 struct fcf_record *new_fcf_record
1375 )
1376{
1377 struct lpfc_fcf_pri *fcf_pri;
1378
1379 fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1380 fcf_pri->fcf_rec.fcf_index = fcf_index;
1381 /* FCF record priority */
1382 fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
1383
1384}
1385
6fb120a7
JS
1386/**
1387 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
ecfd03c6 1388 * @fcf: pointer to driver fcf record.
6fb120a7
JS
1389 * @new_fcf_record: pointer to fcf record.
1390 *
1391 * This routine copies the FCF information from the FCF
1392 * record to lpfc_hba data structure.
1393 **/
1394static void
ecfd03c6
JS
1395lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
1396 struct fcf_record *new_fcf_record)
6fb120a7 1397{
ecfd03c6
JS
1398 /* Fabric name */
1399 fcf_rec->fabric_name[0] =
6fb120a7 1400 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
ecfd03c6 1401 fcf_rec->fabric_name[1] =
6fb120a7 1402 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
ecfd03c6 1403 fcf_rec->fabric_name[2] =
6fb120a7 1404 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
ecfd03c6 1405 fcf_rec->fabric_name[3] =
6fb120a7 1406 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
ecfd03c6 1407 fcf_rec->fabric_name[4] =
6fb120a7 1408 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
ecfd03c6 1409 fcf_rec->fabric_name[5] =
6fb120a7 1410 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
ecfd03c6 1411 fcf_rec->fabric_name[6] =
6fb120a7 1412 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
ecfd03c6 1413 fcf_rec->fabric_name[7] =
6fb120a7 1414 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
ecfd03c6
JS
1415 /* Mac address */
1416 fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1417 fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1418 fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1419 fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1420 fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1421 fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1422 /* FCF record index */
1423 fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1424 /* FCF record priority */
1425 fcf_rec->priority = new_fcf_record->fip_priority;
1426 /* Switch name */
1427 fcf_rec->switch_name[0] =
8fa38513 1428 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
ecfd03c6 1429 fcf_rec->switch_name[1] =
8fa38513 1430 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
ecfd03c6 1431 fcf_rec->switch_name[2] =
8fa38513 1432 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
ecfd03c6 1433 fcf_rec->switch_name[3] =
8fa38513 1434 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
ecfd03c6 1435 fcf_rec->switch_name[4] =
8fa38513 1436 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
ecfd03c6 1437 fcf_rec->switch_name[5] =
8fa38513 1438 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
ecfd03c6 1439 fcf_rec->switch_name[6] =
8fa38513 1440 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
ecfd03c6 1441 fcf_rec->switch_name[7] =
8fa38513 1442 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
6fb120a7
JS
1443}
1444
ecfd03c6
JS
1445/**
1446 * lpfc_update_fcf_record - Update driver fcf record
1447 * @phba: pointer to lpfc hba data structure.
1448 * @fcf_rec: pointer to driver fcf record.
1449 * @new_fcf_record: pointer to hba fcf record.
1450 * @addr_mode: address mode to be set to the driver fcf record.
1451 * @vlan_id: vlan tag to be set to the driver fcf record.
1452 * @flag: flag bits to be set to the driver fcf record.
1453 *
1454 * This routine updates the driver FCF record from the new HBA FCF record
1455 * together with the address mode, vlan_id, and other informations. This
88acb4d9 1456 * routine is called with the hbalock held.
ecfd03c6
JS
1457 **/
1458static void
1459__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
1460 struct fcf_record *new_fcf_record, uint32_t addr_mode,
1461 uint16_t vlan_id, uint32_t flag)
1462{
1c2ba475
JT
1463 lockdep_assert_held(&phba->hbalock);
1464
ecfd03c6
JS
1465 /* Copy the fields from the HBA's FCF record */
1466 lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
1467 /* Update other fields of driver FCF record */
1468 fcf_rec->addr_mode = addr_mode;
1469 fcf_rec->vlan_id = vlan_id;
1470 fcf_rec->flag |= (flag | RECORD_VALID);
7d791df7
JS
1471 __lpfc_update_fcf_record_pri(phba,
1472 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
1473 new_fcf_record);
ecfd03c6
JS
1474}
1475
6fb120a7
JS
1476/**
1477 * lpfc_register_fcf - Register the FCF with hba.
1478 * @phba: pointer to lpfc hba data structure.
1479 *
1480 * This routine issues a register fcfi mailbox command to register
1481 * the fcf with HBA.
1482 **/
1483static void
1484lpfc_register_fcf(struct lpfc_hba *phba)
1485{
1486 LPFC_MBOXQ_t *fcf_mbxq;
1487 int rc;
6fb120a7 1488
d439d286 1489 spin_lock_irq(&phba->hbalock);
25985edc 1490 /* If the FCF is not available do nothing. */
6fb120a7 1491 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
a93ff37a 1492 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
d439d286 1493 spin_unlock_irq(&phba->hbalock);
6fb120a7
JS
1494 return;
1495 }
1496
1497 /* The FCF is already registered, start discovery */
1498 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
ecfd03c6 1499 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
a93ff37a 1500 phba->hba_flag &= ~FCF_TS_INPROG;
e74c03c8
JS
1501 if (phba->pport->port_state != LPFC_FLOGI &&
1502 phba->pport->fc_flag & FC_FABRIC) {
a93ff37a
JS
1503 phba->hba_flag |= FCF_RR_INPROG;
1504 spin_unlock_irq(&phba->hbalock);
bf08611b 1505 lpfc_initial_flogi(phba->pport);
a93ff37a
JS
1506 return;
1507 }
1508 spin_unlock_irq(&phba->hbalock);
6fb120a7
JS
1509 return;
1510 }
d439d286 1511 spin_unlock_irq(&phba->hbalock);
6fb120a7 1512
a93ff37a 1513 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4d9ab994 1514 if (!fcf_mbxq) {
d439d286 1515 spin_lock_irq(&phba->hbalock);
a93ff37a 1516 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
d439d286 1517 spin_unlock_irq(&phba->hbalock);
6fb120a7 1518 return;
4d9ab994 1519 }
6fb120a7
JS
1520
1521 lpfc_reg_fcfi(phba, fcf_mbxq);
1522 fcf_mbxq->vport = phba->pport;
1523 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1524 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
4d9ab994 1525 if (rc == MBX_NOT_FINISHED) {
d439d286 1526 spin_lock_irq(&phba->hbalock);
a93ff37a 1527 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
d439d286 1528 spin_unlock_irq(&phba->hbalock);
6fb120a7 1529 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
4d9ab994 1530 }
6fb120a7
JS
1531
1532 return;
1533}
1534
1535/**
1536 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
1537 * @phba: pointer to lpfc hba data structure.
1538 * @new_fcf_record: pointer to fcf record.
1539 * @boot_flag: Indicates if this record used by boot bios.
1540 * @addr_mode: The address mode to be used by this FCF
ecfd03c6 1541 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
6fb120a7
JS
1542 *
1543 * This routine compare the fcf record with connect list obtained from the
1544 * config region to decide if this FCF can be used for SAN discovery. It returns
1545 * 1 if this record can be used for SAN discovery else return zero. If this FCF
1546 * record can be used for SAN discovery, the boot_flag will indicate if this FCF
1547 * is used by boot bios and addr_mode will indicate the addressing mode to be
1548 * used for this FCF when the function returns.
1549 * If the FCF record need to be used with a particular vlan id, the vlan is
1550 * set in the vlan_id on return of the function. If not VLAN tagging need to
dbb6b3ab 1551 * be used with the FCF vlan_id will be set to LPFC_FCOE_NULL_VID;
6fb120a7
JS
1552 **/
1553static int
1554lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1555 struct fcf_record *new_fcf_record,
1556 uint32_t *boot_flag, uint32_t *addr_mode,
1557 uint16_t *vlan_id)
1558{
1559 struct lpfc_fcf_conn_entry *conn_entry;
4d9ab994
JS
1560 int i, j, fcf_vlan_id = 0;
1561
1562 /* Find the lowest VLAN id in the FCF record */
1563 for (i = 0; i < 512; i++) {
1564 if (new_fcf_record->vlan_bitmap[i]) {
1565 fcf_vlan_id = i * 8;
1566 j = 0;
1567 while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
1568 j++;
1569 fcf_vlan_id++;
1570 }
1571 break;
1572 }
1573 }
6fb120a7 1574
26979ced 1575 /* FCF not valid/available or solicitation in progress */
0c287589 1576 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
26979ced
JS
1577 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
1578 bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
0c287589
JS
1579 return 0;
1580
45ed1190 1581 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
6fb120a7
JS
1582 *boot_flag = 0;
1583 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1584 new_fcf_record);
1585 if (phba->valid_vlan)
1586 *vlan_id = phba->vlan_id;
1587 else
dbb6b3ab 1588 *vlan_id = LPFC_FCOE_NULL_VID;
6fb120a7
JS
1589 return 1;
1590 }
1591
1592 /*
1593 * If there are no FCF connection table entry, driver connect to all
1594 * FCFs.
1595 */
1596 if (list_empty(&phba->fcf_conn_rec_list)) {
1597 *boot_flag = 0;
1598 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1599 new_fcf_record);
0c287589
JS
1600
1601 /*
1602 * When there are no FCF connect entries, use driver's default
1603 * addressing mode - FPMA.
1604 */
1605 if (*addr_mode & LPFC_FCF_FPMA)
1606 *addr_mode = LPFC_FCF_FPMA;
1607
4d9ab994
JS
1608 /* If FCF record report a vlan id use that vlan id */
1609 if (fcf_vlan_id)
1610 *vlan_id = fcf_vlan_id;
1611 else
dbb6b3ab 1612 *vlan_id = LPFC_FCOE_NULL_VID;
6fb120a7
JS
1613 return 1;
1614 }
1615
ecfd03c6
JS
1616 list_for_each_entry(conn_entry,
1617 &phba->fcf_conn_rec_list, list) {
6fb120a7
JS
1618 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1619 continue;
1620
1621 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1622 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
8fa38513
JS
1623 new_fcf_record))
1624 continue;
1625 if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
1626 !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
1627 new_fcf_record))
6fb120a7 1628 continue;
6fb120a7
JS
1629 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1630 /*
1631 * If the vlan bit map does not have the bit set for the
1632 * vlan id to be used, then it is not a match.
1633 */
1634 if (!(new_fcf_record->vlan_bitmap
1635 [conn_entry->conn_rec.vlan_tag / 8] &
1636 (1 << (conn_entry->conn_rec.vlan_tag % 8))))
1637 continue;
1638 }
1639
0c287589
JS
1640 /*
1641 * If connection record does not support any addressing mode,
1642 * skip the FCF record.
1643 */
1644 if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
1645 & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
1646 continue;
1647
6fb120a7
JS
1648 /*
1649 * Check if the connection record specifies a required
1650 * addressing mode.
1651 */
1652 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1653 !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
1654
1655 /*
1656 * If SPMA required but FCF not support this continue.
1657 */
1658 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1659 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1660 new_fcf_record) & LPFC_FCF_SPMA))
1661 continue;
1662
1663 /*
1664 * If FPMA required but FCF not support this continue.
1665 */
1666 if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1667 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1668 new_fcf_record) & LPFC_FCF_FPMA))
1669 continue;
1670 }
1671
1672 /*
1673 * This fcf record matches filtering criteria.
1674 */
1675 if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
1676 *boot_flag = 1;
1677 else
1678 *boot_flag = 0;
1679
0c287589
JS
1680 /*
1681 * If user did not specify any addressing mode, or if the
25985edc 1682 * preferred addressing mode specified by user is not supported
0c287589
JS
1683 * by FCF, allow fabric to pick the addressing mode.
1684 */
6fb120a7
JS
1685 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1686 new_fcf_record);
1687 /*
1688 * If the user specified a required address mode, assign that
1689 * address mode
1690 */
1691 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1692 (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
1693 *addr_mode = (conn_entry->conn_rec.flags &
1694 FCFCNCT_AM_SPMA) ?
1695 LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1696 /*
25985edc 1697 * If the user specified a preferred address mode, use the
6fb120a7
JS
1698 * addr mode only if FCF support the addr_mode.
1699 */
1700 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1701 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1702 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1703 (*addr_mode & LPFC_FCF_SPMA))
1704 *addr_mode = LPFC_FCF_SPMA;
1705 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1706 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1707 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1708 (*addr_mode & LPFC_FCF_FPMA))
1709 *addr_mode = LPFC_FCF_FPMA;
6fb120a7 1710
4d9ab994 1711 /* If matching connect list has a vlan id, use it */
6fb120a7
JS
1712 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1713 *vlan_id = conn_entry->conn_rec.vlan_tag;
4d9ab994
JS
1714 /*
1715 * If no vlan id is specified in connect list, use the vlan id
1716 * in the FCF record
1717 */
1718 else if (fcf_vlan_id)
1719 *vlan_id = fcf_vlan_id;
6fb120a7 1720 else
dbb6b3ab 1721 *vlan_id = LPFC_FCOE_NULL_VID;
6fb120a7
JS
1722
1723 return 1;
1724 }
1725
1726 return 0;
1727}
1728
32b9793f
JS
/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * This function checks if any fcoe event occurred while the driver was
 * scanning FCF entries. If there is a pending event, it will restart the
 * FCF scanning and return 1, else return 0.
 */
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	/* fcf_flag is shared state; it is only modified under hbalock */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		/* Link still up: restart the scan from the first FCF entry */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Link went down: do not continue FCF discovery and clear
		 * FCF_TS_INPROG flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}
1791
6fb120a7 1792/**
999d813f
JS
1793 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
1794 * @phba: pointer to lpfc hba data structure.
1795 * @fcf_cnt: number of eligible fcf record seen so far.
1796 *
1797 * This function makes an running random selection decision on FCF record to
1798 * use through a sequence of @fcf_cnt eligible FCF records with equal
1799 * probability. To perform integer manunipulation of random numbers with
1800 * size unit32_t, the lower 16 bits of the 32-bit random number returned
3b60a64f 1801 * from prandom_u32() are taken as the random random number generated.
999d813f
JS
1802 *
1803 * Returns true when outcome is for the newly read FCF record should be
1804 * chosen; otherwise, return false when outcome is for keeping the previously
1805 * chosen FCF record.
1806 **/
1807static bool
1808lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
1809{
1810 uint32_t rand_num;
1811
1812 /* Get 16-bit uniform random number */
3b60a64f 1813 rand_num = 0xFFFF & prandom_u32();
999d813f
JS
1814
1815 /* Decision with probability 1/fcf_cnt */
1816 if ((fcf_cnt * rand_num) < 0xFFFF)
1817 return true;
1818 else
1819 return false;
1820}
1821
/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 */
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status, if_type;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	/* Same src/dst: in-place copy; presumably performs the byte-order
	 * conversion of the config header — confirm in lpfc_sli_pcimem_bcopy.
	 */
	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	lpfc_sli_pcimem_bcopy(shdr, shdr,
			      sizeof(union lpfc_sli4_cfg_shdr));
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		/* An empty FCF table is reported at INFO level; any other
		 * mailbox failure is a genuine error.
		 */
		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
		    if_type == LPFC_SLI_INTF_IF_TYPE_2)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	/* The FCF record payload immediately follows the table header */
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	/* Only swap up to the vlan bitmap; words 137/138 are handled below */
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
				offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}
1892
/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if the LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	/* Single multi-line log message; each field is extracted from the
	 * record's bit-fields via bf_get.
	 */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFCF_SOL       : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}
1953
dbb6b3ab
JS
/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to an existing FCF record.
 * @new_fcf_record: pointer to a new FCF record.
 * @new_vlan_id: vlan id from the new FCF record.
 *
 * This function performs matching test of a new FCF record against an existing
 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
 * will not be used as part of the FCF record matching criteria.
 *
 * Returns true if all the fields match, otherwise returns false.
 */
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	/* vlan id is compared only when the caller did not ask to ignore it */
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}
1986
a93ff37a
JS
/**
 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
 * @vport: Pointer to vport object.
 * @fcf_index: index to next fcf.
 *
 * This function processes the roundrobin fcf failover to the next fcf index.
 * When this function is invoked, there will be a current fcf registered
 * for flogi.
 * Return: 0 for continue retrying flogi on currently registered fcf;
 *         1 for stop flogi on currently registered fcf;
 */
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	struct lpfc_hba *phba = vport->phba;
	int rc;

	/* LPFC_FCOE_FCF_NEXT_NONE means the roundrobin bmask is exhausted */
	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			/* Devloss expired: drop the in-use FCF and rescan */
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;

		/* One full-table rediscovery is attempted before giving up */
		if (!phba->fcf.fcf_redisc_attempted) {
			lpfc_unregister_fcf(phba);

			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (!rc) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"3195 Rediscover FCF table\n");
				phba->fcf.fcf_redisc_attempted = 1;
				lpfc_sli4_clear_fcf_rr_bmask(phba);
			} else {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"3196 Rediscover FCF table "
						"failed. Status:x%x\n", rc);
			}
		} else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					"3197 Already rediscover FCF table "
					"attempted. No more retry\n");
		}
		goto stop_flogi_current_fcf;
	} else {
		/* Try the next eligible FCF from the roundrobin bmask */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
				"2794 Try FLOGI roundrobin FCF failover to "
				"(x%x)\n", fcf_index);
		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
		if (rc)
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2761 FLOGI roundrobin FCF failover "
					"failed (rc:x%x) to read FCF (x%x)\n",
					rc, phba->fcf.current_rec.fcf_indx);
		else
			goto stop_flogi_current_fcf;
	}
	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}
2065
7d791df7
JS
/**
 * lpfc_sli4_fcf_pri_list_del
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete
 * This routine checks the on list flag of the fcf_index to be deleted.
 * If it is on the list then it is removed from the list, and the flag
 * is cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
 **/
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
		"3058 deleting idx x%x pri x%x flg x%x\n",
		fcf_index, new_fcf_pri->fcf_rec.priority,
		 new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		/* Removing an entry at the current priority shrinks the
		 * running eligible count used for random selection.
		 */
		if (phba->fcf.current_rec.priority ==
				new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}
2095
/**
 * lpfc_sli4_set_fcf_flogi_fail
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so that the round robin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
 **/
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;
	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}
2115
/**
 * lpfc_sli4_fcf_pri_list_add
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 * @new_fcf_record: pointer to the fcf record whose priority is read.
 * This routine checks the priority of the fcf_index to be added.
 * If it is a lower priority than the current head of the fcf_pri list
 * then it is added to the list in the right order.
 * If it is the same priority as the current head of the list then it
 * is added to the head of the list and its bit in the rr_bmask is set.
 * If the fcf_index to be added is of a higher priority than the current
 * head of the list then the rr_bmask is cleared, its bit is set in the
 * rr_bmask and it is added to the head of the list.
 * returns:
 * 0=success 1=failure
 **/
static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
	uint16_t fcf_index,
	struct fcf_record *new_fcf_record)
{
	uint16_t current_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	struct lpfc_fcf_pri *new_fcf_pri;
	int ret;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
		"3059 adding idx x%x pri x%x flg x%x\n",
		fcf_index, new_fcf_record->fip_priority,
		 new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	/* If already linked, unlink first so it can be re-inserted in order */
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
		list_del_init(&new_fcf_pri->list);
	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		/* First entry: becomes the head and seeds the rr bmask */
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	/* The first set bit in rr_bmask identifies the current (best)
	 * priority level the roundrobin is operating at.
	 */
	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				LPFC_SLI4_FCF_TBL_INDX_MAX);
	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		ret = 0; /* Empty rr list */
		goto out;
	}
	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
	if (new_fcf_pri->fcf_rec.priority <=  current_fcf_pri) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		if (new_fcf_pri->fcf_rec.priority <  current_fcf_pri) {
			/* Strictly better priority: restart roundrobin at
			 * this level only.
			 */
			memset(phba->fcf.fcf_rr_bmask, 0,
				sizeof(*phba->fcf.fcf_rr_bmask));
			/* fcfs_at_this_priority_level = 1; */
			phba->fcf.eligible_fcf_cnt = 1;
		} else
			/* fcfs_at_this_priority_level++; */
			phba->fcf.eligible_fcf_cnt++;
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	/* Worse priority than the head: walk the list to find the
	 * insertion point that keeps it sorted by priority.
	 */
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				&phba->fcf.fcf_pri_list, list) {
		if (new_fcf_pri->fcf_rec.priority <=
				fcf_pri->fcf_rec.priority) {
			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
				list_add(&new_fcf_pri->list,
						&phba->fcf.fcf_pri_list);
			else
				list_add(&new_fcf_pri->list,
					 &((struct lpfc_fcf_pri *)
					fcf_pri->list.prev)->list);
			ret = 0;
			goto out;
		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
			|| new_fcf_pri->fcf_rec.priority <
				next_fcf_pri->fcf_rec.priority) {
			/* End of list, or it slots between this entry and
			 * the next one.
			 */
			list_add(&new_fcf_pri->list, &fcf_pri->list);
			ret = 0;
			goto out;
		}
		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
			continue;

	}
	ret = 1;
out:
	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
	spin_unlock_irq(&phba->hbalock);
	return ret;
}
2212
0c9ab6f5
JS
/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kick-starts
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
void
lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	struct lpfc_fcf_rec *fcf_rec = NULL;
	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
	bool select_new_fcf;
	int rc;

	/* If there is pending FCoE event restart FCF table scan */
	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2765 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		/* Let next new FCF event trigger fast failover */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Check the FCF record against the connection list */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry; otherwise, this is an eligible FCF
	 * record for roundrobin FCF failover.
	 */
	if (!rc) {
		/* Ineligible record: drop it from the priority list */
		lpfc_sli4_fcf_pri_list_del(phba,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2781 FCF (x%x) failed connection "
				"list check: (x%x/x%x/%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_avail,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_valid,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_sol,
				       new_fcf_record));
		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
			    phba->fcf.current_rec.fcf_indx) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2862 FCF (x%x) matches property "
					"of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
				goto read_next_fcf;
			}
			/*
			 * In case the current in-use FCF record becomes
			 * invalid/unavailable during FCF discovery that
			 * was not triggered by fast FCF failover process,
			 * treat it as fast FCF failover.
			 */
			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"2835 Invalid in-use FCF "
						"(x%x), enter FCF failover "
						"table scan.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_mbox_cmd_free(phba, mboxq);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
		}
		goto read_next_fcf;
	} else {
		/* Eligible record: insert into the priority list */
		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
						new_fcf_record);
		if (rc)
			goto read_next_fcf;
	}

	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery. The condition that a rescan
	 * matches the in-use FCF record: fabric name, switch name, mac
	 * address, and vlan_id.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
			lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
			new_fcf_record, vlan_id)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
			    phba->fcf.current_rec.fcf_indx) {
				phba->fcf.fcf_flag |= FCF_AVAILABLE;
				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
					/* Stop FCF redisc wait timer */
					__lpfc_sli4_stop_fcf_redisc_wait_timer(
									phba);
				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
					/* Fast failover, mark completed */
					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2836 New FCF matches in-use "
						"FCF (x%x), port_state:x%x, "
						"fc_flag:x%x\n",
						phba->fcf.current_rec.fcf_indx,
						phba->pport->port_state,
						phba->pport->fc_flag);
				goto out;
			} else
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2863 New FCF (x%x) matches "
					"property of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
		}
		/*
		 * Read next FCF record from HBA searching for the matching
		 * with in-use record only if not during the fast failover
		 * period. In case of fast failover period, it shall try to
		 * determine whether the FCF record just read should be the
		 * next candidate.
		 */
		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
	}
	/*
	 * Update on failover FCF record only if it's in FCF fast-failover
	 * period; otherwise, update on current FCF record.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
		fcf_rec = &phba->fcf.failover_rec;
	else
		fcf_rec = &phba->fcf.current_rec;

	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the driver FCF record does not have boot flag
		 * set and new hba fcf record has boot flag set, use
		 * the new hba fcf record.
		 */
		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
			/* Choose this FCF record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2837 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, BOOT_ENABLE);
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the driver FCF record has boot flag set and the
		 * new hba FCF record does not have boot flag, read
		 * the next FCF record.
		 */
		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the new hba FCF record has lower priority value
		 * than the driver FCF record, use the new record.
		 */
		if (new_fcf_record->fip_priority < fcf_rec->priority) {
			/* Choose the new FCF record with lower priority */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2838 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, 0);
			/* Reset running random FCF selection count */
			phba->fcf.eligible_fcf_cnt = 1;
		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
			/* Equal priority: keep/replace by running random
			 * selection so every equal-priority FCF is equally
			 * likely to be chosen.
			 */
			/* Update running random FCF selection count */
			phba->fcf.eligible_fcf_cnt++;
			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
						phba->fcf.eligible_fcf_cnt);
			if (select_new_fcf) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2839 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
				/* Choose the new FCF by random selection */
				__lpfc_update_fcf_record(phba, fcf_rec,
							 new_fcf_record,
							 addr_mode, vlan_id, 0);
			}
		}
		spin_unlock_irq(&phba->hbalock);
		goto read_next_fcf;
	}
	/*
	 * This is the first suitable FCF record, choose this record for
	 * initial best-fit FCF.
	 */
	if (fcf_rec) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2840 Update initial FCF candidate "
				"with FCF (x%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record));
		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					 addr_mode, vlan_id, (boot_flag ?
					 BOOT_ENABLE : 0));
		phba->fcf.fcf_flag |= FCF_AVAILABLE;
		/* Setup initial running random FCF selection count */
		phba->fcf.eligible_fcf_cnt = 1;
	}
	spin_unlock_irq(&phba->hbalock);
	goto read_next_fcf;

read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
		/* Reached the end of the FCF table */
		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Case of FCF fast failover scan
			 */

			/*
			 * It has not found any suitable FCF record, cancel
			 * FCF scan inprogress, and do nothing
			 */
			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					       "2782 No suitable FCF found: "
					       "(x%x/x%x)\n",
					       phba->fcoe_eventtag_at_fcf_scan,
					       bf_get(lpfc_fcf_record_fcf_index,
						      new_fcf_record));
				spin_lock_irq(&phba->hbalock);
				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
					phba->hba_flag &= ~FCF_TS_INPROG;
					spin_unlock_irq(&phba->hbalock);
					/* Unregister in-use FCF and rescan */
					lpfc_printf_log(phba, KERN_INFO,
							LOG_FIP,
							"2864 On devloss tmo "
							"unreg in-use FCF and "
							"rescan FCF table\n");
					lpfc_unregister_fcf_rescan(phba);
					return;
				}
				/*
				 * Let next new FCF event trigger fast failover
				 */
				phba->hba_flag &= ~FCF_TS_INPROG;
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			/*
			 * It has found a suitable FCF record that is not
			 * the same as in-use FCF record, unregister the
			 * in-use FCF record, replace the in-use FCF record
			 * with the new FCF record, mark FCF fast failover
			 * completed, and then start register the new FCF
			 * record.
			 */

			/* Unregister the current in-use FCF record */
			lpfc_unregister_fcf(phba);

			/* Replace in-use record with the new record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2842 Replace in-use FCF (x%x) "
					"with failover FCF (x%x)\n",
					phba->fcf.current_rec.fcf_indx,
					phba->fcf.failover_rec.fcf_indx);
			memcpy(&phba->fcf.current_rec,
			       &phba->fcf.failover_rec,
			       sizeof(struct lpfc_fcf_rec));
			/*
			 * Mark the fast FCF failover rediscovery completed
			 * and the start of the first round of the roundrobin
			 * FCF failover.
			 */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irq(&phba->hbalock);
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		} else {
			/*
			 * In case of transaction period to fast FCF failover,
			 * do nothing when search to the end of the FCF table.
			 */
			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
				return;

			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
				phba->fcf.fcf_flag & FCF_IN_USE) {
				/*
				 * In case the current in-use FCF record no
				 * longer existed during FCF discovery that
				 * was not triggered by fast FCF failover
				 * process, treat it as fast FCF failover.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2841 In-use FCF record (x%x) "
						"not reported, entering fast "
						"FCF failover mode scanning.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		}
	} else
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}
2588
0c9ab6f5 2589/**
a93ff37a 2590 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
0c9ab6f5
JS
2591 * @phba: pointer to lpfc hba data structure.
2592 * @mboxq: pointer to mailbox object.
2593 *
a93ff37a 2594 * This is the callback function for FLOGI failure roundrobin FCF failover
0c9ab6f5
JS
2595 * read FCF record mailbox command from the eligible FCF record bmask for
2596 * performing the failover. If the FCF read back is not valid/available, it
2597 * fails through to retrying FLOGI to the currently registered FCF again.
2598 * Otherwise, if the FCF read back is valid and available, it will set the
2599 * newly read FCF record to the failover FCF record, unregister currently
2600 * registered FCF record, copy the failover FCF record to the current
2601 * FCF record, and then register the current FCF record before proceeding
2602 * to trying FLOGI on the new failover FCF.
2603 */
2604void
2605lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2606{
2607 struct fcf_record *new_fcf_record;
2608 uint32_t boot_flag, addr_mode;
a93ff37a 2609 uint16_t next_fcf_index, fcf_index;
0c9ab6f5
JS
2610 uint16_t current_fcf_index;
2611 uint16_t vlan_id;
a93ff37a 2612 int rc;
0c9ab6f5 2613
a93ff37a 2614 /* If link state is not up, stop the roundrobin failover process */
0c9ab6f5
JS
2615 if (phba->link_state < LPFC_LINK_UP) {
2616 spin_lock_irq(&phba->hbalock);
2617 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
a93ff37a 2618 phba->hba_flag &= ~FCF_RR_INPROG;
0c9ab6f5 2619 spin_unlock_irq(&phba->hbalock);
a93ff37a 2620 goto out;
0c9ab6f5
JS
2621 }
2622
2623 /* Parse the FCF record from the non-embedded mailbox command */
2624 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2625 &next_fcf_index);
2626 if (!new_fcf_record) {
2627 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2628 "2766 Mailbox command READ_FCF_RECORD "
646a2dd7
JS
2629 "failed to retrieve a FCF record. "
2630 "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
2631 phba->fcf.fcf_flag);
2632 lpfc_unregister_fcf_rescan(phba);
2633 goto out;
0c9ab6f5
JS
2634 }
2635
2636 /* Get the needed parameters from FCF record */
a93ff37a
JS
2637 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2638 &addr_mode, &vlan_id);
0c9ab6f5
JS
2639
2640 /* Log the FCF record information if turned on */
2641 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2642 next_fcf_index);
2643
a93ff37a
JS
2644 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2645 if (!rc) {
2646 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2647 "2848 Remove ineligible FCF (x%x) from "
2648 "from roundrobin bmask\n", fcf_index);
2649 /* Clear roundrobin bmask bit for ineligible FCF */
2650 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2651 /* Perform next round of roundrobin FCF failover */
2652 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2653 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2654 if (rc)
2655 goto out;
2656 goto error_out;
2657 }
2658
2659 if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2660 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2661 "2760 Perform FLOGI roundrobin FCF failover: "
2662 "FCF (x%x) back to FCF (x%x)\n",
2663 phba->fcf.current_rec.fcf_indx, fcf_index);
2664 /* Wait 500 ms before retrying FLOGI to current FCF */
2665 msleep(500);
76a95d75 2666 lpfc_issue_init_vfi(phba->pport);
a93ff37a
JS
2667 goto out;
2668 }
2669
0c9ab6f5 2670 /* Upload new FCF record to the failover FCF record */
dbb6b3ab 2671 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a
JS
2672 "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2673 phba->fcf.failover_rec.fcf_indx, fcf_index);
0c9ab6f5
JS
2674 spin_lock_irq(&phba->hbalock);
2675 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2676 new_fcf_record, addr_mode, vlan_id,
2677 (boot_flag ? BOOT_ENABLE : 0));
2678 spin_unlock_irq(&phba->hbalock);
2679
2680 current_fcf_index = phba->fcf.current_rec.fcf_indx;
2681
2682 /* Unregister the current in-use FCF record */
2683 lpfc_unregister_fcf(phba);
2684
2685 /* Replace in-use record with the new record */
2686 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
2687 sizeof(struct lpfc_fcf_rec));
2688
2689 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a
JS
2690 "2783 Perform FLOGI roundrobin FCF failover: FCF "
2691 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
0c9ab6f5 2692
a93ff37a
JS
2693error_out:
2694 lpfc_register_fcf(phba);
0c9ab6f5
JS
2695out:
2696 lpfc_sli4_mbox_cmd_free(phba, mboxq);
0c9ab6f5
JS
2697}
2698
/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function of read FCF record mailbox command for
 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
 * failover when a new FCF event happened. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask for the eligible FCF record for roundrobin failover.
 *
 * The mailbox is always freed before returning (single "out" exit).
 */
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility; rc == 0 means the
	 * record did not match and must not be added to the bmask.
	 */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);

	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
2756
76a95d75
JS
2757/**
2758 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
2759 * @phba: pointer to lpfc hba data structure.
2760 * @mboxq: pointer to mailbox data structure.
2761 *
2762 * This function handles completion of init vfi mailbox command.
2763 */
b86a6756 2764static void
76a95d75
JS
2765lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2766{
2767 struct lpfc_vport *vport = mboxq->vport;
2768
df9e1b59
JS
2769 /*
2770 * VFI not supported on interface type 0, just do the flogi
2771 * Also continue if the VFI is in use - just use the same one.
2772 */
2773 if (mboxq->u.mb.mbxStatus &&
2774 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2775 LPFC_SLI_INTF_IF_TYPE_0) &&
2776 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
76a95d75
JS
2777 lpfc_printf_vlog(vport, KERN_ERR,
2778 LOG_MBOX,
2779 "2891 Init VFI mailbox failed 0x%x\n",
2780 mboxq->u.mb.mbxStatus);
2781 mempool_free(mboxq, phba->mbox_mem_pool);
2782 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2783 return;
2784 }
73d91e50 2785
76a95d75
JS
2786 lpfc_initial_flogi(vport);
2787 mempool_free(mboxq, phba->mbox_mem_pool);
2788 return;
2789}
2790
2791/**
2792 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
2793 * @vport: pointer to lpfc_vport data structure.
2794 *
2795 * This function issue a init_vfi mailbox command to initialize the VFI and
2796 * VPI for the physical port.
2797 */
2798void
2799lpfc_issue_init_vfi(struct lpfc_vport *vport)
2800{
2801 LPFC_MBOXQ_t *mboxq;
2802 int rc;
2803 struct lpfc_hba *phba = vport->phba;
2804
2805 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2806 if (!mboxq) {
2807 lpfc_printf_vlog(vport, KERN_ERR,
2808 LOG_MBOX, "2892 Failed to allocate "
2809 "init_vfi mailbox\n");
2810 return;
2811 }
2812 lpfc_init_vfi(mboxq, vport);
2813 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
2814 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
2815 if (rc == MBX_NOT_FINISHED) {
2816 lpfc_printf_vlog(vport, KERN_ERR,
2817 LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
2818 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2819 }
2820}
2821
/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 * On failure the vport is marked FC_VPORT_FAILED; on success the
 * NEEDS_INIT_VPI flag is cleared and discovery continues with either
 * REG_VPI (physical port / FDISC done) or an initial FDISC. The mailbox
 * is freed on every exit path.
 */
void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_MBOX,
				 "2609 Init VPI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	/* fc_flag is protected by the SCSI host lock, not hbalock */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		/* Registration requires the fabric controller node */
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_DISCOVERY,
					 "2731 Cannot find fabric "
					 "controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	/* Otherwise start FDISC if the fabric supports NPIV */
	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
2873
ecfd03c6
JS
2874/**
2875 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
2876 * @vport: pointer to lpfc_vport data structure.
2877 *
2878 * This function issue a init_vpi mailbox command to initialize
2879 * VPI for the vport.
2880 */
2881void
2882lpfc_issue_init_vpi(struct lpfc_vport *vport)
2883{
2884 LPFC_MBOXQ_t *mboxq;
16a3a208
JS
2885 int rc, vpi;
2886
2887 if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
2888 vpi = lpfc_alloc_vpi(vport->phba);
2889 if (!vpi) {
2890 lpfc_printf_vlog(vport, KERN_ERR,
2891 LOG_MBOX,
2892 "3303 Failed to obtain vport vpi\n");
2893 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2894 return;
2895 }
2896 vport->vpi = vpi;
2897 }
ecfd03c6
JS
2898
2899 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2900 if (!mboxq) {
2901 lpfc_printf_vlog(vport, KERN_ERR,
2902 LOG_MBOX, "2607 Failed to allocate "
2903 "init_vpi mailbox\n");
2904 return;
2905 }
2906 lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
2907 mboxq->vport = vport;
2908 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
2909 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
2910 if (rc == MBX_NOT_FINISHED) {
2911 lpfc_printf_vlog(vport, KERN_ERR,
2912 LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
2913 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2914 }
2915}
2916
6fb120a7
JS
2917/**
2918 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
2919 * @phba: pointer to lpfc hba data structure.
2920 *
2921 * This function loops through the list of vports on the @phba and issues an
2922 * FDISC if possible.
2923 */
2924void
2925lpfc_start_fdiscs(struct lpfc_hba *phba)
2926{
2927 struct lpfc_vport **vports;
2928 int i;
2929
2930 vports = lpfc_create_vport_work_array(phba);
2931 if (vports != NULL) {
2932 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2933 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
2934 continue;
2935 /* There are no vpi for this vport */
2936 if (vports[i]->vpi > phba->max_vpi) {
2937 lpfc_vport_set_state(vports[i],
2938 FC_VPORT_FAILED);
2939 continue;
2940 }
76a95d75 2941 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6fb120a7
JS
2942 lpfc_vport_set_state(vports[i],
2943 FC_VPORT_LINKDOWN);
2944 continue;
2945 }
1c6834a7 2946 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
ecfd03c6 2947 lpfc_issue_init_vpi(vports[i]);
1c6834a7
JS
2948 continue;
2949 }
6fb120a7
JS
2950 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2951 lpfc_initial_fdisc(vports[i]);
2952 else {
2953 lpfc_vport_set_state(vports[i],
2954 FC_VPORT_NO_FABRIC_SUPP);
2955 lpfc_printf_vlog(vports[i], KERN_ERR,
2956 LOG_ELS,
2957 "0259 No NPIV "
2958 "Fabric support\n");
2959 }
2960 }
2961 }
2962 lpfc_destroy_vport_work_array(phba, vports);
2963}
2964
/**
 * lpfc_mbx_cmpl_reg_vfi - Completion handler for the REG_VFI mailbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * On success, marks the VPI/VFI registered, then either flags the HBA
 * ready (SLI4 loopback), or continues discovery: loop-map discovery for
 * private loop / pt2pt, or FDISCs plus SCR/NS PLOGI for fabric. Mailbox
 * and its DMA buffer (if any) are freed on every path.
 */
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/*
	 * VFI not supported for interface type 0, so ignore any mailbox
	 * error (except VFI in use) and continue with the discovery.
	 */
	if (mboxq->u.mb.mbxStatus &&
	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
			LPFC_SLI_INTF_IF_TYPE_0) &&
	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2018 REG_VFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto out_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto out_free_mem;
	}

	/* If the VFI is already registered, there is nothing else to do
	 * Unless this was a VFI update and we are in PT2PT mode, then
	 * we should drop through to set the port state to ready.
	 */
	if (vport->fc_flag & FC_VFI_REGISTERED)
		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
		      vport->fc_flag & FC_PT2PT))
			goto out_free_mem;

	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* In case SLI4 FC loopback test, we are ready */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->link_flag & LS_LOOPBACK_MODE)) {
		phba->link_state = LPFC_HBA_READY;
		goto out_free_mem;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
			 "alpacnt:%d LinkState:%x topology:%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_myDID,
			 vport->phba->alpa_map[0],
			 phba->link_state, phba->fc_topology);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/*
		 * For private loop or for NPort pt2pt,
		 * just start discovery and we are done.
		 */
		if ((vport->fc_flag & FC_PT2PT) ||
		    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
		    !(vport->fc_flag & FC_PUBLIC_LOOP))) {

			/* Use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			if (vport->fc_flag & FC_PT2PT)
				vport->port_state = LPFC_VPORT_READY;
			else
				lpfc_disc_start(vport);
		} else {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		}
	}

out_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	return;
}
3056
dea3101e 3057static void
2e0fef85 3058lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 3059{
6fb120a7 3060 MAILBOX_t *mb = &pmb->u.mb;
3e1f0718 3061 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
2e0fef85 3062 struct lpfc_vport *vport = pmb->vport;
aeb3c817 3063 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
21bf0b97
JS
3064 struct serv_parm *sp = &vport->fc_sparam;
3065 uint32_t ed_tov;
dea3101e 3066
3067 /* Check for error */
3068 if (mb->mbxStatus) {
3069 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
e8b62011
JS
3070 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3071 "0319 READ_SPARAM mbxStatus error x%x "
3072 "hba state x%x>\n",
3073 mb->mbxStatus, vport->port_state);
dea3101e 3074 lpfc_linkdown(phba);
dea3101e 3075 goto out;
3076 }
3077
2e0fef85 3078 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
dea3101e 3079 sizeof (struct serv_parm));
21bf0b97
JS
3080
3081 ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
3082 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
3083 ed_tov = (ed_tov + 999999) / 1000000;
3084
3085 phba->fc_edtov = ed_tov;
3086 phba->fc_ratov = (2 * ed_tov) / 1000;
3087 if (phba->fc_ratov < FF_DEF_RATOV) {
3088 /* RA_TOV should be atleast 10sec for initial flogi */
3089 phba->fc_ratov = FF_DEF_RATOV;
3090 }
3091
0558056c 3092 lpfc_update_vport_wwn(vport);
aeb3c817 3093 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
92d7f7b0
JS
3094 if (vport->port_type == LPFC_PHYSICAL_PORT) {
3095 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
3096 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
3097 }
3098
dea3101e 3099 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3100 kfree(mp);
2e0fef85 3101 mempool_free(pmb, phba->mbox_mem_pool);
835214f5
JS
3102
3103 /* Check if sending the FLOGI is being deferred to after we get
3104 * up to date CSPs from MBX_READ_SPARAM.
3105 */
3106 if (phba->hba_flag & HBA_DEFER_FLOGI) {
3107 lpfc_initial_flogi(vport);
3108 phba->hba_flag &= ~HBA_DEFER_FLOGI;
3109 }
dea3101e 3110 return;
3111
3112out:
3e1f0718 3113 pmb->ctx_buf = NULL;
dea3101e 3114 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3115 kfree(mp);
92d7f7b0
JS
3116 lpfc_issue_clear_la(phba, vport);
3117 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 3118 return;
3119}
3120
/**
 * lpfc_mbx_process_link_up - Process a READ_TOPOLOGY link-up attention.
 * @phba: pointer to lpfc hba data structure.
 * @la: pointer to the parsed READ_TOPOLOGY mailbox payload.
 *
 * Records link speed and topology, sets loop/fabric addressing state,
 * then brings the link up: issues CONFIG_LINK (FC mode), READ_SPARAM,
 * and for FCoE kicks off the FCF table scan (adding the default FCF
 * record first when FIP is not supported). On any mailbox setup failure
 * the vport is marked FAILED and CLEAR_LA is issued via the "out" path.
 */
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	struct Scsi_Host *shost;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;
	struct fcf_record *fcf_record;
	uint32_t fc_flags = 0;
	unsigned long iflags;

	/* hbalock protects the phba link/topology fields updated below */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);

	/* Only validate the speed against the known set in FC mode */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
		case LPFC_LINK_SPEED_1GHZ:
		case LPFC_LINK_SPEED_2GHZ:
		case LPFC_LINK_SPEED_4GHZ:
		case LPFC_LINK_SPEED_8GHZ:
		case LPFC_LINK_SPEED_10GHZ:
		case LPFC_LINK_SPEED_16GHZ:
		case LPFC_LINK_SPEED_32GHZ:
		case LPFC_LINK_SPEED_64GHZ:
		case LPFC_LINK_SPEED_128GHZ:
			break;
		default:
			phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
			break;
		}
	}

	/* Note (and remember) a topology change across link bounces */
	if (phba->fc_topology &&
	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3314 Toplogy changed was 0x%x is 0x%x\n",
				phba->fc_topology,
				bf_get(lpfc_mbx_read_top_topology, la));
		phba->fc_topology_changed = 1;
	}

	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	shost = lpfc_shost_from_vport(vport);
	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* if npiv is enabled and this adapter supports npiv log
		 * a message that npiv is not supported in this topology
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1309 Link Up Event npiv not supported in loop "
					"topology\n");
		/* Get Loop Map information */
		if (bf_get(lpfc_mbx_read_top_il, la))
			fc_flags |= FC_LBIT;

		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
		i = la->lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				/* Dump the ALPA map 16 bytes at a time */
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			   (phba->sli_rev >= LPFC_SLI_REV3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		fc_flags |= FC_LBIT;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* vport->fc_flag is protected by the SCSI host lock instead */
	if (fc_flags) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag |= fc_flags;
		spin_unlock_irqrestore(shost->host_lock, iflags);
	}

	lpfc_linkup(phba);
	/* NULL so the error path can tell which mailbox was pending */
	sparam_mbox = NULL;

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	}

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		/* lpfc_read_sparam attached a DMA buffer; free it too */
		mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}

	if (phba->hba_flag & HBA_FCOE_MODE) {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that support FCF index 0 and driver
		 * defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					     GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2554 Could not allocate memory for "
					"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2013 Could not manually add FCF "
					"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (phba->hba_flag & FCF_TS_INPROG) {
			/* A table scan is already running; nothing to do */
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irqsave(&phba->hbalock, iflags);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		/* Reset FCF roundrobin bmask for new discovery */
		lpfc_sli4_clear_fcf_rr_bmask(phba);
	} else {
		/* FC mode with BB credit recovery: defer FLOGI until the
		 * READ_SPARAM completion delivers up-to-date CSPs.
		 */
		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
		    !(phba->link_flag & LS_LOOPBACK_MODE))
			phba->hba_flag |= HBA_DEFER_FLOGI;
	}

	/* Prepare for LINK up registrations */
	memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
	scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
		  init_utsname()->nodename);
	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}
3353
3354static void
84774a4d 3355lpfc_enable_la(struct lpfc_hba *phba)
2e0fef85 3356{
dea3101e 3357 uint32_t control;
3358 struct lpfc_sli *psli = &phba->sli;
2e0fef85 3359 spin_lock_irq(&phba->hbalock);
dea3101e 3360 psli->sli_flag |= LPFC_PROCESS_LA;
3772a991
JS
3361 if (phba->sli_rev <= LPFC_SLI_REV3) {
3362 control = readl(phba->HCregaddr);
3363 control |= HC_LAINT_ENA;
3364 writel(control, phba->HCregaddr);
3365 readl(phba->HCregaddr); /* flush */
3366 }
2e0fef85 3367 spin_unlock_irq(&phba->hbalock);
dea3101e 3368}
3369
/**
 * lpfc_mbx_issue_link_down - Handle a link-down attention event.
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the link down, re-arms link attention processing so further
 * LA events are delivered (no CLEAR_LA is required), and releases any
 * FCF that is no longer in use.
 */
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	/* Tear down the link state first */
	lpfc_linkdown(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
	lpfc_enable_la(phba);
	/* Drop any FCF registration left over from the old link */
	lpfc_unregister_unused_fcf(phba);
}
3378
3379
dea3101e 3380/*
76a95d75 3381 * This routine handles processing a READ_TOPOLOGY mailbox
dea3101e 3382 * command upon completion. It is setup in the LPFC_MBOXQ
3383 * as the completion routine when the command is
895427bd 3384 * handed off to the SLI layer. SLI4 only.
dea3101e 3385 */
3386void
76a95d75 3387lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 3388{
2e0fef85
JS
3389 struct lpfc_vport *vport = pmb->vport;
3390 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
76a95d75 3391 struct lpfc_mbx_read_top *la;
895427bd 3392 struct lpfc_sli_ring *pring;
04c68496 3393 MAILBOX_t *mb = &pmb->u.mb;
3e1f0718 3394 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
aeb3c817 3395 uint8_t attn_type;
894bb17f 3396 unsigned long iflags;
dea3101e 3397
0d2b6b83 3398 /* Unblock ELS traffic */
895427bd 3399 pring = lpfc_phba_elsring(phba);
1234a6d5
DK
3400 if (pring)
3401 pring->flag &= ~LPFC_STOP_IOCB_EVENT;
895427bd 3402
dea3101e 3403 /* Check for error */
3404 if (mb->mbxStatus) {
ed957684 3405 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
e8b62011
JS
3406 "1307 READ_LA mbox error x%x state x%x\n",
3407 mb->mbxStatus, vport->port_state);
dea3101e 3408 lpfc_mbx_issue_link_down(phba);
2e0fef85 3409 phba->link_state = LPFC_HBA_ERROR;
76a95d75 3410 goto lpfc_mbx_cmpl_read_topology_free_mbuf;
dea3101e 3411 }
3412
76a95d75 3413 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
aeb3c817 3414 attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
dea3101e 3415
3416 memcpy(&phba->alpa_map[0], mp->virt, 128);
3417
894bb17f 3418 spin_lock_irqsave(shost->host_lock, iflags);
76a95d75 3419 if (bf_get(lpfc_mbx_read_top_pb, la))
2e0fef85 3420 vport->fc_flag |= FC_BYPASSED_MODE;
c9f8735b 3421 else
2e0fef85 3422 vport->fc_flag &= ~FC_BYPASSED_MODE;
894bb17f 3423 spin_unlock_irqrestore(shost->host_lock, iflags);
c9f8735b 3424
be0c0080 3425 if (phba->fc_eventTag <= la->eventTag) {
dea3101e 3426 phba->fc_stat.LinkMultiEvent++;
aeb3c817 3427 if (attn_type == LPFC_ATT_LINK_UP)
dea3101e 3428 if (phba->fc_eventTag != 0)
3429 lpfc_linkdown(phba);
92d7f7b0 3430 }
dea3101e 3431
3432 phba->fc_eventTag = la->eventTag;
be0c0080 3433 if (phba->sli_rev < LPFC_SLI_REV4) {
894bb17f 3434 spin_lock_irqsave(&phba->hbalock, iflags);
be0c0080
JS
3435 if (bf_get(lpfc_mbx_read_top_mm, la))
3436 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3437 else
3438 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
894bb17f 3439 spin_unlock_irqrestore(&phba->hbalock, iflags);
be0c0080 3440 }
dea3101e 3441
4d9ab994 3442 phba->link_events++;
aeb3c817 3443 if ((attn_type == LPFC_ATT_LINK_UP) &&
be0c0080 3444 !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
dea3101e 3445 phba->fc_stat.LinkUp++;
2e0fef85 3446 if (phba->link_flag & LS_LOOPBACK_MODE) {
3163f725 3447 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
e8b62011
JS
3448 "1306 Link Up Event in loop back mode "
3449 "x%x received Data: x%x x%x x%x x%x\n",
3450 la->eventTag, phba->fc_eventTag,
76a95d75
JS
3451 bf_get(lpfc_mbx_read_top_alpa_granted,
3452 la),
3453 bf_get(lpfc_mbx_read_top_link_spd, la),
e8b62011 3454 phba->alpa_map[0]);
5b8bd0c9
JS
3455 } else {
3456 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
e8b62011 3457 "1303 Link Up Event x%x received "
84774a4d 3458 "Data: x%x x%x x%x x%x x%x x%x %d\n",
e8b62011 3459 la->eventTag, phba->fc_eventTag,
76a95d75
JS
3460 bf_get(lpfc_mbx_read_top_alpa_granted,
3461 la),
3462 bf_get(lpfc_mbx_read_top_link_spd, la),
84774a4d 3463 phba->alpa_map[0],
76a95d75
JS
3464 bf_get(lpfc_mbx_read_top_mm, la),
3465 bf_get(lpfc_mbx_read_top_fa, la),
84774a4d 3466 phba->wait_4_mlo_maint_flg);
5b8bd0c9 3467 }
92d7f7b0 3468 lpfc_mbx_process_link_up(phba, la);
aeb3c817
JS
3469 } else if (attn_type == LPFC_ATT_LINK_DOWN ||
3470 attn_type == LPFC_ATT_UNEXP_WWPN) {
dea3101e 3471 phba->fc_stat.LinkDown++;
1b51197d 3472 if (phba->link_flag & LS_LOOPBACK_MODE)
3163f725
JS
3473 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3474 "1308 Link Down Event in loop back mode "
3475 "x%x received "
3476 "Data: x%x x%x x%x\n",
3477 la->eventTag, phba->fc_eventTag,
3478 phba->pport->port_state, vport->fc_flag);
aeb3c817
JS
3479 else if (attn_type == LPFC_ATT_UNEXP_WWPN)
3480 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
b9da814c
JS
3481 "1313 Link Down Unexpected FA WWPN Event x%x "
3482 "received Data: x%x x%x x%x x%x x%x\n",
aeb3c817
JS
3483 la->eventTag, phba->fc_eventTag,
3484 phba->pport->port_state, vport->fc_flag,
3485 bf_get(lpfc_mbx_read_top_mm, la),
3486 bf_get(lpfc_mbx_read_top_fa, la));
1b51197d 3487 else
3163f725 3488 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
e8b62011 3489 "1305 Link Down Event x%x received "
84774a4d
JS
3490 "Data: x%x x%x x%x x%x x%x\n",
3491 la->eventTag, phba->fc_eventTag,
3492 phba->pport->port_state, vport->fc_flag,
76a95d75
JS
3493 bf_get(lpfc_mbx_read_top_mm, la),
3494 bf_get(lpfc_mbx_read_top_fa, la));
84774a4d
JS
3495 lpfc_mbx_issue_link_down(phba);
3496 }
aeb3c817
JS
3497 if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
3498 attn_type == LPFC_ATT_LINK_UP) {
84774a4d
JS
3499 if (phba->link_state != LPFC_LINK_DOWN) {
3500 phba->fc_stat.LinkDown++;
3501 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3502 "1312 Link Down Event x%x received "
dea3101e 3503 "Data: x%x x%x x%x\n",
e8b62011 3504 la->eventTag, phba->fc_eventTag,
2e0fef85 3505 phba->pport->port_state, vport->fc_flag);
84774a4d
JS
3506 lpfc_mbx_issue_link_down(phba);
3507 } else
3508 lpfc_enable_la(phba);
3509
3510 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3511 "1310 Menlo Maint Mode Link up Event x%x rcvd "
3512 "Data: x%x x%x x%x\n",
3513 la->eventTag, phba->fc_eventTag,
3514 phba->pport->port_state, vport->fc_flag);
3515 /*
3516 * The cmnd that triggered this will be waiting for this
3517 * signal.
3518 */
3519 /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
3520 if (phba->wait_4_mlo_maint_flg) {
3521 phba->wait_4_mlo_maint_flg = 0;
3522 wake_up_interruptible(&phba->wait_4_mlo_m_q);
3163f725 3523 }
84774a4d
JS
3524 }
3525
be0c0080
JS
3526 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3527 bf_get(lpfc_mbx_read_top_fa, la)) {
3528 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
84774a4d
JS
3529 lpfc_issue_clear_la(phba, vport);
3530 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
76a95d75
JS
3531 "1311 fa %d\n",
3532 bf_get(lpfc_mbx_read_top_fa, la));
dea3101e 3533 }
3534
76a95d75 3535lpfc_mbx_cmpl_read_topology_free_mbuf:
dea3101e 3536 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3537 kfree(mp);
3538 mempool_free(pmb, phba->mbox_mem_pool);
3539 return;
3540}
3541
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 *
 * On completion the mailbox context pointers are detached, a stale or
 * superseded registration is unwound, and the node state machine is
 * driven with NLP_EVT_CMPL_REG_LOGIN. The ndlp reference taken when the
 * mailbox was issued is dropped at the end.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Detach the context pointers so the generic mailbox-free paths
	 * do not free them a second time; this function owns them now.
	 */
	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);
	/* REG_LOGIN is no longer outstanding for this node. */
	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We rcvd a rscn after issuing this
		 * mbox reg login, we may have cycled
		 * back through the state and be
		 * back at reg login state so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(shost->host_lock);

		/*
		 * We cannot leave the RPI registered because
		 * if we go thru discovery again for this ndlp
		 * a subsequent REG_RPI will fail.
		 */
		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
		lpfc_unreg_rpi(vport, ndlp);
	}

	/* Call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}
3603
92d7f7b0
JS
/*
 * Completion handler for the UNREG_VPI mailbox command issued by
 * lpfc_mbx_unreg_vpi(). Regardless of the mailbox status, the VPI is
 * marked unregistered and flagged as needing re-registration; a busy
 * VPI (0x9700) additionally schedules an HBA reset unless the driver
 * is unloading.
 */
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:	/* benign: VPI already in the desired state */
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
			vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
				LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_cleanup_vports_rrqs(vport, NULL);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}
3641
d7c255b2 3642int
92d7f7b0
JS
3643lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
3644{
3645 struct lpfc_hba *phba = vport->phba;
3646 LPFC_MBOXQ_t *mbox;
3647 int rc;
3648
3649 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3650 if (!mbox)
d7c255b2 3651 return 1;
92d7f7b0
JS
3652
3653 lpfc_unreg_vpi(phba, vport->vpi, mbox);
3654 mbox->vport = vport;
3655 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
0b727fea 3656 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
92d7f7b0 3657 if (rc == MBX_NOT_FINISHED) {
e8b62011
JS
3658 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
3659 "1800 Could not issue unreg_vpi\n");
92d7f7b0
JS
3660 mempool_free(mbox, phba->mbox_mem_pool);
3661 vport->unreg_vpi_cmpl = VPORT_ERROR;
d7c255b2 3662 return rc;
92d7f7b0 3663 }
d7c255b2 3664 return 0;
92d7f7b0
JS
3665}
3666
/*
 * Completion handler for the REG_VPI mailbox command. On failure the
 * vport is marked FAILED and any NVME local/target port bindings are
 * refreshed; on success the VPI is marked registered and NPR-node
 * PLOGI-based discovery is kicked off, transitioning the vport to
 * READY.
 */
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:	/* recognized REG_VPI failure statuses */
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		/* Address is no longer valid after a failed registration. */
		vport->fc_myDID = 0;

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
			if (phba->nvmet_support)
				lpfc_nvmet_update_targetport(phba);
			else
				lpfc_nvme_update_localport(vport);
		}
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	/* Nothing to discover: cancel the discovery timer now. */
	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
3718
21e9a0a5
JS
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issue a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function create vports
 * based on the information returned from the HBA.
 **/
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int mbx_wait_rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	/* Accumulate the config region into vport_info one DUMP at a time. */
	vport_buff = (uint8_t *) vport_info;
	do {
		/* free dma buffer from previous round */
		if (pmb->ctx_buf) {
			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
							LPFC_MBOX_TMO);

		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0544 lpfc_create_static_vport failed to"
				" issue dump mailbox command ret 0x%x "
				"status 0x%x\n",
				mbx_wait_rc, mb->mbxStatus);
			goto out;
		}

		/* SLI4 returns the data via DMA buffer; SLI3 embeds it in
		 * the mailbox response. Clamp the copy so it never runs
		 * past the end of vport_info.
		 */
		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
				sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				vport_buff + offset,
				byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		offset < sizeof(struct static_vport_info));


	/* Validate the region header before trusting its contents. */
	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
			!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0545 lpfc_create_static_vport bad"
			" information header 0x%x 0x%x\n",
			le32_to_cpu(vport_info->signature),
			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		/* Empty slots have zero WWPN/WWNN. */
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0546 lpfc_create_static_vport failed to"
				" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	/* On MBX_TIMEOUT the firmware may still own the mailbox/DMA
	 * buffer, so they must NOT be freed here.
	 */
	if (mbx_wait_rc != MBX_TIMEOUT) {
		if (pmb->ctx_buf) {
			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return;
}
3863
dea3101e 3864/*
3865 * This routine handles processing a Fabric REG_LOGIN mailbox
3866 * command upon completion. It is setup in the LPFC_MBOXQ
3867 * as the completion routine when the command is
3868 * handed off to the SLI layer.
3869 */
3870void
2e0fef85 3871lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 3872{
92d7f7b0 3873 struct lpfc_vport *vport = pmb->vport;
21e9a0a5 3874 MAILBOX_t *mb = &pmb->u.mb;
3e1f0718 3875 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
92d7f7b0 3876 struct lpfc_nodelist *ndlp;
df9e1b59 3877 struct Scsi_Host *shost;
dea3101e 3878
3e1f0718
JS
3879 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3880 pmb->ctx_ndlp = NULL;
3881 pmb->ctx_buf = NULL;
d439d286 3882
dea3101e 3883 if (mb->mbxStatus) {
21e9a0a5
JS
3884 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3885 "0258 Register Fabric login error: 0x%x\n",
3886 mb->mbxStatus);
dea3101e 3887 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3888 kfree(mp);
329f9bc7 3889 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 3890
76a95d75 3891 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
92d7f7b0
JS
3892 /* FLOGI failed, use loop map to make discovery list */
3893 lpfc_disc_list_loopmap(vport);
3894
3895 /* Start discovery */
3896 lpfc_disc_start(vport);
e47c9093
JS
3897 /* Decrement the reference count to ndlp after the
3898 * reference to the ndlp are done.
3899 */
3900 lpfc_nlp_put(ndlp);
92d7f7b0
JS
3901 return;
3902 }
3903
3904 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
e47c9093
JS
3905 /* Decrement the reference count to ndlp after the reference
3906 * to the ndlp are done.
3907 */
3908 lpfc_nlp_put(ndlp);
dea3101e 3909 return;
3910 }
3911
6d368e53
JS
3912 if (phba->sli_rev < LPFC_SLI_REV4)
3913 ndlp->nlp_rpi = mb->un.varWords[0];
4042629e 3914 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
dea3101e 3915 ndlp->nlp_type |= NLP_FABRIC;
2e0fef85 3916 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
dea3101e 3917
2e0fef85 3918 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
4b40c59e
JS
3919 /* when physical port receive logo donot start
3920 * vport discovery */
3921 if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
3922 lpfc_start_fdiscs(phba);
df9e1b59
JS
3923 else {
3924 shost = lpfc_shost_from_vport(vport);
3925 spin_lock_irq(shost->host_lock);
4b40c59e 3926 vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
df9e1b59
JS
3927 spin_unlock_irq(shost->host_lock);
3928 }
92d7f7b0 3929 lpfc_do_scr_ns_plogi(phba, vport);
dea3101e 3930 }
3931
3932 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3933 kfree(mp);
329f9bc7 3934 mempool_free(pmb, phba->mbox_mem_pool);
e47c9093
JS
3935
3936 /* Drop the reference count from the mbox at the end after
3937 * all the current reference to the ndlp have been done.
3938 */
3939 lpfc_nlp_put(ndlp);
dea3101e 3940 return;
3941}
3942
a0f2d3ef
JS
 /*
 * This routine will issue a GID_FT for each FC4 Type supported
 * by the driver. ALL GID_FTs must complete before discovery is started.
 *
 * Each successfully issued query increments vport->gidft_inp, which the
 * completion path decrements; the return value is the number of GID_FT
 * queries now outstanding (0 means none could be issued and discovery
 * should be finished up by the caller).
 */
int
lpfc_issue_gidft(struct lpfc_vport *vport)
{
	/* Good status, issue CT Request to NameServer */
	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
			/* Cannot issue NameServer FCP Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
					 "0604 %s FC TYPE %x %s\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_FCP,
					 "Finishing discovery.");
			return 0;
		}
		vport->gidft_inp++;
	}

	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
			/* Cannot issue NameServer NVME Query, so finish up
			 * discovery
			 */
			lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
					 "0605 %s FC_TYPE %x %s %d\n",
					 "Failed to issue GID_FT to ",
					 FC_TYPE_NVME,
					 "Finishing discovery: gidftinp ",
					 vport->gidft_inp);
			/* Only bail out if the FCP query also failed;
			 * otherwise the FCP GID_FT is still in flight.
			 */
			if (vport->gidft_inp == 0)
				return 0;
		} else
			vport->gidft_inp++;
	}
	return vport->gidft_inp;
}
3986
7ea92eb4
JS
3987/**
3988 * lpfc_issue_gidpt - issue a GID_PT for all N_Ports
3989 * @vport: The virtual port for which this call is being executed.
3990 *
3991 * This routine will issue a GID_PT to get a list of all N_Ports
3992 *
3993 * Return value :
3994 * 0 - Failure to issue a GID_PT
3995 * 1 - GID_PT issued
3996 **/
3997int
3998lpfc_issue_gidpt(struct lpfc_vport *vport)
3999{
4000 /* Good status, issue CT Request to NameServer */
4001 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
4002 /* Cannot issue NameServer FCP Query, so finish up
4003 * discovery
4004 */
4005 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
4006 "0606 %s Port TYPE %x %s\n",
4007 "Failed to issue GID_PT to ",
4008 GID_PT_N_PORT,
4009 "Finishing discovery.");
4010 return 0;
4011 }
4012 vport->gidft_inp++;
4013 return 1;
4014}
4015
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 *
 * On success it registers the port's FC4 types with the NameServer,
 * issues SCR/RDF, and kicks off GID_FT discovery. NOTE: the "out:"
 * label deliberately sits INSIDE the mbxStatus error branch; the
 * "goto out" near the bottom (when no GID_FT could be issued) jumps
 * back up to share that error-path cleanup, including
 * lpfc_nlp_not_used() and the loop-topology fallback.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	struct lpfc_vport *vport = pmb->vport;

	/* Detach contexts; this function now owns mp and the ndlp ref. */
	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;
	vport->gidft_inp = 0;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);

out:
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	/* SLI4 assigns the RPI elsewhere; only SLI3 returns it here. */
	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
			 "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
			 kref_read(&ndlp->kref),
			 ndlp->nlp_usg_map, ndlp);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Register each enabled FC4 type's features. */
		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);

		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
				    FC_TYPE_NVME);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, 0);

		lpfc_issue_els_rdf(vport, 0);
	}

	vport->fc_ns_retry = 0;
	if (lpfc_issue_gidft(vport) == 0)
		goto out;

	/*
	 * At this point in time we may need to wait for multiple
	 * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
	 *
	 * decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
4117
/*
 * Register (or re-register) @ndlp with the FC transport as a remote
 * port, dropping any stale registration first. Sets the rport roles
 * from the node's FC4 type bits and caches the assigned SCSI target id
 * on the node. No-op for NVME-only configurations or while unloading.
 */
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba *phba = vport->phba;

	/* NVME-only ports are handled by the NVME transport, not here. */
	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
		return;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port. But fc_remote_port_add zeros the space to which
	 * rport->dd_data points. So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	rport = ndlp->rport;
	if (rport) {
		rdata = rport->dd_data;
		/* break the link before dropping the ref */
		ndlp->rport = NULL;
		if (rdata) {
			if (rdata->pnode == ndlp)
				lpfc_nlp_put(ndlp);
			rdata->pnode = NULL;
		}
		/* drop reference for earlier registration */
		put_device(&rport->dev);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add: did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	/* Don't add the remote port if unloading. */
	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	/* Take a node reference on behalf of the rport's back-pointer. */
	rdata->pnode = lpfc_nlp_get(ndlp);

	/* Translate the node's FC4 type bits into transport roles. */
	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3183 rport register x%06x, rport x%px role x%x\n",
			 ndlp->nlp_DID, rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
4202
4203static void
2e0fef85 4204lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
19a7b4ae
JSEC
4205{
4206 struct fc_rport *rport = ndlp->rport;
2ea259ee 4207 struct lpfc_vport *vport = ndlp->vport;
a0f2d3ef 4208
f6e84790 4209 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
a0f2d3ef 4210 return;
c01f3208 4211
2ea259ee 4212 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
858c9f6c
JS
4213 "rport delete: did:x%x flg:x%x type x%x",
4214 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4215
2ea259ee 4216 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
32350664 4217 "3184 rport unregister x%06x, rport x%px\n",
34f5ad8b
JS
4218 ndlp->nlp_DID, rport);
4219
19a7b4ae 4220 fc_remote_port_delete(rport);
dea3101e 4221
4222 return;
4223}
4224
de0c5b32 4225static void
2e0fef85 4226lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
dea3101e 4227{
2e0fef85 4228 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
894bb17f 4229 unsigned long iflags;
2e0fef85 4230
894bb17f 4231 spin_lock_irqsave(shost->host_lock, iflags);
de0c5b32
JS
4232 switch (state) {
4233 case NLP_STE_UNUSED_NODE:
2e0fef85 4234 vport->fc_unused_cnt += count;
de0c5b32
JS
4235 break;
4236 case NLP_STE_PLOGI_ISSUE:
2e0fef85 4237 vport->fc_plogi_cnt += count;
de0c5b32
JS
4238 break;
4239 case NLP_STE_ADISC_ISSUE:
2e0fef85 4240 vport->fc_adisc_cnt += count;
dea3101e 4241 break;
de0c5b32 4242 case NLP_STE_REG_LOGIN_ISSUE:
2e0fef85 4243 vport->fc_reglogin_cnt += count;
de0c5b32
JS
4244 break;
4245 case NLP_STE_PRLI_ISSUE:
2e0fef85 4246 vport->fc_prli_cnt += count;
de0c5b32
JS
4247 break;
4248 case NLP_STE_UNMAPPED_NODE:
2e0fef85 4249 vport->fc_unmap_cnt += count;
de0c5b32
JS
4250 break;
4251 case NLP_STE_MAPPED_NODE:
2e0fef85 4252 vport->fc_map_cnt += count;
de0c5b32
JS
4253 break;
4254 case NLP_STE_NPR_NODE:
646a2dd7
JS
4255 if (vport->fc_npr_cnt == 0 && count == -1)
4256 vport->fc_npr_cnt = 0;
4257 else
4258 vport->fc_npr_cnt += count;
de0c5b32
JS
4259 break;
4260 }
894bb17f 4261 spin_unlock_irqrestore(shost->host_lock, iflags);
de0c5b32 4262}
66a9ed66 4263
de0c5b32 4264static void
2e0fef85 4265lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
de0c5b32
JS
4266 int old_state, int new_state)
4267{
2e0fef85
JS
4268 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4269
de0c5b32 4270 if (new_state == NLP_STE_UNMAPPED_NODE) {
de0c5b32
JS
4271 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4272 ndlp->nlp_type |= NLP_FC_NODE;
4273 }
4274 if (new_state == NLP_STE_MAPPED_NODE)
4275 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4276 if (new_state == NLP_STE_NPR_NODE)
4277 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4278
a0f2d3ef
JS
4279 /* FCP and NVME Transport interface */
4280 if ((old_state == NLP_STE_MAPPED_NODE ||
4281 old_state == NLP_STE_UNMAPPED_NODE)) {
4282 if (ndlp->rport) {
4283 vport->phba->nport_event_cnt++;
4284 lpfc_unregister_remote_port(ndlp);
4285 }
4286
1c5b12f7 4287 if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
a0f2d3ef 4288 vport->phba->nport_event_cnt++;
3b5bde69
JS
4289 if (vport->phba->nvmet_support == 0) {
4290 /* Start devloss if target. */
4291 if (ndlp->nlp_type & NLP_NVME_TARGET)
4292 lpfc_nvme_unregister_port(vport, ndlp);
4293 } else {
6599e124
JS
4294 /* NVMET has no upcall. */
4295 lpfc_nlp_put(ndlp);
3b5bde69 4296 }
a0f2d3ef 4297 }
de0c5b32 4298 }
dea3101e 4299
a0f2d3ef
JS
4300 /* FCP and NVME Transport interfaces */
4301
de0c5b32
JS
4302 if (new_state == NLP_STE_MAPPED_NODE ||
4303 new_state == NLP_STE_UNMAPPED_NODE) {
01a8aed6 4304 if (ndlp->nlp_fc4_type ||
92721c3b
JS
4305 ndlp->nlp_DID == Fabric_DID ||
4306 ndlp->nlp_DID == NameServer_DID ||
4307 ndlp->nlp_DID == FDMI_DID) {
a0f2d3ef
JS
4308 vport->phba->nport_event_cnt++;
4309 /*
4310 * Tell the fc transport about the port, if we haven't
4311 * already. If we have, and it's a scsi entity, be
4312 */
4313 lpfc_register_remote_port(vport, ndlp);
4314 }
4315 /* Notify the NVME transport of this new rport. */
09559e81
JS
4316 if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
4317 ndlp->nlp_fc4_type & NLP_FC4_NVME) {
a0f2d3ef
JS
4318 if (vport->phba->nvmet_support == 0) {
4319 /* Register this rport with the transport.
3b5bde69
JS
4320 * Only NVME Target Rports are registered with
4321 * the transport.
a0f2d3ef 4322 */
3b5bde69
JS
4323 if (ndlp->nlp_type & NLP_NVME_TARGET) {
4324 vport->phba->nport_event_cnt++;
4325 lpfc_nvme_register_port(vport, ndlp);
4326 }
8c258641
JS
4327 } else {
4328 /* Just take an NDLP ref count since the
4329 * target does not register rports.
4330 */
4331 lpfc_nlp_get(ndlp);
a0f2d3ef
JS
4332 }
4333 }
de0c5b32 4334 }
a0f2d3ef 4335
ea2151b4
JS
4336 if ((new_state == NLP_STE_MAPPED_NODE) &&
4337 (vport->stat_data_enabled)) {
4338 /*
4339 * A new target is discovered, if there is no buffer for
4340 * statistical data collection allocate buffer.
4341 */
4342 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
4343 sizeof(struct lpfc_scsicmd_bkt),
4344 GFP_KERNEL);
4345
4346 if (!ndlp->lat_data)
4347 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
4348 "0286 lpfc_nlp_state_cleanup failed to "
4349 "allocate statistical data buffer DID "
4350 "0x%x\n", ndlp->nlp_DID);
4351 }
858c9f6c 4352 /*
a0f2d3ef
JS
4353 * If the node just added to Mapped list was an FCP target,
4354 * but the remote port registration failed or assigned a target
4355 * id outside the presentable range - move the node to the
4356 * Unmapped List.
858c9f6c 4357 */
a0f2d3ef
JS
4358 if ((new_state == NLP_STE_MAPPED_NODE) &&
4359 (ndlp->nlp_type & NLP_FCP_TARGET) &&
de0c5b32
JS
4360 (!ndlp->rport ||
4361 ndlp->rport->scsi_target_id == -1 ||
4362 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
2e0fef85 4363 spin_lock_irq(shost->host_lock);
de0c5b32 4364 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
2e0fef85
JS
4365 spin_unlock_irq(shost->host_lock);
4366 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
dea3101e 4367 }
de0c5b32
JS
4368}
4369
685f0bf7
JS
4370static char *
4371lpfc_nlp_state_name(char *buffer, size_t size, int state)
4372{
4373 static char *states[] = {
4374 [NLP_STE_UNUSED_NODE] = "UNUSED",
4375 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
4376 [NLP_STE_ADISC_ISSUE] = "ADISC",
4377 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
4378 [NLP_STE_PRLI_ISSUE] = "PRLI",
086a345f 4379 [NLP_STE_LOGO_ISSUE] = "LOGO",
685f0bf7
JS
4380 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
4381 [NLP_STE_MAPPED_NODE] = "MAPPED",
4382 [NLP_STE_NPR_NODE] = "NPR",
4383 };
4384
311464ec 4385 if (state < NLP_STE_MAX_STATE && states[state])
685f0bf7
JS
4386 strlcpy(buffer, states[state], size);
4387 else
4388 snprintf(buffer, size, "unknown (%d)", state);
4389 return buffer;
4390}
4391
de0c5b32 4392void
2e0fef85
JS
4393lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4394 int state)
de0c5b32 4395{
2e0fef85 4396 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
de0c5b32 4397 int old_state = ndlp->nlp_state;
685f0bf7 4398 char name1[16], name2[16];
de0c5b32 4399
e8b62011
JS
4400 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4401 "0904 NPort state transition x%06x, %s -> %s\n",
4402 ndlp->nlp_DID,
4403 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
4404 lpfc_nlp_state_name(name2, sizeof(name2), state));
858c9f6c
JS
4405
4406 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4407 "node statechg did:x%x old:%d ste:%d",
4408 ndlp->nlp_DID, old_state, state);
4409
de0c5b32 4410 if (old_state == NLP_STE_NPR_NODE &&
de0c5b32 4411 state != NLP_STE_NPR_NODE)
2e0fef85 4412 lpfc_cancel_retry_delay_tmo(vport, ndlp);
de0c5b32
JS
4413 if (old_state == NLP_STE_UNMAPPED_NODE) {
4414 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4415 ndlp->nlp_type &= ~NLP_FC_NODE;
4416 }
4417
685f0bf7 4418 if (list_empty(&ndlp->nlp_listp)) {
2e0fef85
JS
4419 spin_lock_irq(shost->host_lock);
4420 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4421 spin_unlock_irq(shost->host_lock);
685f0bf7 4422 } else if (old_state)
2e0fef85 4423 lpfc_nlp_counters(vport, old_state, -1);
de0c5b32
JS
4424
4425 ndlp->nlp_state = state;
2e0fef85
JS
4426 lpfc_nlp_counters(vport, state, 1);
4427 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
de0c5b32
JS
4428}
4429
e47c9093
JS
4430void
4431lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4432{
4433 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4434
4435 if (list_empty(&ndlp->nlp_listp)) {
4436 spin_lock_irq(shost->host_lock);
4437 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4438 spin_unlock_irq(shost->host_lock);
4439 }
4440}
4441
de0c5b32 4442void
2e0fef85 4443lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
de0c5b32 4444{
2e0fef85
JS
4445 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4446
0d2b6b83 4447 lpfc_cancel_retry_delay_tmo(vport, ndlp);
de0c5b32 4448 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
2e0fef85
JS
4449 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4450 spin_lock_irq(shost->host_lock);
685f0bf7 4451 list_del_init(&ndlp->nlp_listp);
2e0fef85 4452 spin_unlock_irq(shost->host_lock);
858c9f6c 4453 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
e47c9093
JS
4454 NLP_STE_UNUSED_NODE);
4455}
4456
4d9db01e 4457static void
e47c9093
JS
4458lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4459{
0d2b6b83 4460 lpfc_cancel_retry_delay_tmo(vport, ndlp);
e47c9093
JS
4461 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4462 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4463 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4464 NLP_STE_UNUSED_NODE);
4465}
109f6ed0 4466/**
3621a710 4467 * lpfc_initialize_node - Initialize all fields of node object
109f6ed0
JS
4468 * @vport: Pointer to Virtual Port object.
4469 * @ndlp: Pointer to FC node object.
4470 * @did: FC_ID of the node.
a257bf90
JS
4471 *
4472 * This function is always called when node object need to be initialized.
4473 * It initializes all the fields of the node object. Although the reference
4474 * to phba from @ndlp can be obtained indirectly through it's reference to
4475 * @vport, a direct reference to phba is taken here by @ndlp. This is due
4476 * to the life-span of the @ndlp might go beyond the existence of @vport as
4477 * the final release of ndlp is determined by its reference count. And, the
4478 * operation on @ndlp needs the reference to phba.
109f6ed0
JS
4479 **/
4480static inline void
4481lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4482 uint32_t did)
4483{
4484 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4485 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
f22eb4d3 4486 timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
109f6ed0
JS
4487 ndlp->nlp_DID = did;
4488 ndlp->vport = vport;
a257bf90 4489 ndlp->phba = vport->phba;
109f6ed0 4490 ndlp->nlp_sid = NLP_NO_SID;
a0f2d3ef 4491 ndlp->nlp_fc4_type = NLP_FC4_NONE;
109f6ed0
JS
4492 kref_init(&ndlp->kref);
4493 NLP_INT_NODE_ACT(ndlp);
4494 atomic_set(&ndlp->cmd_pending, 0);
7dc517df 4495 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
dea16bda 4496 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
109f6ed0 4497}
e47c9093
JS
4498
4499struct lpfc_nodelist *
4500lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4501 int state)
4502{
4503 struct lpfc_hba *phba = vport->phba;
dea16bda 4504 uint32_t did, flag;
e47c9093 4505 unsigned long flags;
cff261f6 4506 unsigned long *active_rrqs_xri_bitmap = NULL;
9d3d340d 4507 int rpi = LPFC_RPI_ALLOC_ERROR;
dea16bda 4508 uint32_t defer_did = 0;
e47c9093
JS
4509
4510 if (!ndlp)
4511 return NULL;
4512
9d3d340d 4513 if (phba->sli_rev == LPFC_SLI_REV4) {
b95b2119
JS
4514 if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
4515 rpi = lpfc_sli4_alloc_rpi(vport->phba);
4516 else
4517 rpi = ndlp->nlp_rpi;
4518
4519 if (rpi == LPFC_RPI_ALLOC_ERROR) {
4520 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4521 "0359 %s: ndlp:x%px "
4522 "usgmap:x%x refcnt:%d FAILED RPI "
4523 " ALLOC\n",
4524 __func__,
4525 (void *)ndlp, ndlp->nlp_usg_map,
4526 kref_read(&ndlp->kref));
9d3d340d 4527 return NULL;
b95b2119 4528 }
9d3d340d
JS
4529 }
4530
e47c9093
JS
4531 spin_lock_irqsave(&phba->ndlp_lock, flags);
4532 /* The ndlp should not be in memory free mode */
4533 if (NLP_CHK_FREE_REQ(ndlp)) {
4534 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4535 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
32350664 4536 "0277 %s: ndlp:x%px "
e47c9093 4537 "usgmap:x%x refcnt:%d\n",
32350664 4538 __func__, (void *)ndlp, ndlp->nlp_usg_map,
2c935bc5 4539 kref_read(&ndlp->kref));
9d3d340d 4540 goto free_rpi;
e47c9093
JS
4541 }
4542 /* The ndlp should not already be in active mode */
4543 if (NLP_CHK_NODE_ACT(ndlp)) {
4544 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4545 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
32350664 4546 "0278 %s: ndlp:x%px "
e47c9093 4547 "usgmap:x%x refcnt:%d\n",
32350664 4548 __func__, (void *)ndlp, ndlp->nlp_usg_map,
2c935bc5 4549 kref_read(&ndlp->kref));
9d3d340d 4550 goto free_rpi;
e47c9093
JS
4551 }
4552
dea16bda 4553 /* First preserve the orginal DID, xri_bitmap and some flags */
e47c9093 4554 did = ndlp->nlp_DID;
dea16bda
JS
4555 flag = (ndlp->nlp_flag & NLP_UNREG_INP);
4556 if (flag & NLP_UNREG_INP)
4557 defer_did = ndlp->nlp_defer_did;
cff261f6
JS
4558 if (phba->sli_rev == LPFC_SLI_REV4)
4559 active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
e47c9093 4560
dea16bda 4561 /* Zero ndlp except of ndlp linked list pointer */
e47c9093
JS
4562 memset((((char *)ndlp) + sizeof (struct list_head)), 0,
4563 sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
e47c9093 4564
dea16bda
JS
4565 /* Next reinitialize and restore saved objects */
4566 lpfc_initialize_node(vport, ndlp, did);
4567 ndlp->nlp_flag |= flag;
4568 if (flag & NLP_UNREG_INP)
4569 ndlp->nlp_defer_did = defer_did;
cff261f6
JS
4570 if (phba->sli_rev == LPFC_SLI_REV4)
4571 ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
4572
e47c9093 4573 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
be6bb941 4574 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
9d3d340d 4575 ndlp->nlp_rpi = rpi;
be6bb941
JS
4576 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4577 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
32350664 4578 "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
be6bb941 4579 ndlp->nlp_flag,
2c935bc5 4580 kref_read(&ndlp->kref),
be6bb941
JS
4581 ndlp->nlp_usg_map, ndlp);
4582 }
725dd399 4583
e47c9093
JS
4584
4585 if (state != NLP_STE_UNUSED_NODE)
4586 lpfc_nlp_set_state(vport, ndlp, state);
b95b2119
JS
4587 else
4588 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4589 "0013 rpi:%x DID:%x flg:%x refcnt:%d "
4590 "map:%x x%px STATE=UNUSED\n",
4591 ndlp->nlp_rpi, ndlp->nlp_DID,
4592 ndlp->nlp_flag,
4593 kref_read(&ndlp->kref),
4594 ndlp->nlp_usg_map, ndlp);
e47c9093
JS
4595
4596 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4597 "node enable: did:x%x",
4598 ndlp->nlp_DID, 0, 0);
4599 return ndlp;
9d3d340d
JS
4600
4601free_rpi:
0f154226 4602 if (phba->sli_rev == LPFC_SLI_REV4) {
9d3d340d 4603 lpfc_sli4_free_rpi(vport->phba, rpi);
0f154226
JS
4604 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
4605 }
9d3d340d 4606 return NULL;
de0c5b32
JS
4607}
4608
4609void
2e0fef85 4610lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
de0c5b32 4611{
87af33fe 4612 /*
fa4066b6 4613 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
87af33fe 4614 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
fa4066b6
JS
4615 * the ndlp from the vport. The ndlp marked as UNUSED on the list
4616 * until ALL other outstanding threads have completed. We check
4617 * that the ndlp not already in the UNUSED state before we proceed.
87af33fe 4618 */
fa4066b6
JS
4619 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4620 return;
51ef4c26 4621 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
be6bb941 4622 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
1151e3ec 4623 lpfc_cleanup_vports_rrqs(vport, ndlp);
be6bb941 4624 lpfc_unreg_rpi(vport, ndlp);
be6bb941 4625 }
0290217a
JS
4626
4627 lpfc_nlp_put(ndlp);
98c9ea5c 4628 return;
dea3101e 4629}
4630
4631/*
4632 * Start / ReStart rescue timer for Discovery / RSCN handling
4633 */
4634void
2e0fef85 4635lpfc_set_disctmo(struct lpfc_vport *vport)
dea3101e 4636{
2e0fef85
JS
4637 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4638 struct lpfc_hba *phba = vport->phba;
dea3101e 4639 uint32_t tmo;
4640
2e0fef85 4641 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
025dfdaf 4642 /* For FAN, timeout should be greater than edtov */
c9f8735b
JW
4643 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
4644 } else {
025dfdaf 4645 /* Normal discovery timeout should be > than ELS/CT timeout
c9f8735b
JW
4646 * FC spec states we need 3 * ratov for CT requests
4647 */
4648 tmo = ((phba->fc_ratov * 3) + 3);
4649 }
dea3101e 4650
858c9f6c
JS
4651
4652 if (!timer_pending(&vport->fc_disctmo)) {
4653 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4654 "set disc timer: tmo:x%x state:x%x flg:x%x",
4655 tmo, vport->port_state, vport->fc_flag);
4656 }
4657
256ec0d0 4658 mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
2e0fef85
JS
4659 spin_lock_irq(shost->host_lock);
4660 vport->fc_flag |= FC_DISC_TMO;
4661 spin_unlock_irq(shost->host_lock);
dea3101e 4662
4663 /* Start Discovery Timer state <hba_state> */
e8b62011
JS
4664 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4665 "0247 Start Discovery Timer state x%x "
4666 "Data: x%x x%lx x%x x%x\n",
4667 vport->port_state, tmo,
4668 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
4669 vport->fc_adisc_cnt);
dea3101e 4670
4671 return;
4672}
4673
4674/*
4675 * Cancel rescue timer for Discovery / RSCN handling
4676 */
4677int
2e0fef85 4678lpfc_can_disctmo(struct lpfc_vport *vport)
dea3101e 4679{
2e0fef85 4680 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2e0fef85
JS
4681 unsigned long iflags;
4682
858c9f6c
JS
4683 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4684 "can disc timer: state:x%x rtry:x%x flg:x%x",
4685 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
4686
dea3101e 4687 /* Turn off discovery timer if its running */
2e0fef85
JS
4688 if (vport->fc_flag & FC_DISC_TMO) {
4689 spin_lock_irqsave(shost->host_lock, iflags);
4690 vport->fc_flag &= ~FC_DISC_TMO;
4691 spin_unlock_irqrestore(shost->host_lock, iflags);
4692 del_timer_sync(&vport->fc_disctmo);
4693 spin_lock_irqsave(&vport->work_port_lock, iflags);
4694 vport->work_port_events &= ~WORKER_DISC_TMO;
4695 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
dea3101e 4696 }
4697
4698 /* Cancel Discovery Timer state <hba_state> */
e8b62011
JS
4699 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4700 "0248 Cancel Discovery Timer state x%x "
4701 "Data: x%x x%x x%x\n",
4702 vport->port_state, vport->fc_flag,
4703 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
2fe165b6 4704 return 0;
dea3101e 4705}
4706
4707/*
4708 * Check specified ring for outstanding IOCB on the SLI queue
4709 * Return true if iocb matches the specified nport
4710 */
4711int
2e0fef85
JS
4712lpfc_check_sli_ndlp(struct lpfc_hba *phba,
4713 struct lpfc_sli_ring *pring,
4714 struct lpfc_iocbq *iocb,
4715 struct lpfc_nodelist *ndlp)
dea3101e 4716{
2e0fef85 4717 IOCB_t *icmd = &iocb->iocb;
92d7f7b0
JS
4718 struct lpfc_vport *vport = ndlp->vport;
4719
4720 if (iocb->vport != vport)
4721 return 0;
4722
dea3101e 4723 if (pring->ringno == LPFC_ELS_RING) {
4724 switch (icmd->ulpCommand) {
4725 case CMD_GEN_REQUEST64_CR:
21e9a0a5 4726 if (iocb->context_un.ndlp == ndlp)
2fe165b6 4727 return 1;
cd05c155 4728 /* fall through */
dea3101e 4729 case CMD_ELS_REQUEST64_CR:
10d4e957
JS
4730 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
4731 return 1;
cd05c155 4732 /* fall through */
dea3101e 4733 case CMD_XMIT_ELS_RSP64_CX:
4734 if (iocb->context1 == (uint8_t *) ndlp)
2fe165b6 4735 return 1;
cd05c155 4736 /* fall through */
dea3101e 4737 }
895427bd 4738 } else if (pring->ringno == LPFC_FCP_RING) {
dea3101e 4739 /* Skip match check if waiting to relogin to FCP target */
4740 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
92d7f7b0 4741 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
2fe165b6 4742 return 0;
dea3101e 4743 }
4744 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
2fe165b6 4745 return 1;
dea3101e 4746 }
dea3101e 4747 }
2fe165b6 4748 return 0;
dea3101e 4749}
4750
895427bd
JS
4751static void
4752__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
4753 struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
4754 struct list_head *dequeue_list)
4755{
4756 struct lpfc_iocbq *iocb, *next_iocb;
4757
4758 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
4759 /* Check to see if iocb matches the nport */
4760 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
4761 /* match, dequeue */
4762 list_move_tail(&iocb->list, dequeue_list);
4763 }
4764}
4765
4766static void
4767lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
4768 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
4769{
4770 struct lpfc_sli *psli = &phba->sli;
4771 uint32_t i;
4772
4773 spin_lock_irq(&phba->hbalock);
4774 for (i = 0; i < psli->num_rings; i++)
4775 __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
4776 dequeue_list);
4777 spin_unlock_irq(&phba->hbalock);
4778}
4779
4780static void
4781lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
4782 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
4783{
4784 struct lpfc_sli_ring *pring;
4785 struct lpfc_queue *qp = NULL;
4786
4787 spin_lock_irq(&phba->hbalock);
4788 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
4789 pring = qp->pring;
4790 if (!pring)
4791 continue;
2ade92ae 4792 spin_lock(&pring->ring_lock);
895427bd 4793 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
2ade92ae 4794 spin_unlock(&pring->ring_lock);
895427bd
JS
4795 }
4796 spin_unlock_irq(&phba->hbalock);
4797}
4798
dea3101e 4799/*
4800 * Free resources / clean up outstanding I/Os
4801 * associated with nlp_rpi in the LPFC_NODELIST entry.
4802 */
4803static int
2e0fef85 4804lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
dea3101e 4805{
2534ba75 4806 LIST_HEAD(completions);
dea3101e 4807
92d7f7b0
JS
4808 lpfc_fabric_abort_nport(ndlp);
4809
dea3101e 4810 /*
4811 * Everything that matches on txcmplq will be returned
4812 * by firmware with a no rpi error.
4813 */
4042629e 4814 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
895427bd
JS
4815 if (phba->sli_rev != LPFC_SLI_REV4)
4816 lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
4817 else
4818 lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
dea3101e 4819 }
2534ba75 4820
a257bf90
JS
4821 /* Cancel all the IOCBs from the completions list */
4822 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4823 IOERR_SLI_ABORTED);
2534ba75 4824
2fe165b6 4825 return 0;
dea3101e 4826}
4827
086a345f
JS
4828/**
4829 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
4830 * @phba: Pointer to HBA context object.
4831 * @pmb: Pointer to mailbox object.
4832 *
4833 * This function will issue an ELS LOGO command after completing
4834 * the UNREG_RPI.
4835 **/
b86a6756 4836static void
086a345f
JS
4837lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4838{
4839 struct lpfc_vport *vport = pmb->vport;
4840 struct lpfc_nodelist *ndlp;
4841
3e1f0718 4842 ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
086a345f
JS
4843 if (!ndlp)
4844 return;
4845 lpfc_issue_els_logo(vport, ndlp, 0);
4aa74c3c 4846 mempool_free(pmb, phba->mbox_mem_pool);
dea16bda
JS
4847
4848 /* Check to see if there are any deferred events to process */
4849 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
4850 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
4851 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4852 "1434 UNREG cmpl deferred logo x%x "
32350664 4853 "on NPort x%x Data: x%x x%px\n",
dea16bda
JS
4854 ndlp->nlp_rpi, ndlp->nlp_DID,
4855 ndlp->nlp_defer_did, ndlp);
4856
4857 ndlp->nlp_flag &= ~NLP_UNREG_INP;
4858 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4859 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
00292e03 4860 } else {
4f1a2fef
JS
4861 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
4862 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
4863 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
0f154226 4864 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
4f1a2fef 4865 }
00292e03 4866 ndlp->nlp_flag &= ~NLP_UNREG_INP;
dea16bda 4867 }
086a345f
JS
4868}
4869
97acd001
JS
4870/*
4871 * Sets the mailbox completion handler to be used for the
4872 * unreg_rpi command. The handler varies based on the state of
4873 * the port and what will be happening to the rpi next.
4874 */
4875static void
4876lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
4877 struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
4878{
4879 unsigned long iflags;
4880
4881 if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
4882 mbox->ctx_ndlp = ndlp;
4883 mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
4884
4885 } else if (phba->sli_rev == LPFC_SLI_REV4 &&
4886 (!(vport->load_flag & FC_UNLOADING)) &&
4887 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
4888 LPFC_SLI_INTF_IF_TYPE_2) &&
4889 (kref_read(&ndlp->kref) > 0)) {
4890 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
4891 mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
4892 } else {
4893 if (vport->load_flag & FC_UNLOADING) {
4894 if (phba->sli_rev == LPFC_SLI_REV4) {
4895 spin_lock_irqsave(&vport->phba->ndlp_lock,
4896 iflags);
4897 ndlp->nlp_flag |= NLP_RELEASE_RPI;
4898 spin_unlock_irqrestore(&vport->phba->ndlp_lock,
4899 iflags);
4900 }
4901 lpfc_nlp_get(ndlp);
4902 }
4903 mbox->ctx_ndlp = ndlp;
4904 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4905 }
4906}
4907
dea3101e 4908/*
4909 * Free rpi associated with LPFC_NODELIST entry.
4910 * This routine is called from lpfc_freenode(), when we are removing
4911 * a LPFC_NODELIST entry. It is also called if the driver initiates a
4912 * LOGO that completes successfully, and we are waiting to PLOGI back
4913 * to the remote NPort. In addition, it is called after we receive
4914 * and unsolicated ELS cmd, send back a rsp, the rsp completes and
4915 * we are waiting to PLOGI back to the remote NPort.
4916 */
4917int
2e0fef85 4918lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
dea3101e 4919{
2e0fef85
JS
4920 struct lpfc_hba *phba = vport->phba;
4921 LPFC_MBOXQ_t *mbox;
7c5e518c 4922 int rc, acc_plogi = 1;
6d368e53 4923 uint16_t rpi;
dea3101e 4924
f454a9ac
JS
4925 if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4926 ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
4927 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
0f154226
JS
4928 lpfc_printf_vlog(vport, KERN_INFO,
4929 LOG_NODE | LOG_DISCOVERY,
f454a9ac
JS
4930 "3366 RPI x%x needs to be "
4931 "unregistered nlp_flag x%x "
4932 "did x%x\n",
4933 ndlp->nlp_rpi, ndlp->nlp_flag,
4934 ndlp->nlp_DID);
dea16bda
JS
4935
4936 /* If there is already an UNREG in progress for this ndlp,
4937 * no need to queue up another one.
4938 */
4939 if (ndlp->nlp_flag & NLP_UNREG_INP) {
0f154226
JS
4940 lpfc_printf_vlog(vport, KERN_INFO,
4941 LOG_NODE | LOG_DISCOVERY,
dea16bda
JS
4942 "1436 unreg_rpi SKIP UNREG x%x on "
4943 "NPort x%x deferred x%x flg x%x "
32350664 4944 "Data: x%px\n",
dea16bda
JS
4945 ndlp->nlp_rpi, ndlp->nlp_DID,
4946 ndlp->nlp_defer_did,
4947 ndlp->nlp_flag, ndlp);
4948 goto out;
4949 }
4950
2e0fef85
JS
4951 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4952 if (mbox) {
6d368e53
JS
4953 /* SLI4 ports require the physical rpi value. */
4954 rpi = ndlp->nlp_rpi;
4955 if (phba->sli_rev == LPFC_SLI_REV4)
4956 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
086a345f 4957
6d368e53 4958 lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
ed957684 4959 mbox->vport = vport;
97acd001
JS
4960 lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
4961 if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
4962 /*
4963 * accept PLOGIs after unreg_rpi_cmpl
4964 */
4965 acc_plogi = 0;
dea16bda
JS
4966 if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
4967 Fabric_DID_MASK) &&
4968 (!(vport->fc_flag & FC_OFFLINE_MODE)))
4969 ndlp->nlp_flag |= NLP_UNREG_INP;
4970
0f154226
JS
4971 lpfc_printf_vlog(vport, KERN_INFO,
4972 LOG_NODE | LOG_DISCOVERY,
dea16bda 4973 "1433 unreg_rpi UNREG x%x on "
32350664
JS
4974 "NPort x%x deferred flg x%x "
4975 "Data:x%px\n",
dea16bda
JS
4976 ndlp->nlp_rpi, ndlp->nlp_DID,
4977 ndlp->nlp_flag, ndlp);
086a345f 4978
0b727fea 4979 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7c5e518c 4980 if (rc == MBX_NOT_FINISHED) {
2e0fef85 4981 mempool_free(mbox, phba->mbox_mem_pool);
7c5e518c
JS
4982 acc_plogi = 1;
4983 }
dea3101e 4984 }
dea3101e 4985 lpfc_no_rpi(phba, ndlp);
dea16bda 4986out:
4042629e
JS
4987 if (phba->sli_rev != LPFC_SLI_REV4)
4988 ndlp->nlp_rpi = 0;
4989 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
0c287589 4990 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
7c5e518c
JS
4991 if (acc_plogi)
4992 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
dea3101e 4993 return 1;
4994 }
7c5e518c 4995 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
dea3101e 4996 return 0;
4997}
4998
ecfd03c6
JS
4999/**
5000 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
5001 * @phba: pointer to lpfc hba data structure.
5002 *
5003 * This routine is invoked to unregister all the currently registered RPIs
5004 * to the HBA.
5005 **/
5006void
5007lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
5008{
5009 struct lpfc_vport **vports;
5010 struct lpfc_nodelist *ndlp;
5011 struct Scsi_Host *shost;
5012 int i;
5013
5014 vports = lpfc_create_vport_work_array(phba);
63e801ce
JS
5015 if (!vports) {
5016 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
5017 "2884 Vport array allocation failed \n");
5018 return;
5019 }
ecfd03c6
JS
5020 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5021 shost = lpfc_shost_from_vport(vports[i]);
5022 spin_lock_irq(shost->host_lock);
5023 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4042629e 5024 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
0c9ab6f5
JS
5025 /* The mempool_alloc might sleep */
5026 spin_unlock_irq(shost->host_lock);
ecfd03c6 5027 lpfc_unreg_rpi(vports[i], ndlp);
0c9ab6f5
JS
5028 spin_lock_irq(shost->host_lock);
5029 }
ecfd03c6
JS
5030 }
5031 spin_unlock_irq(shost->host_lock);
5032 }
5033 lpfc_destroy_vport_work_array(phba, vports);
5034}
5035
92d7f7b0
JS
5036void
5037lpfc_unreg_all_rpis(struct lpfc_vport *vport)
5038{
5039 struct lpfc_hba *phba = vport->phba;
5040 LPFC_MBOXQ_t *mbox;
5041 int rc;
5042
5af5eee7
JS
5043 if (phba->sli_rev == LPFC_SLI_REV4) {
5044 lpfc_sli4_unreg_all_rpis(vport);
5045 return;
5046 }
5047
92d7f7b0
JS
5048 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5049 if (mbox) {
6d368e53
JS
5050 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
5051 mbox);
92d7f7b0
JS
5052 mbox->vport = vport;
5053 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3e1f0718 5054 mbox->ctx_ndlp = NULL;
09372820 5055 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
a257bf90 5056 if (rc != MBX_TIMEOUT)
92d7f7b0 5057 mempool_free(mbox, phba->mbox_mem_pool);
a257bf90
JS
5058
5059 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
5060 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
5061 "1836 Could not issue "
5062 "unreg_login(all_rpis) status %d\n", rc);
92d7f7b0
JS
5063 }
5064}
5065
5066void
5067lpfc_unreg_default_rpis(struct lpfc_vport *vport)
5068{
5069 struct lpfc_hba *phba = vport->phba;
5070 LPFC_MBOXQ_t *mbox;
5071 int rc;
5072
c95a3b4b
JS
5073 /* Unreg DID is an SLI3 operation. */
5074 if (phba->sli_rev > LPFC_SLI_REV3)
5075 return;
5076
92d7f7b0
JS
5077 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5078 if (mbox) {
6d368e53
JS
5079 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
5080 mbox);
92d7f7b0
JS
5081 mbox->vport = vport;
5082 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3e1f0718 5083 mbox->ctx_ndlp = NULL;
09372820 5084 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
a257bf90
JS
5085 if (rc != MBX_TIMEOUT)
5086 mempool_free(mbox, phba->mbox_mem_pool);
5087
5088 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
e8b62011
JS
5089 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
5090 "1815 Could not issue "
a257bf90
JS
5091 "unreg_did (default rpis) status %d\n",
5092 rc);
92d7f7b0
JS
5093 }
5094}
5095
dea3101e 5096/*
5097 * Free resources associated with LPFC_NODELIST entry
5098 * so it can be freed.
5099 */
5100static int
2e0fef85 5101lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
dea3101e 5102{
2e0fef85
JS
5103 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5104 struct lpfc_hba *phba = vport->phba;
5105 LPFC_MBOXQ_t *mb, *nextmb;
dea3101e 5106 struct lpfc_dmabuf *mp;
97acd001 5107 unsigned long iflags;
dea3101e 5108
5109 /* Cleanup node for NPort <nlp_DID> */
e8b62011
JS
5110 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5111 "0900 Cleanup node for NPort x%x "
5112 "Data: x%x x%x x%x\n",
5113 ndlp->nlp_DID, ndlp->nlp_flag,
5114 ndlp->nlp_state, ndlp->nlp_rpi);
e47c9093
JS
5115 if (NLP_CHK_FREE_REQ(ndlp)) {
5116 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
32350664 5117 "0280 %s: ndlp:x%px "
e47c9093 5118 "usgmap:x%x refcnt:%d\n",
32350664 5119 __func__, (void *)ndlp, ndlp->nlp_usg_map,
2c935bc5 5120 kref_read(&ndlp->kref));
e47c9093
JS
5121 lpfc_dequeue_node(vport, ndlp);
5122 } else {
5123 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
32350664 5124 "0281 %s: ndlp:x%px "
e47c9093 5125 "usgmap:x%x refcnt:%d\n",
32350664 5126 __func__, (void *)ndlp, ndlp->nlp_usg_map,
2c935bc5 5127 kref_read(&ndlp->kref));
e47c9093
JS
5128 lpfc_disable_node(vport, ndlp);
5129 }
dea3101e 5130
086a345f
JS
5131
5132 /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
5133
dea3101e 5134 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
5135 if ((mb = phba->sli.mbox_active)) {
04c68496 5136 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
086a345f 5137 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
3e1f0718
JS
5138 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5139 mb->ctx_ndlp = NULL;
dea3101e 5140 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5141 }
5142 }
33ccf8d1 5143
2e0fef85 5144 spin_lock_irq(&phba->hbalock);
5ac6b303
JS
5145 /* Cleanup REG_LOGIN completions which are not yet processed */
5146 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
5147 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
086a345f 5148 (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
3e1f0718 5149 (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
5ac6b303
JS
5150 continue;
5151
3e1f0718 5152 mb->ctx_ndlp = NULL;
5ac6b303
JS
5153 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5154 }
5155
dea3101e 5156 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
04c68496 5157 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
086a345f 5158 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
3e1f0718
JS
5159 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5160 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
dea3101e 5161 if (mp) {
2e0fef85 5162 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
dea3101e 5163 kfree(mp);
5164 }
5165 list_del(&mb->list);
5166 mempool_free(mb, phba->mbox_mem_pool);
e47c9093
JS
5167 /* We shall not invoke the lpfc_nlp_put to decrement
5168 * the ndlp reference count as we are in the process
5169 * of lpfc_nlp_release.
5170 */
dea3101e 5171 }
5172 }
2e0fef85 5173 spin_unlock_irq(&phba->hbalock);
dea3101e 5174
e47c9093
JS
5175 lpfc_els_abort(phba, ndlp);
5176
2e0fef85 5177 spin_lock_irq(shost->host_lock);
c01f3208 5178 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2e0fef85 5179 spin_unlock_irq(shost->host_lock);
dea3101e 5180
5024ab17 5181 ndlp->nlp_last_elscmd = 0;
dea3101e 5182 del_timer_sync(&ndlp->nlp_delayfunc);
5183
0d2b6b83
JS
5184 list_del_init(&ndlp->els_retry_evt.evt_listp);
5185 list_del_init(&ndlp->dev_loss_evt.evt_listp);
1151e3ec 5186 lpfc_cleanup_vports_rrqs(vport, ndlp);
4f1a2fef
JS
5187 if (phba->sli_rev == LPFC_SLI_REV4)
5188 ndlp->nlp_flag |= NLP_RELEASE_RPI;
97acd001
JS
5189 if (!lpfc_unreg_rpi(vport, ndlp)) {
5190 /* Clean up unregistered and non freed rpis */
5191 if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
5192 !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
5193 lpfc_sli4_free_rpi(vport->phba,
5194 ndlp->nlp_rpi);
5195 spin_lock_irqsave(&vport->phba->ndlp_lock,
5196 iflags);
5197 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
5198 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
5199 spin_unlock_irqrestore(&vport->phba->ndlp_lock,
5200 iflags);
5201 }
5202 }
2fe165b6 5203 return 0;
dea3101e 5204}
5205
5206/*
5207 * Check to see if we can free the nlp back to the freelist.
5208 * If we are in the middle of using the nlp in the discovery state
5209 * machine, defer the free till we reach the end of the state machine.
5210 */
329f9bc7 5211static void
2e0fef85 5212lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
dea3101e 5213{
a8adb832 5214 struct lpfc_hba *phba = vport->phba;
1dcb58e5 5215 struct lpfc_rport_data *rdata;
466e840b 5216 struct fc_rport *rport;
a8adb832
JS
5217 LPFC_MBOXQ_t *mbox;
5218 int rc;
dea3101e 5219
0d2b6b83 5220 lpfc_cancel_retry_delay_tmo(vport, ndlp);
21e9a0a5 5221 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
4042629e 5222 !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
e8bcf0ae
DK
5223 !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
5224 phba->sli_rev != LPFC_SLI_REV4) {
a8adb832
JS
5225 /* For this case we need to cleanup the default rpi
5226 * allocated by the firmware.
5227 */
0f154226
JS
5228 lpfc_printf_vlog(vport, KERN_INFO,
5229 LOG_NODE | LOG_DISCOVERY,
5230 "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
5231 "ref %d map:x%x ndlp x%px\n",
be6bb941 5232 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
2c935bc5 5233 kref_read(&ndlp->kref),
be6bb941 5234 ndlp->nlp_usg_map, ndlp);
a8adb832
JS
5235 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
5236 != NULL) {
21e9a0a5 5237 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
4042629e 5238 (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
a8adb832
JS
5239 if (rc) {
5240 mempool_free(mbox, phba->mbox_mem_pool);
5241 }
5242 else {
5243 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
5244 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
5245 mbox->vport = vport;
3e1f0718 5246 mbox->ctx_ndlp = ndlp;
a8adb832
JS
5247 rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5248 if (rc == MBX_NOT_FINISHED) {
5249 mempool_free(mbox, phba->mbox_mem_pool);
5250 }
5251 }
5252 }
5253 }
2e0fef85 5254 lpfc_cleanup_node(vport, ndlp);
1dcb58e5 5255
2e0fef85 5256 /*
466e840b
JS
5257 * ndlp->rport must be set to NULL before it reaches here
5258 * i.e. break rport/node link before doing lpfc_nlp_put for
5259 * registered rport and then drop the reference of rport.
2e0fef85 5260 */
92d7f7b0 5261 if (ndlp->rport) {
466e840b
JS
5262 /*
5263 * extra lpfc_nlp_put dropped the reference of ndlp
5264 * for registered rport so need to cleanup rport
5265 */
5266 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
32350664 5267 "0940 removed node x%px DID x%x "
0f154226
JS
5268 "rpi %d rport not null x%px\n",
5269 ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
5270 ndlp->rport);
466e840b
JS
5271 rport = ndlp->rport;
5272 rdata = rport->dd_data;
329f9bc7
JS
5273 rdata->pnode = NULL;
5274 ndlp->rport = NULL;
466e840b 5275 put_device(&rport->dev);
dea3101e 5276 }
dea3101e 5277}
5278
5279static int
2e0fef85
JS
5280lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5281 uint32_t did)
dea3101e 5282{
2e0fef85 5283 D_ID mydid, ndlpdid, matchdid;
dea3101e 5284
5285 if (did == Bcast_DID)
2fe165b6 5286 return 0;
dea3101e 5287
dea3101e 5288 /* First check for Direct match */
5289 if (ndlp->nlp_DID == did)
2fe165b6 5290 return 1;
dea3101e 5291
5292 /* Next check for area/domain identically equals 0 match */
2e0fef85 5293 mydid.un.word = vport->fc_myDID;
dea3101e 5294 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
2fe165b6 5295 return 0;
dea3101e 5296 }
5297
5298 matchdid.un.word = did;
5299 ndlpdid.un.word = ndlp->nlp_DID;
5300 if (matchdid.un.b.id == ndlpdid.un.b.id) {
5301 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
5302 (mydid.un.b.area == matchdid.un.b.area)) {
6b337e03
JS
5303 /* This code is supposed to match the ID
5304 * for a private loop device that is
5305 * connect to fl_port. But we need to
5306 * check that the port did not just go
5307 * from pt2pt to fabric or we could end
5308 * up matching ndlp->nlp_DID 000001 to
5309 * fabric DID 0x20101
5310 */
dea3101e 5311 if ((ndlpdid.un.b.domain == 0) &&
5312 (ndlpdid.un.b.area == 0)) {
6b337e03
JS
5313 if (ndlpdid.un.b.id &&
5314 vport->phba->fc_topology ==
5315 LPFC_TOPOLOGY_LOOP)
2fe165b6 5316 return 1;
dea3101e 5317 }
2fe165b6 5318 return 0;
dea3101e 5319 }
5320
5321 matchdid.un.word = ndlp->nlp_DID;
5322 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
5323 (mydid.un.b.area == ndlpdid.un.b.area)) {
5324 if ((matchdid.un.b.domain == 0) &&
5325 (matchdid.un.b.area == 0)) {
5326 if (matchdid.un.b.id)
2fe165b6 5327 return 1;
dea3101e 5328 }
5329 }
5330 }
2fe165b6 5331 return 0;
dea3101e 5332}
5333
685f0bf7 5334/* Search for a nodelist entry */
2e0fef85
JS
5335static struct lpfc_nodelist *
5336__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
dea3101e 5337{
2fb9bd8b 5338 struct lpfc_nodelist *ndlp;
dea3101e 5339 uint32_t data1;
5340
2e0fef85
JS
5341 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5342 if (lpfc_matchdid(vport, ndlp, did)) {
b95b2119
JS
5343 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5344 ((uint32_t)ndlp->nlp_xri << 16) |
5345 ((uint32_t)ndlp->nlp_type << 8) |
5346 ((uint32_t)ndlp->nlp_usg_map & 0xff));
e8b62011
JS
5347 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5348 "0929 FIND node DID "
b95b2119 5349 "Data: x%px x%x x%x x%x x%x x%px\n",
e8b62011 5350 ndlp, ndlp->nlp_DID,
b95b2119 5351 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
cff261f6 5352 ndlp->active_rrqs_xri_bitmap);
685f0bf7 5353 return ndlp;
dea3101e 5354 }
5355 }
66a9ed66 5356
dea3101e 5357 /* FIND node did <did> NOT FOUND */
e8b62011
JS
5358 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5359 "0932 FIND node did x%x NOT FOUND.\n", did);
dea3101e 5360 return NULL;
5361}
5362
5363struct lpfc_nodelist *
2e0fef85
JS
5364lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5365{
5366 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5367 struct lpfc_nodelist *ndlp;
fedd3b7b 5368 unsigned long iflags;
2e0fef85 5369
fedd3b7b 5370 spin_lock_irqsave(shost->host_lock, iflags);
2e0fef85 5371 ndlp = __lpfc_findnode_did(vport, did);
fedd3b7b 5372 spin_unlock_irqrestore(shost->host_lock, iflags);
2e0fef85
JS
5373 return ndlp;
5374}
5375
f60cb93b
JS
5376struct lpfc_nodelist *
5377lpfc_findnode_mapped(struct lpfc_vport *vport)
5378{
5379 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5380 struct lpfc_nodelist *ndlp;
5381 uint32_t data1;
5382 unsigned long iflags;
5383
5384 spin_lock_irqsave(shost->host_lock, iflags);
5385
5386 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5387 if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
5388 ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
5389 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5390 ((uint32_t)ndlp->nlp_xri << 16) |
5391 ((uint32_t)ndlp->nlp_type << 8) |
5392 ((uint32_t)ndlp->nlp_rpi & 0xff));
5393 spin_unlock_irqrestore(shost->host_lock, iflags);
5394 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5395 "2025 FIND node DID "
32350664 5396 "Data: x%px x%x x%x x%x x%px\n",
f60cb93b
JS
5397 ndlp, ndlp->nlp_DID,
5398 ndlp->nlp_flag, data1,
5399 ndlp->active_rrqs_xri_bitmap);
5400 return ndlp;
5401 }
5402 }
5403 spin_unlock_irqrestore(shost->host_lock, iflags);
5404
5405 /* FIND node did <did> NOT FOUND */
5406 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5407 "2026 FIND mapped did NOT FOUND.\n");
5408 return NULL;
5409}
5410
2e0fef85
JS
5411struct lpfc_nodelist *
5412lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
dea3101e 5413{
2e0fef85 5414 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 5415 struct lpfc_nodelist *ndlp;
dea3101e 5416
2e0fef85 5417 ndlp = lpfc_findnode_did(vport, did);
c9f8735b 5418 if (!ndlp) {
1c5b12f7
JS
5419 if (vport->phba->nvmet_support)
5420 return NULL;
2e0fef85
JS
5421 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
5422 lpfc_rscn_payload_check(vport, did) == 0)
dea3101e 5423 return NULL;
9d3d340d 5424 ndlp = lpfc_nlp_init(vport, did);
dea3101e 5425 if (!ndlp)
5426 return NULL;
2e0fef85 5427 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
b4b3417c
JS
5428
5429 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5430 "6453 Setup New Node 2B_DISC x%x "
5431 "Data:x%x x%x x%x\n",
5432 ndlp->nlp_DID, ndlp->nlp_flag,
5433 ndlp->nlp_state, vport->fc_flag);
5434
2e0fef85 5435 spin_lock_irq(shost->host_lock);
dea3101e 5436 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 5437 spin_unlock_irq(shost->host_lock);
dea3101e 5438 return ndlp;
e47c9093 5439 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1c5b12f7
JS
5440 if (vport->phba->nvmet_support)
5441 return NULL;
e47c9093 5442 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
b95b2119
JS
5443 if (!ndlp) {
5444 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
5445 "0014 Could not enable ndlp\n");
e47c9093 5446 return NULL;
b95b2119 5447 }
b4b3417c
JS
5448 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5449 "6454 Setup Enabled Node 2B_DISC x%x "
5450 "Data:x%x x%x x%x\n",
5451 ndlp->nlp_DID, ndlp->nlp_flag,
5452 ndlp->nlp_state, vport->fc_flag);
5453
e47c9093
JS
5454 spin_lock_irq(shost->host_lock);
5455 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5456 spin_unlock_irq(shost->host_lock);
5457 return ndlp;
dea3101e 5458 }
e47c9093 5459
1c5b12f7
JS
5460 /* The NVME Target does not want to actively manage an rport.
5461 * The goal is to allow the target to reset its state and clear
5462 * pending IO in preparation for the initiator to recover.
5463 */
58da1ffb
JS
5464 if ((vport->fc_flag & FC_RSCN_MODE) &&
5465 !(vport->fc_flag & FC_NDISC_ACTIVE)) {
2e0fef85 5466 if (lpfc_rscn_payload_check(vport, did)) {
87af33fe 5467
c9f8735b
JW
5468 /* Since this node is marked for discovery,
5469 * delay timeout is not needed.
5470 */
0d2b6b83 5471 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1c5b12f7 5472
b4b3417c
JS
5473 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5474 "6455 Setup RSCN Node 2B_DISC x%x "
5475 "Data:x%x x%x x%x\n",
5476 ndlp->nlp_DID, ndlp->nlp_flag,
5477 ndlp->nlp_state, vport->fc_flag);
5478
1c5b12f7
JS
5479 /* NVME Target mode waits until rport is known to be
5480 * impacted by the RSCN before it transitions. No
5481 * active management - just go to NPR provided the
5482 * node had a valid login.
5483 */
8c258641
JS
5484 if (vport->phba->nvmet_support)
5485 return ndlp;
1c5b12f7
JS
5486
5487 /* If we've already received a PLOGI from this NPort
5488 * we don't need to try to discover it again.
5489 */
3f97aed6
JS
5490 if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
5491 !(ndlp->nlp_type &
5492 (NLP_FCP_TARGET | NLP_NVME_TARGET)))
1c5b12f7
JS
5493 return NULL;
5494
3f97aed6
JS
5495 ndlp->nlp_prev_state = ndlp->nlp_state;
5496 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5497
a257bf90
JS
5498 spin_lock_irq(shost->host_lock);
5499 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5500 spin_unlock_irq(shost->host_lock);
b4b3417c
JS
5501 } else {
5502 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5503 "6456 Skip Setup RSCN Node x%x "
5504 "Data:x%x x%x x%x\n",
5505 ndlp->nlp_DID, ndlp->nlp_flag,
5506 ndlp->nlp_state, vport->fc_flag);
dea3101e 5507 ndlp = NULL;
b4b3417c 5508 }
2fe165b6 5509 } else {
b4b3417c
JS
5510 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5511 "6457 Setup Active Node 2B_DISC x%x "
5512 "Data:x%x x%x x%x\n",
5513 ndlp->nlp_DID, ndlp->nlp_flag,
5514 ndlp->nlp_state, vport->fc_flag);
5515
1c5b12f7
JS
5516 /* If the initiator received a PLOGI from this NPort or if the
5517 * initiator is already in the process of discovery on it,
5518 * there's no need to try to discover it again.
87af33fe 5519 */
685f0bf7 5520 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
87af33fe 5521 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
1c5b12f7
JS
5522 (!vport->phba->nvmet_support &&
5523 ndlp->nlp_flag & NLP_RCV_PLOGI))
dea3101e 5524 return NULL;
1c5b12f7 5525
8c258641
JS
5526 if (vport->phba->nvmet_support)
5527 return ndlp;
1c5b12f7
JS
5528
5529 /* Moving to NPR state clears unsolicited flags and
5530 * allows for rediscovery
5531 */
5532 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5533
2e0fef85 5534 spin_lock_irq(shost->host_lock);
dea3101e 5535 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2e0fef85 5536 spin_unlock_irq(shost->host_lock);
dea3101e 5537 }
5538 return ndlp;
5539}
5540
5541/* Build a list of nodes to discover based on the loopmap */
5542void
2e0fef85 5543lpfc_disc_list_loopmap(struct lpfc_vport *vport)
dea3101e 5544{
2e0fef85 5545 struct lpfc_hba *phba = vport->phba;
dea3101e 5546 int j;
5547 uint32_t alpa, index;
5548
2e0fef85 5549 if (!lpfc_is_link_up(phba))
dea3101e 5550 return;
2e0fef85 5551
76a95d75 5552 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
dea3101e 5553 return;
dea3101e 5554
5555 /* Check for loop map present or not */
5556 if (phba->alpa_map[0]) {
5557 for (j = 1; j <= phba->alpa_map[0]; j++) {
5558 alpa = phba->alpa_map[j];
2e0fef85 5559 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
dea3101e 5560 continue;
2e0fef85 5561 lpfc_setup_disc_node(vport, alpa);
dea3101e 5562 }
5563 } else {
5564 /* No alpamap, so try all alpa's */
5565 for (j = 0; j < FC_MAXLOOP; j++) {
5566 /* If cfg_scan_down is set, start from highest
5567 * ALPA (0xef) to lowest (0x1).
5568 */
3de2a653 5569 if (vport->cfg_scan_down)
dea3101e 5570 index = j;
5571 else
5572 index = FC_MAXLOOP - j - 1;
5573 alpa = lpfcAlpaArray[index];
2e0fef85 5574 if ((vport->fc_myDID & 0xff) == alpa)
dea3101e 5575 continue;
2e0fef85 5576 lpfc_setup_disc_node(vport, alpa);
dea3101e 5577 }
5578 }
5579 return;
5580}
5581
895427bd 5582/* SLI3 only */
dea3101e 5583void
2e0fef85 5584lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
dea3101e 5585{
dea3101e 5586 LPFC_MBOXQ_t *mbox;
2e0fef85 5587 struct lpfc_sli *psli = &phba->sli;
895427bd
JS
5588 struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
5589 struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING];
2e0fef85
JS
5590 int rc;
5591
92d7f7b0
JS
5592 /*
5593 * if it's not a physical port or if we already send
5594 * clear_la then don't send it.
5595 */
5596 if ((phba->link_state >= LPFC_CLEAR_LA) ||
da0436e9
JS
5597 (vport->port_type != LPFC_PHYSICAL_PORT) ||
5598 (phba->sli_rev == LPFC_SLI_REV4))
92d7f7b0
JS
5599 return;
5600
2e0fef85
JS
5601 /* Link up discovery */
5602 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
5603 phba->link_state = LPFC_CLEAR_LA;
5604 lpfc_clear_la(phba, mbox);
5605 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
5606 mbox->vport = vport;
0b727fea 5607 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2e0fef85
JS
5608 if (rc == MBX_NOT_FINISHED) {
5609 mempool_free(mbox, phba->mbox_mem_pool);
5610 lpfc_disc_flush_list(vport);
5611 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5612 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
92d7f7b0
JS
5613 phba->link_state = LPFC_HBA_ERROR;
5614 }
5615 }
5616}
5617
5618/* Reg_vpi to tell firmware to resume normal operations */
5619void
5620lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
5621{
5622 LPFC_MBOXQ_t *regvpimbox;
5623
5624 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5625 if (regvpimbox) {
da0436e9 5626 lpfc_reg_vpi(vport, regvpimbox);
92d7f7b0
JS
5627 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
5628 regvpimbox->vport = vport;
0b727fea 5629 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
92d7f7b0
JS
5630 == MBX_NOT_FINISHED) {
5631 mempool_free(regvpimbox, phba->mbox_mem_pool);
2e0fef85
JS
5632 }
5633 }
5634}
5635
5636/* Start Link up / RSCN discovery on NPR nodes */
5637void
5638lpfc_disc_start(struct lpfc_vport *vport)
5639{
5640 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5641 struct lpfc_hba *phba = vport->phba;
685f0bf7 5642 uint32_t num_sent;
dea3101e 5643 uint32_t clear_la_pending;
dea3101e 5644
e74c03c8
JS
5645 if (!lpfc_is_link_up(phba)) {
5646 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5647 "3315 Link is not up %x\n",
5648 phba->link_state);
dea3101e 5649 return;
e74c03c8 5650 }
2e0fef85
JS
5651
5652 if (phba->link_state == LPFC_CLEAR_LA)
dea3101e 5653 clear_la_pending = 1;
5654 else
5655 clear_la_pending = 0;
5656
2e0fef85
JS
5657 if (vport->port_state < LPFC_VPORT_READY)
5658 vport->port_state = LPFC_DISC_AUTH;
dea3101e 5659
2e0fef85
JS
5660 lpfc_set_disctmo(vport);
5661
2e0fef85
JS
5662 vport->fc_prevDID = vport->fc_myDID;
5663 vport->num_disc_nodes = 0;
dea3101e 5664
5665 /* Start Discovery state <hba_state> */
e8b62011 5666 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
b4b3417c
JS
5667 "0202 Start Discovery port state x%x "
5668 "flg x%x Data: x%x x%x x%x\n",
e8b62011 5669 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
b4b3417c 5670 vport->fc_adisc_cnt, vport->fc_npr_cnt);
dea3101e 5671
5672 /* First do ADISCs - if any */
2e0fef85 5673 num_sent = lpfc_els_disc_adisc(vport);
dea3101e 5674
5675 if (num_sent)
5676 return;
5677
6fa139f3 5678 /* Register the VPI for SLI3, NPIV only. */
92d7f7b0 5679 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1b32f6aa 5680 !(vport->fc_flag & FC_PT2PT) &&
da0436e9
JS
5681 !(vport->fc_flag & FC_RSCN_MODE) &&
5682 (phba->sli_rev < LPFC_SLI_REV4)) {
85c0f177 5683 lpfc_issue_clear_la(phba, vport);
92d7f7b0
JS
5684 lpfc_issue_reg_vpi(phba, vport);
5685 return;
5686 }
5687
5688 /*
5689 * For SLI2, we need to set port_state to READY and continue
5690 * discovery.
5691 */
2e0fef85 5692 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
dea3101e 5693 /* If we get here, there is nothing to ADISC */
85c0f177 5694 lpfc_issue_clear_la(phba, vport);
2e0fef85 5695
92d7f7b0 5696 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2e0fef85
JS
5697 vport->num_disc_nodes = 0;
5698 /* go thru NPR nodes and issue ELS PLOGIs */
5699 if (vport->fc_npr_cnt)
5700 lpfc_els_disc_plogi(vport);
5701
5702 if (!vport->num_disc_nodes) {
5703 spin_lock_irq(shost->host_lock);
5704 vport->fc_flag &= ~FC_NDISC_ACTIVE;
5705 spin_unlock_irq(shost->host_lock);
92d7f7b0 5706 lpfc_can_disctmo(vport);
dea3101e 5707 }
5708 }
92d7f7b0 5709 vport->port_state = LPFC_VPORT_READY;
dea3101e 5710 } else {
5711 /* Next do PLOGIs - if any */
2e0fef85 5712 num_sent = lpfc_els_disc_plogi(vport);
dea3101e 5713
5714 if (num_sent)
5715 return;
5716
2e0fef85 5717 if (vport->fc_flag & FC_RSCN_MODE) {
dea3101e 5718 /* Check to see if more RSCNs came in while we
5719 * were processing this one.
5720 */
2e0fef85
JS
5721 if ((vport->fc_rscn_id_cnt == 0) &&
5722 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
5723 spin_lock_irq(shost->host_lock);
5724 vport->fc_flag &= ~FC_RSCN_MODE;
5725 spin_unlock_irq(shost->host_lock);
92d7f7b0 5726 lpfc_can_disctmo(vport);
2fe165b6 5727 } else
2e0fef85 5728 lpfc_els_handle_rscn(vport);
dea3101e 5729 }
5730 }
5731 return;
5732}
5733
5734/*
5735 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
5736 * ring the match the sppecified nodelist.
5737 */
5738static void
2e0fef85 5739lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
dea3101e 5740{
2534ba75 5741 LIST_HEAD(completions);
dea3101e 5742 IOCB_t *icmd;
5743 struct lpfc_iocbq *iocb, *next_iocb;
5744 struct lpfc_sli_ring *pring;
dea3101e 5745
895427bd 5746 pring = lpfc_phba_elsring(phba);
1234a6d5
DK
5747 if (unlikely(!pring))
5748 return;
dea3101e 5749
5750 /* Error matching iocb on txq or txcmplq
5751 * First check the txq.
5752 */
2e0fef85 5753 spin_lock_irq(&phba->hbalock);
dea3101e 5754 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
5755 if (iocb->context1 != ndlp) {
5756 continue;
5757 }
5758 icmd = &iocb->iocb;
5759 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
5760 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
5761
2534ba75 5762 list_move_tail(&iocb->list, &completions);
dea3101e 5763 }
5764 }
5765
5766 /* Next check the txcmplq */
5767 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
5768 if (iocb->context1 != ndlp) {
5769 continue;
5770 }
5771 icmd = &iocb->iocb;
2e0fef85
JS
5772 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
5773 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
2534ba75
JS
5774 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
5775 }
5776 }
2e0fef85 5777 spin_unlock_irq(&phba->hbalock);
dea3101e 5778
a257bf90
JS
5779 /* Cancel all the IOCBs from the completions list */
5780 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5781 IOERR_SLI_ABORTED);
dea3101e 5782}
5783
a6ababd2 5784static void
2e0fef85 5785lpfc_disc_flush_list(struct lpfc_vport *vport)
dea3101e 5786{
5787 struct lpfc_nodelist *ndlp, *next_ndlp;
2e0fef85 5788 struct lpfc_hba *phba = vport->phba;
dea3101e 5789
2e0fef85
JS
5790 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
5791 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
685f0bf7 5792 nlp_listp) {
e47c9093
JS
5793 if (!NLP_CHK_NODE_ACT(ndlp))
5794 continue;
685f0bf7
JS
5795 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5796 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
5797 lpfc_free_tx(phba, ndlp);
685f0bf7 5798 }
dea3101e 5799 }
5800 }
dea3101e 5801}
5802
/* Release every discovery-related resource held by this vport:
 * pending RSCNs, outstanding ELS commands, and nodes mid-discovery.
 */
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
5810
dea3101e 5811/*****************************************************************************/
5812/*
5813 * NAME: lpfc_disc_timeout
5814 *
5815 * FUNCTION: Fibre Channel driver discovery timeout routine.
5816 *
5817 * EXECUTION ENVIRONMENT: interrupt only
5818 *
5819 * CALLED FROM:
5820 * Timer function
5821 *
5822 * RETURNS:
5823 * none
5824 */
5825/*****************************************************************************/
5826void
f22eb4d3 5827lpfc_disc_timeout(struct timer_list *t)
dea3101e 5828{
f22eb4d3 5829 struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
2e0fef85 5830 struct lpfc_hba *phba = vport->phba;
5e9d9b82 5831 uint32_t tmo_posted;
dea3101e 5832 unsigned long flags = 0;
5833
5834 if (unlikely(!phba))
5835 return;
5836
5e9d9b82
JS
5837 spin_lock_irqsave(&vport->work_port_lock, flags);
5838 tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
5839 if (!tmo_posted)
2e0fef85 5840 vport->work_port_events |= WORKER_DISC_TMO;
5e9d9b82 5841 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2e0fef85 5842
5e9d9b82
JS
5843 if (!tmo_posted)
5844 lpfc_worker_wake_up(phba);
dea3101e 5845 return;
5846}
5847
5848static void
2e0fef85 5849lpfc_disc_timeout_handler(struct lpfc_vport *vport)
dea3101e 5850{
2e0fef85
JS
5851 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5852 struct lpfc_hba *phba = vport->phba;
5853 struct lpfc_sli *psli = &phba->sli;
c9f8735b 5854 struct lpfc_nodelist *ndlp, *next_ndlp;
92d7f7b0 5855 LPFC_MBOXQ_t *initlinkmbox;
dea3101e 5856 int rc, clrlaerr = 0;
5857
2e0fef85 5858 if (!(vport->fc_flag & FC_DISC_TMO))
dea3101e 5859 return;
5860
2e0fef85
JS
5861 spin_lock_irq(shost->host_lock);
5862 vport->fc_flag &= ~FC_DISC_TMO;
5863 spin_unlock_irq(shost->host_lock);
dea3101e 5864
858c9f6c
JS
5865 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5866 "disc timeout: state:x%x rtry:x%x flg:x%x",
5867 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
5868
2e0fef85 5869 switch (vport->port_state) {
dea3101e 5870
5871 case LPFC_LOCAL_CFG_LINK:
a0f2d3ef
JS
5872 /*
5873 * port_state is identically LPFC_LOCAL_CFG_LINK while
5874 * waiting for FAN timeout
5875 */
e8b62011
JS
5876 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
5877 "0221 FAN timeout\n");
a0f2d3ef 5878
c9f8735b 5879 /* Start discovery by sending FLOGI, clean up old rpis */
2e0fef85 5880 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
685f0bf7 5881 nlp_listp) {
e47c9093
JS
5882 if (!NLP_CHK_NODE_ACT(ndlp))
5883 continue;
685f0bf7
JS
5884 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
5885 continue;
c9f8735b
JW
5886 if (ndlp->nlp_type & NLP_FABRIC) {
5887 /* Clean up the ndlp on Fabric connections */
2e0fef85 5888 lpfc_drop_node(vport, ndlp);
87af33fe 5889
2fe165b6 5890 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
c9f8735b
JW
5891 /* Fail outstanding IO now since device
5892 * is marked for PLOGI.
5893 */
2e0fef85 5894 lpfc_unreg_rpi(vport, ndlp);
c9f8735b
JW
5895 }
5896 }
92d7f7b0 5897 if (vport->port_state != LPFC_FLOGI) {
76a95d75
JS
5898 if (phba->sli_rev <= LPFC_SLI_REV3)
5899 lpfc_initial_flogi(vport);
5900 else
5901 lpfc_issue_init_vfi(vport);
0ff10d46 5902 return;
92d7f7b0 5903 }
dea3101e 5904 break;
5905
92d7f7b0 5906 case LPFC_FDISC:
dea3101e 5907 case LPFC_FLOGI:
2e0fef85 5908 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
dea3101e 5909 /* Initial FLOGI timeout */
e8b62011
JS
5910 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5911 "0222 Initial %s timeout\n",
87af33fe 5912 vport->vpi ? "FDISC" : "FLOGI");
dea3101e 5913
5914 /* Assume no Fabric and go on with discovery.
5915 * Check for outstanding ELS FLOGI to abort.
5916 */
5917
5918 /* FLOGI failed, so just use loop map to make discovery list */
2e0fef85 5919 lpfc_disc_list_loopmap(vport);
dea3101e 5920
5921 /* Start discovery */
2e0fef85 5922 lpfc_disc_start(vport);
dea3101e 5923 break;
5924
5925 case LPFC_FABRIC_CFG_LINK:
5926 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
5927 NameServer login */
e8b62011
JS
5928 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5929 "0223 Timeout while waiting for "
5930 "NameServer login\n");
dea3101e 5931 /* Next look for NameServer ndlp */
2e0fef85 5932 ndlp = lpfc_findnode_did(vport, NameServer_DID);
e47c9093 5933 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
87af33fe
JS
5934 lpfc_els_abort(phba, ndlp);
5935
5936 /* ReStart discovery */
5937 goto restart_disc;
dea3101e 5938
5939 case LPFC_NS_QRY:
5940 /* Check for wait for NameServer Rsp timeout */
e8b62011
JS
5941 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5942 "0224 NameServer Query timeout "
5943 "Data: x%x x%x\n",
5944 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
dea3101e 5945
92d7f7b0
JS
5946 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
5947 /* Try it one more time */
5948 vport->fc_ns_retry++;
a0f2d3ef
JS
5949 vport->gidft_inp = 0;
5950 rc = lpfc_issue_gidft(vport);
92d7f7b0
JS
5951 if (rc == 0)
5952 break;
dea3101e 5953 }
92d7f7b0 5954 vport->fc_ns_retry = 0;
dea3101e 5955
87af33fe 5956restart_disc:
92d7f7b0
JS
5957 /*
5958 * Discovery is over.
5959 * set port_state to PORT_READY if SLI2.
5960 * cmpl_reg_vpi will set port_state to READY for SLI3.
5961 */
3772a991
JS
5962 if (phba->sli_rev < LPFC_SLI_REV4) {
5963 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5964 lpfc_issue_reg_vpi(phba, vport);
6d368e53 5965 else {
3772a991
JS
5966 lpfc_issue_clear_la(phba, vport);
5967 vport->port_state = LPFC_VPORT_READY;
5968 }
dea3101e 5969 }
5970
5971 /* Setup and issue mailbox INITIALIZE LINK command */
5972 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5973 if (!initlinkmbox) {
e8b62011
JS
5974 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5975 "0206 Device Discovery "
5976 "completion error\n");
2e0fef85 5977 phba->link_state = LPFC_HBA_ERROR;
dea3101e 5978 break;
5979 }
5980
5981 lpfc_linkdown(phba);
5982 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
5983 phba->cfg_link_speed);
04c68496 5984 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
ed957684 5985 initlinkmbox->vport = vport;
92d7f7b0 5986 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0b727fea 5987 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
5b8bd0c9 5988 lpfc_set_loopback_flag(phba);
dea3101e 5989 if (rc == MBX_NOT_FINISHED)
5990 mempool_free(initlinkmbox, phba->mbox_mem_pool);
5991
5992 break;
5993
5994 case LPFC_DISC_AUTH:
5995 /* Node Authentication timeout */
e8b62011
JS
5996 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5997 "0227 Node Authentication timeout\n");
2e0fef85
JS
5998 lpfc_disc_flush_list(vport);
5999
92d7f7b0
JS
6000 /*
6001 * set port_state to PORT_READY if SLI2.
6002 * cmpl_reg_vpi will set port_state to READY for SLI3.
6003 */
3772a991
JS
6004 if (phba->sli_rev < LPFC_SLI_REV4) {
6005 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
6006 lpfc_issue_reg_vpi(phba, vport);
6007 else { /* NPIV Not enabled */
6008 lpfc_issue_clear_la(phba, vport);
6009 vport->port_state = LPFC_VPORT_READY;
6010 }
dea3101e 6011 }
6012 break;
6013
2e0fef85
JS
6014 case LPFC_VPORT_READY:
6015 if (vport->fc_flag & FC_RSCN_MODE) {
e8b62011
JS
6016 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
6017 "0231 RSCN timeout Data: x%x "
6018 "x%x\n",
6019 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
dea3101e 6020
6021 /* Cleanup any outstanding ELS commands */
2e0fef85 6022 lpfc_els_flush_cmd(vport);
dea3101e 6023
2e0fef85
JS
6024 lpfc_els_flush_rscn(vport);
6025 lpfc_disc_flush_list(vport);
dea3101e 6026 }
6027 break;
2e0fef85 6028
92d7f7b0 6029 default:
e8b62011 6030 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
d7c255b2 6031 "0273 Unexpected discovery timeout, "
e8b62011 6032 "vport State x%x\n", vport->port_state);
2e0fef85
JS
6033 break;
6034 }
6035
6036 switch (phba->link_state) {
6037 case LPFC_CLEAR_LA:
92d7f7b0 6038 /* CLEAR LA timeout */
e8b62011
JS
6039 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
6040 "0228 CLEAR LA timeout\n");
2e0fef85
JS
6041 clrlaerr = 1;
6042 break;
6043
09372820
JS
6044 case LPFC_LINK_UP:
6045 lpfc_issue_clear_la(phba, vport);
cd05c155 6046 /* fall through */
2e0fef85
JS
6047 case LPFC_LINK_UNKNOWN:
6048 case LPFC_WARM_START:
6049 case LPFC_INIT_START:
6050 case LPFC_INIT_MBX_CMDS:
6051 case LPFC_LINK_DOWN:
2e0fef85 6052 case LPFC_HBA_ERROR:
e8b62011
JS
6053 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
6054 "0230 Unexpected timeout, hba link "
6055 "state x%x\n", phba->link_state);
2e0fef85
JS
6056 clrlaerr = 1;
6057 break;
92d7f7b0
JS
6058
6059 case LPFC_HBA_READY:
6060 break;
dea3101e 6061 }
6062
6063 if (clrlaerr) {
2e0fef85 6064 lpfc_disc_flush_list(vport);
895427bd
JS
6065 if (phba->sli_rev != LPFC_SLI_REV4) {
6066 psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
6067 ~LPFC_STOP_IOCB_EVENT;
6068 psli->sli3_ring[LPFC_FCP_RING].flag &=
6069 ~LPFC_STOP_IOCB_EVENT;
6070 }
2e0fef85 6071 vport->port_state = LPFC_VPORT_READY;
dea3101e 6072 }
dea3101e 6073 return;
6074}
6075
dea3101e 6076/*
6077 * This routine handles processing a NameServer REG_LOGIN mailbox
6078 * command upon completion. It is setup in the LPFC_MBOXQ
6079 * as the completion routine when the command is
6080 * handed off to the SLI layer.
6081 */
6082void
2e0fef85 6083lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea3101e 6084{
04c68496 6085 MAILBOX_t *mb = &pmb->u.mb;
3e1f0718
JS
6086 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
6087 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2e0fef85 6088 struct lpfc_vport *vport = pmb->vport;
dea3101e 6089
3e1f0718
JS
6090 pmb->ctx_buf = NULL;
6091 pmb->ctx_ndlp = NULL;
dea3101e 6092
6d368e53
JS
6093 if (phba->sli_rev < LPFC_SLI_REV4)
6094 ndlp->nlp_rpi = mb->un.varWords[0];
4042629e 6095 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
dea3101e 6096 ndlp->nlp_type |= NLP_FABRIC;
2e0fef85 6097 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
0f154226 6098 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
32350664 6099 "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
be6bb941 6100 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
2c935bc5 6101 kref_read(&ndlp->kref),
be6bb941 6102 ndlp->nlp_usg_map, ndlp);
2e0fef85
JS
6103 /*
6104 * Start issuing Fabric-Device Management Interface (FDMI) command to
4258e98e
JS
6105 * 0xfffffa (FDMI well known port).
6106 * DHBA -> DPRT -> RHBA -> RPA (physical port)
6107 * DPRT -> RPRT (vports)
dea3101e 6108 */
4258e98e
JS
6109 if (vport->port_type == LPFC_PHYSICAL_PORT)
6110 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
76b2c34a 6111 else
4258e98e
JS
6112 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
6113
dea3101e 6114
fa4066b6
JS
6115 /* decrement the node reference count held for this callback
6116 * function.
6117 */
329f9bc7 6118 lpfc_nlp_put(ndlp);
dea3101e 6119 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6120 kfree(mp);
329f9bc7 6121 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 6122
6123 return;
6124}
6125
685f0bf7
JS
6126static int
6127lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
6128{
6129 uint16_t *rpi = param;
6130
eff4a01b
JS
6131 /* check for active node */
6132 if (!NLP_CHK_NODE_ACT(ndlp))
6133 return 0;
6134
685f0bf7
JS
6135 return ndlp->nlp_rpi == *rpi;
6136}
6137
6138static int
6139lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
6140{
6141 return memcmp(&ndlp->nlp_portname, param,
6142 sizeof(ndlp->nlp_portname)) == 0;
6143}
6144
a6ababd2 6145static struct lpfc_nodelist *
2e0fef85 6146__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
dea3101e 6147{
21568f53 6148 struct lpfc_nodelist *ndlp;
dea3101e 6149
2e0fef85 6150 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
34f5ad8b
JS
6151 if (filter(ndlp, param)) {
6152 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2d44d165 6153 "3185 FIND node filter %ps DID "
32350664 6154 "ndlp x%px did x%x flg x%x st x%x "
4938250e 6155 "xri x%x type x%x rpi x%x\n",
34f5ad8b 6156 filter, ndlp, ndlp->nlp_DID,
4938250e
JS
6157 ndlp->nlp_flag, ndlp->nlp_state,
6158 ndlp->nlp_xri, ndlp->nlp_type,
6159 ndlp->nlp_rpi);
685f0bf7 6160 return ndlp;
34f5ad8b 6161 }
685f0bf7 6162 }
34f5ad8b 6163 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2d44d165 6164 "3186 FIND node filter %ps NOT FOUND.\n", filter);
21568f53 6165 return NULL;
dea3101e 6166}
6167
685f0bf7
JS
/*
 * This routine looks up the ndlp lists for the given RPI. If rpi found it
 * returns the node list element pointer else return NULL.
 * Lock-free variant: the caller is responsible for any list locking.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}
6177
/*
 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
 * returns the node element list pointer else return NULL.
 * The walk is performed under the Scsi_Host host_lock.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
6193
cb69f7de
JS
/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi
 * is found, the routine returns the node element list pointer else
 * return NULL.
 * Uses spin_lock_irqsave so it is safe to call from contexts where the
 * interrupt state is unknown (unlike lpfc_findnode_wwpn above).
 */
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return ndlp;
}
6211
/**
 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: the physical host virtual N_Port identifier.
 *
 * This routine finds a vport on a HBA (referred by @phba) through a
 * @vpi. The function walks the HBA's vport list and returns the address
 * of the vport with the matching @vpi.
 *
 * Return code
 *    NULL - No vport with the matching @vpi found
 *    Otherwise - Address to the vport with the matching @vpi.
 **/
struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
	struct lpfc_vport *vport;
	unsigned long flags;
	int i = 0;

	/* The physical ports are always vpi 0 - translate is unnecessary. */
	if (vpi > 0) {
		/*
		 * Translate the physical vpi to the logical vpi. The
		 * vport stores the logical vpi.
		 */
		for (i = 0; i < phba->max_vpi; i++) {
			if (vpi == phba->vpi_ids[i])
				break;
		}

		/* No logical slot maps to this physical vpi */
		if (i >= phba->max_vpi) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"2936 Could not find Vport mapped "
					"to vpi %d\n", vpi);
			return NULL;
		}
	}

	/* Walk the port list under port_list_lock looking for logical vpi i
	 * (i == 0 here when @vpi was 0, i.e. the physical port).
	 */
	spin_lock_irqsave(&phba->port_list_lock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->vpi == i) {
			spin_unlock_irqrestore(&phba->port_list_lock, flags);
			return vport;
		}
	}
	spin_unlock_irqrestore(&phba->port_list_lock, flags);
	return NULL;
}
6261
9d3d340d
JS
/* lpfc_nlp_init - Allocate and initialize a nodelist entry for @did on
 * @vport.  On SLI4 an RPI is allocated first; if the ndlp allocation then
 * fails the RPI is rolled back.  Returns the new ndlp or NULL on failure.
 */
struct lpfc_nodelist *
lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	int rpi = LPFC_RPI_ALLOC_ERROR;

	/* SLI4 requires an RPI up front; bail out if none is available */
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		rpi = lpfc_sli4_alloc_rpi(vport->phba);
		if (rpi == LPFC_RPI_ALLOC_ERROR)
			return NULL;
	}

	ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
	if (!ndlp) {
		/* Roll back the RPI reserved above */
		if (vport->phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(vport->phba, rpi);
		return NULL;
	}

	/* mempool memory is not zeroed; clear before initializing */
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		ndlp->nlp_rpi = rpi;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
				 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
				 "flg:x%x refcnt:%d map:x%x\n",
				 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_flag, kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map);

		/* Best-effort: node remains usable without the RRQ bitmap */
		ndlp->active_rrqs_xri_bitmap =
				mempool_alloc(vport->phba->active_rrq_pool,
					      GFP_KERNEL);
		if (ndlp->active_rrqs_xri_bitmap)
			memset(ndlp->active_rrqs_xri_bitmap, 0,
			       ndlp->phba->cfg_rrq_xri_bitmap_sz);
	}



	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init: did:x%x",
		ndlp->nlp_DID, 0, 0);

	return ndlp;
}
329f9bc7 6310
98c9ea5c
JS
/* This routine releases all resources associated with a specific NPort's
 * ndlp and mempool_free's the nodelist.  It is installed as the kref
 * release callback and therefore runs when the node's reference count
 * reaches zero (see lpfc_nlp_put).
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release: did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 %s: ndlp:x%px did %x "
			"usgmap:x%x refcnt:%d rpi:%x\n",
			__func__,
			(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
			kref_read(&ndlp->kref), ndlp->nlp_rpi);

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	/* free ndlp memory for final ndlp release; only when the free
	 * request flag was set (see NLP_SET_FREE_ACK in lpfc_nlp_put)
	 */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		if (phba->sli_rev == LPFC_SLI_REV4)
			mempool_free(ndlp->active_rrqs_xri_bitmap,
				     ndlp->phba->active_rrq_pool);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}
6351
98c9ea5c
JS
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.  Returns the ndlp on success, or NULL when the node is
 * inactive or already acknowledged for free (reference not taken).
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get: did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			kref_read(&ndlp->kref));
		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
					"0276 %s: ndlp:x%px "
					"usgmap:x%x refcnt:%d\n",
					__func__, (void *)ndlp, ndlp->nlp_usg_map,
					kref_read(&ndlp->kref));
			return NULL;
		} else
			/* kref taken under ndlp_lock so it cannot race with
			 * the release-side flag updates above
			 */
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
6387
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.  A NULL ndlp is treated as already released (returns 1).
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put: did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		kref_read(&ndlp->kref));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 %s: ndlp:x%px "
				"usgmap:x%x refcnt:%d\n",
				__func__, (void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 %s: ndlp:x%px "
				"usgmap:x%x refcnt:%d\n",
				__func__, (void *)ndlp, ndlp->nlp_usg_map,
				kref_read(&ndlp->kref));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (kref_read(&ndlp->kref) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note, the kref_put returns 1 when decrementing a reference
	 * count that was 1, it invokes the release callback function,
	 * but it still left the reference count as 1 (not actually
	 * performs the last decrementation). Otherwise, it actually
	 * decrements the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
98c9ea5c
JS
6456
6457/* This routine free's the specified nodelist if it is not in use
e47c9093
JS
6458 * by any other discovery thread. This routine returns 1 if the
6459 * ndlp has been freed. A return value of 0 indicates the ndlp is
6460 * not yet been released.
98c9ea5c
JS
6461 */
6462int
6463lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
6464{
6465 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6466 "node not used: did:x%x flg:x%x refcnt:x%x",
6467 ndlp->nlp_DID, ndlp->nlp_flag,
2c935bc5
PZ
6468 kref_read(&ndlp->kref));
6469 if (kref_read(&ndlp->kref) == 1)
e47c9093
JS
6470 if (lpfc_nlp_put(ndlp))
6471 return 1;
98c9ea5c
JS
6472 return 0;
6473}
6fb120a7
JS
6474
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 *
 * Returns 1 when the FCF must be considered in use (including the
 * cannot-allocate-memory case, which errs on the safe side), 0 otherwise.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	vports = lpfc_create_vport_work_array(phba);

	/* If driver cannot allocate memory, indicate fcf is in use */
	if (!vports)
		return 1;

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		/*
		 * IF the CVL_RCVD bit is not set then we have sent the
		 * flogi.
		 * If dev_loss fires while we are waiting we do not want to
		 * unreg the fcf.
		 */
		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
			spin_unlock_irq(shost->host_lock);
			ret = 1;
			goto out;
		}
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				/* Active FCP target rport: FCF is in use */
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				/* Still logged in: in use, but keep scanning
				 * so every such node gets logged
				 */
				ret = 1;
				lpfc_printf_log(phba, KERN_INFO,
						LOG_NODE | LOG_DISCOVERY,
						"2624 RPI %x DID %x flag %x "
						"still logged in\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_flag);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}
6535
/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function logs any mailbox failure status, clears the
 * FC_VFI_REGISTERED flag on the physical port, and frees memory
 * associated with the mailbox command.
 */
void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2555 UNREG_VFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	/* VFI is gone even if the mailbox reported an error */
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
6561
6562/**
6563 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
6564 * @phba: Pointer to hba context object.
6565 * @mboxq: Pointer to mailbox object.
6566 *
6567 * This function frees memory associated with the mailbox command.
6568 */
6569static void
6570lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6571{
6572 struct lpfc_vport *vport = mboxq->vport;
6573
6574 if (mboxq->u.mb.mbxStatus) {
6575 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
6576 "2550 UNREG_FCFI mbxStatus error x%x "
6577 "HBA state x%x\n",
6578 mboxq->u.mb.mbxStatus, vport->port_state);
6579 }
6580 mempool_free(mboxq, phba->mbox_mem_pool);
6581 return;
6582}
6583
6584/**
ecfd03c6 6585 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
6fb120a7
JS
6586 * @phba: Pointer to hba context object.
6587 *
ecfd03c6
JS
6588 * This function prepare the HBA for unregistering the currently registered
6589 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
6590 * VFIs.
6fb120a7 6591 */
ecfd03c6
JS
6592int
6593lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
6fb120a7 6594{
6fb120a7 6595 struct lpfc_vport **vports;
695a814e 6596 struct lpfc_nodelist *ndlp;
72100cc4 6597 struct Scsi_Host *shost;
e74c03c8 6598 int i = 0, rc;
6fb120a7 6599
ecfd03c6 6600 /* Unregister RPIs */
6fb120a7 6601 if (lpfc_fcf_inuse(phba))
ecfd03c6 6602 lpfc_unreg_hba_rpis(phba);
6fb120a7 6603
4d9ab994
JS
6604 /* At this point, all discovery is aborted */
6605 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
6fb120a7
JS
6606
6607 /* Unregister VPIs */
6608 vports = lpfc_create_vport_work_array(phba);
ecfd03c6 6609 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
6fb120a7 6610 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
695a814e
JS
6611 /* Stop FLOGI/FDISC retries */
6612 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6613 if (ndlp)
6614 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
78730cfe 6615 lpfc_cleanup_pending_mbox(vports[i]);
5af5eee7
JS
6616 if (phba->sli_rev == LPFC_SLI_REV4)
6617 lpfc_sli4_unreg_all_rpis(vports[i]);
6fb120a7 6618 lpfc_mbx_unreg_vpi(vports[i]);
72100cc4
JS
6619 shost = lpfc_shost_from_vport(vports[i]);
6620 spin_lock_irq(shost->host_lock);
891478a2 6621 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
c868595d 6622 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
72100cc4 6623 spin_unlock_irq(shost->host_lock);
6fb120a7
JS
6624 }
6625 lpfc_destroy_vport_work_array(phba, vports);
e74c03c8
JS
6626 if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
6627 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6628 if (ndlp)
6629 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
6630 lpfc_cleanup_pending_mbox(phba->pport);
6631 if (phba->sli_rev == LPFC_SLI_REV4)
6632 lpfc_sli4_unreg_all_rpis(phba->pport);
6633 lpfc_mbx_unreg_vpi(phba->pport);
6634 shost = lpfc_shost_from_vport(phba->pport);
6635 spin_lock_irq(shost->host_lock);
6636 phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6637 phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
6638 spin_unlock_irq(shost->host_lock);
6639 }
6fb120a7 6640
695a814e
JS
6641 /* Cleanup any outstanding ELS commands */
6642 lpfc_els_flush_all_cmd(phba);
6643
1b51197d
JS
6644 /* Unregister the physical port VFI */
6645 rc = lpfc_issue_unreg_vfi(phba->pport);
6646 return rc;
ecfd03c6
JS
6647}
6648
6649/**
6650 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
6651 * @phba: Pointer to hba context object.
6652 *
6653 * This function issues synchronous unregister FCF mailbox command to HBA to
6654 * unregister the currently registered FCF record. The driver does not reset
6655 * the driver FCF usage state flags.
6656 *
6657 * Return 0 if successfully issued, none-zero otherwise.
6658 */
6659int
6660lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
6661{
6662 LPFC_MBOXQ_t *mbox;
6663 int rc;
6664
6fb120a7
JS
6665 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6666 if (!mbox) {
6667 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
ecfd03c6
JS
6668 "2551 UNREG_FCFI mbox allocation failed"
6669 "HBA state x%x\n", phba->pport->port_state);
6670 return -ENOMEM;
6fb120a7 6671 }
6fb120a7
JS
6672 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
6673 mbox->vport = phba->pport;
6674 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
6675 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6676
6677 if (rc == MBX_NOT_FINISHED) {
ecfd03c6
JS
6678 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6679 "2552 Unregister FCFI command failed rc x%x "
6680 "HBA state x%x\n",
6681 rc, phba->pport->port_state);
6682 return -EINVAL;
6683 }
6684 return 0;
6685}
6686
/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. This function
 * also tries to find another FCF for discovery by rescanning the HBA FCF
 * table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"2748 Failed to prepare for unregistering "
			"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		/* Scan could not be started: undo the INIT_DISC flag */
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}
6744
/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"2749 Failed to prepare for unregistering "
			"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}
6775
/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there are any connected remote ports for the FCF
 * and, if all the devices are disconnected, unregisters the FCFI.
 * This function also tries to use another FCF for discovery
 * (via lpfc_unregister_fcf_rescan).
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	/* Any logged-in node or FCP target rport means FCF stays */
	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}
6808
6809/**
6810 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
6811 * @phba: Pointer to hba context object.
6812 * @buff: Buffer containing the FCF connection table as in the config
6813 * region.
6814 * This function create driver data structure for the FCF connection
6815 * record table read from config region 23.
6816 */
6817static void
6818lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
6819 uint8_t *buff)
6820{
6821 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6822 struct lpfc_fcf_conn_hdr *conn_hdr;
6823 struct lpfc_fcf_conn_rec *conn_rec;
6824 uint32_t record_count;
6825 int i;
6826
6827 /* Free the current connect table */
6828 list_for_each_entry_safe(conn_entry, next_conn_entry,
4d9ab994
JS
6829 &phba->fcf_conn_rec_list, list) {
6830 list_del_init(&conn_entry->list);
6fb120a7 6831 kfree(conn_entry);
4d9ab994 6832 }
6fb120a7
JS
6833
6834 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
6835 record_count = conn_hdr->length * sizeof(uint32_t)/
6836 sizeof(struct lpfc_fcf_conn_rec);
6837
6838 conn_rec = (struct lpfc_fcf_conn_rec *)
6839 (buff + sizeof(struct lpfc_fcf_conn_hdr));
6840
6841 for (i = 0; i < record_count; i++) {
6842 if (!(conn_rec[i].flags & FCFCNCT_VALID))
6843 continue;
6844 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
6845 GFP_KERNEL);
6846 if (!conn_entry) {
6847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6848 "2566 Failed to allocate connection"
6849 " table entry\n");
6850 return;
6851 }
6852
6853 memcpy(&conn_entry->conn_rec, &conn_rec[i],
6854 sizeof(struct lpfc_fcf_conn_rec));
6fb120a7
JS
6855 list_add_tail(&conn_entry->list,
6856 &phba->fcf_conn_rec_list);
6857 }
df0d085f
JS
6858
6859 if (!list_empty(&phba->fcf_conn_rec_list)) {
6860 i = 0;
6861 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
6862 list) {
6863 conn_rec = &conn_entry->conn_rec;
6864 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6865 "3345 FCF connection list rec[%02d]: "
6866 "flags:x%04x, vtag:x%04x, "
6867 "fabric_name:x%02x:%02x:%02x:%02x:"
6868 "%02x:%02x:%02x:%02x, "
6869 "switch_name:x%02x:%02x:%02x:%02x:"
6870 "%02x:%02x:%02x:%02x\n", i++,
6871 conn_rec->flags, conn_rec->vlan_tag,
6872 conn_rec->fabric_name[0],
6873 conn_rec->fabric_name[1],
6874 conn_rec->fabric_name[2],
6875 conn_rec->fabric_name[3],
6876 conn_rec->fabric_name[4],
6877 conn_rec->fabric_name[5],
6878 conn_rec->fabric_name[6],
6879 conn_rec->fabric_name[7],
6880 conn_rec->switch_name[0],
6881 conn_rec->switch_name[1],
6882 conn_rec->switch_name[2],
6883 conn_rec->switch_name[3],
6884 conn_rec->switch_name[4],
6885 conn_rec->switch_name[5],
6886 conn_rec->switch_name[6],
6887 conn_rec->switch_name[7]);
6888 }
6889 }
6fb120a7
JS
6890}
6891
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from conf region..
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23: the optional VLAN id and the
 * three-byte FC map.  The record is ignored if its version or length
 * fields do not match the expected values.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	/* Reject records with unexpected version or length */
	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		/* VLAN id occupies the low 12 bits of the tag */
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
	return;
}
6927
6928/**
6929 * lpfc_get_rec_conf23 - Get a record type in config region data.
6930 * @buff: Buffer containing config region 23 data.
6931 * @size: Size of the data buffer.
6932 * @rec_type: Record type to be searched.
6933 *
25985edc 6934 * This function searches config region data to find the beginning
6fb120a7
JS
6935 * of the record specified by record_type. If record found, this
6936 * function return pointer to the record else return NULL.
6937 */
6938static uint8_t *
6939lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
6940{
6941 uint32_t offset = 0, rec_length;
6942
6943 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
6944 (size < sizeof(uint32_t)))
6945 return NULL;
6946
6947 rec_length = buff[offset + 1];
6948
6949 /*
6950 * One TLV record has one word header and number of data words
6951 * specified in the rec_length field of the record header.
6952 */
6953 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
6954 <= size) {
6955 if (buff[offset] == rec_type)
6956 return &buff[offset];
6957
6958 if (buff[offset] == LPFC_REGION23_LAST_REC)
6959 return NULL;
6960
6961 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
6962 rec_length = buff[offset + 1];
6963 }
6964 return NULL;
6965}
6966
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters: after validating
 * the region signature and version it extracts the FCoE parameter record
 * and the FCF connection table, if present.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		uint8_t *buff,
		uint32_t size)
{
	uint32_t offset = 0;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words signature and version cannot be
	 * verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
			size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);

}