scsi: lpfc: Fix cq_id truncation in rq create
[linux-2.6-block.git] / drivers / scsi / lpfc / lpfc_init.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

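			/* One-time setup: byte-swap each 32-bit word of the
			 * license string to big-endian (a no-op on big-endian
			 * hosts) before it is copied out to the adapter.
			 */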
			for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
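
	/* Pull the VPD region out of adapter memory one DUMP mailbox at a
	 * time, advancing the buffer offset by the amount returned, until
	 * the region is exhausted.
	 */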
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's asynchronous event
 * configuration mailbox command to the device. If the mailbox command
 * returns successfully, it sets the internal async event support flag to 1;
 * otherwise, it sets the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

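	/* dist 3 with build number 0 formats as "<ver>.<rev><lev>"; any
	 * other combination also appends the dist character and the build
	 * number.
	 */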
	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *			   cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

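	/* A valid vendor version level whose vendorVersion words carry the
	 * FA-PWWN vendor key indicates a fabric-assigned WWPN; see the
	 * FAWWPN_SET handling below.
	 */
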
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	} else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
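		/* Expand the low 6 bytes of the WWNN into 12 characters,
		 * mapping each nibble to '0'-'9' or 'a'-'f'.
		 */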
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

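	/* Validate the requested link speed against the adapter's supported
	 * speed bits (phba->lmt); unsupported settings fall back to
	 * auto-negotiation.
	 */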
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
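	/* For each hardware queue, move aborted I/O buffers back onto the
	 * put list so they can be reposted, and zero the per-queue abort
	 * counters.
	 */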
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

1155
19ca7609
JS
1156/**
1157 * lpfc_rrq_timeout - The RRQ-timer timeout handler
fe614acd 1158 * @t: timer context used to obtain the pointer to lpfc hba data structure.
19ca7609
JS
1159 *
1160 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
1161 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
1162 * work-port-events bitmap and the worker thread is notified. This timeout
1163 * event will be used by the worker thread to invoke the actual timeout
1164 * handler routine, lpfc_rrq_handler. Any periodical operations will
1165 * be performed in the timeout handler and the RRQ timeout event bit shall
1166 * be cleared by the worker thread after it has taken the event bitmap out.
1167 **/
1168static void
f22eb4d3 1169lpfc_rrq_timeout(struct timer_list *t)
19ca7609
JS
1170{
1171 struct lpfc_hba *phba;
19ca7609
JS
1172 unsigned long iflag;
1173
f22eb4d3 1174 phba = from_timer(phba, t, rrq_tmr);
19ca7609 1175 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
06918ac5
JS
1176 if (!(phba->pport->load_flag & FC_UNLOADING))
1177 phba->hba_flag |= HBA_RRQ_ACTIVE;
1178 else
1179 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
19ca7609 1180 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
06918ac5
JS
1181
1182 if (!(phba->pport->load_flag & FC_UNLOADING))
1183 lpfc_worker_wake_up(phba);
19ca7609
JS
1184}
1185
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/*
 * lpfc_idle_stat_delay_work - idle_stat tracking
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;
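
		/* A mostly busy CPU (under 15% idle) keeps CQ processing in
		 * the workqueue; a mostly idle CPU switches to IRQ polling.
		 */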
		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
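		/* Scale the delay with the interrupt count seen in the last
		 * window: one LPFC_EQ_DELAY_STEP per 1024 interrupts, capped
		 * at LPFC_MAX_AUTO_EQ_DELAY.
		 */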
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine applies some heuristics to adjust the
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If a HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue a MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

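	/* If the deferred ELS buffer count is unchanged since the last
	 * heartbeat interval, treat the list as idle and free everything
	 * queued on it.
	 */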
0ff10d46
JS
1482 if (phba->elsbuf_cnt &&
1483 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1484 spin_lock_irq(&phba->hbalock);
1485 list_splice_init(&phba->elsbuf, &completions);
1486 phba->elsbuf_cnt = 0;
1487 phba->elsbuf_prev_cnt = 0;
1488 spin_unlock_irq(&phba->hbalock);
1489
1490 while (!list_empty(&completions)) {
1491 list_remove_head(&completions, buf_ptr,
1492 struct lpfc_dmabuf, list);
1493 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1494 kfree(buf_ptr);
1495 }
1496 }
1497 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1498
858c9f6c 1499 /* If there is no heart beat outstanding, issue a heartbeat command */
13815c83 1500 if (phba->cfg_enable_hba_heartbeat) {
a22d73b6
JS
1501 /* If IOs are completing, no need to issue a MBX_HEARTBEAT */
1502 spin_lock_irq(&phba->pport->work_port_lock);
1503 if (time_after(phba->last_completion_time +
1504 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1505 jiffies)) {
1506 spin_unlock_irq(&phba->pport->work_port_lock);
1507 if (phba->hba_flag & HBA_HBEAT_INP)
1508 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1509 else
1510 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1511 goto out;
1512 }
1513 spin_unlock_irq(&phba->pport->work_port_lock);
1514
1515 /* Check if a MBX_HEARTBEAT is already in progress */
1516 if (phba->hba_flag & HBA_HBEAT_INP) {
1517 /*
1518 * If heart beat timeout called with HBA_HBEAT_INP set
1519 * we need to give the hb mailbox cmd a chance to
1520 * complete or TMO.
1521 */
1522 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1523 "0459 Adapter heartbeat still outstanding: "
1524 "last compl time was %d ms.\n",
1525 jiffies_to_msecs(jiffies
1526 - phba->last_completion_time));
1527 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1528 } else {
bc73905a
JS
1529 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1530 (list_empty(&psli->mboxq))) {
bc73905a 1531
a22d73b6
JS
1532 retval = lpfc_issue_hb_mbox(phba);
1533 if (retval) {
1534 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1535 goto out;
bc73905a
JS
1536 }
1537 phba->skipped_hb = 0;
bc73905a
JS
1538 } else if (time_before_eq(phba->last_completion_time,
1539 phba->skipped_hb)) {
1540 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1541 "2857 Last completion time not "
1542 " updated in %d ms\n",
1543 jiffies_to_msecs(jiffies
1544 - phba->last_completion_time));
1545 } else
1546 phba->skipped_hb = jiffies;
1547
a22d73b6
JS
1548 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1549 goto out;
858c9f6c 1550 }
4258e98e 1551 } else {
a22d73b6
JS
1552 /* Check to see if we want to force a MBX_HEARTBEAT */
1553 if (phba->hba_flag & HBA_HBEAT_TMO) {
1554 retval = lpfc_issue_hb_mbox(phba);
1555 if (retval)
1556 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1557 else
1558 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1559 goto out;
1560 }
1561 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
858c9f6c 1562 }
a22d73b6
JS
1563out:
1564 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
858c9f6c
JS
1565}
1566
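The handler above always funnels to "out:", re-arming hb_tmofunc for either the short LPFC_HB_MBOX_INTERVAL or the longer LPFC_HB_MBOX_TIMEOUT window. A minimal, self-contained sketch of that self-rearming timer pattern follows; it is illustrative only and not part of the driver, and the my_* names and MY_* constants are hypothetical.

#include <linux/jiffies.h>
#include <linux/timer.h>

#define MY_INTERVAL_MS	5000	/* normal heartbeat cadence */
#define MY_TIMEOUT_MS	30000	/* window granted to an in-flight command */

struct my_dev {
	struct timer_list hb_timer;
	bool hb_in_progress;	/* analogous to HBA_HBEAT_INP */
};

static void my_hb_timeout(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, hb_timer);
	unsigned int tmo;

	/* Pick the next window based on whether a command is outstanding. */
	tmo = dev->hb_in_progress ? MY_TIMEOUT_MS : MY_INTERVAL_MS;

	/* Re-arm: the timer keeps itself alive, one period at a time. */
	mod_timer(&dev->hb_timer, jiffies + msecs_to_jiffies(tmo));
}

static void my_hb_start(struct my_dev *dev)
{
	timer_setup(&dev->hb_timer, my_hb_timeout, 0);
	mod_timer(&dev->hb_timer, jiffies + msecs_to_jiffies(MY_INTERVAL_MS));
}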
e59058c4 1567/**
3621a710 1568 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
e59058c4
JS
1569 * @phba: pointer to lpfc hba data structure.
1570 *
1571 * This routine is called to bring the HBA offline when HBA hardware error
1572 * other than Port Error 6 has been detected.
1573 **/
09372820
JS
1574static void
1575lpfc_offline_eratt(struct lpfc_hba *phba)
1576{
1577 struct lpfc_sli *psli = &phba->sli;
1578
1579 spin_lock_irq(&phba->hbalock);
f4b4c68f 1580 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
09372820 1581 spin_unlock_irq(&phba->hbalock);
618a5230 1582 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
09372820
JS
1583
1584 lpfc_offline(phba);
1585 lpfc_reset_barrier(phba);
f4b4c68f 1586 spin_lock_irq(&phba->hbalock);
09372820 1587 lpfc_sli_brdreset(phba);
f4b4c68f 1588 spin_unlock_irq(&phba->hbalock);
09372820
JS
1589 lpfc_hba_down_post(phba);
1590 lpfc_sli_brdready(phba, HS_MBRDY);
1591 lpfc_unblock_mgmt_io(phba);
1592 phba->link_state = LPFC_HBA_ERROR;
1593 return;
1594}
1595
da0436e9
JS
1596/**
1597 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1598 * @phba: pointer to lpfc hba data structure.
1599 *
1600 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1601 * other than Port Error 6 has been detected.
1602 **/
a88dbb6a 1603void
da0436e9
JS
1604lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1605{
946727dc
JS
1606 spin_lock_irq(&phba->hbalock);
1607 phba->link_state = LPFC_HBA_ERROR;
1608 spin_unlock_irq(&phba->hbalock);
1609
618a5230 1610 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
c00f62e6 1611 lpfc_sli_flush_io_rings(phba);
da0436e9 1612 lpfc_offline(phba);
da0436e9 1613 lpfc_hba_down_post(phba);
da0436e9 1614 lpfc_unblock_mgmt_io(phba);
da0436e9
JS
1615}
1616
a257bf90
JS
1617/**
1618 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1619 * @phba: pointer to lpfc hba data structure.
1620 *
1621 * This routine is invoked to handle the deferred HBA hardware error
1622 * conditions. This type of error is indicated by the HBA setting ER1
1623 * and another ER bit in the host status register. The driver will
1624 * wait until the ER1 bit clears before handling the error condition.
1625 **/
1626static void
1627lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1628{
1629 uint32_t old_host_status = phba->work_hs;
a257bf90
JS
1630 struct lpfc_sli *psli = &phba->sli;
1631
f4b4c68f
JS
1632 /* If the pci channel is offline, ignore possible errors,
1633 * since we cannot communicate with the pci card anyway.
1634 */
1635 if (pci_channel_offline(phba->pcidev)) {
1636 spin_lock_irq(&phba->hbalock);
1637 phba->hba_flag &= ~DEFER_ERATT;
1638 spin_unlock_irq(&phba->hbalock);
1639 return;
1640 }
1641
372c187b
DK
1642 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1643 "0479 Deferred Adapter Hardware Error "
1644 "Data: x%x x%x x%x\n",
1645 phba->work_hs, phba->work_status[0],
1646 phba->work_status[1]);
a257bf90
JS
1647
1648 spin_lock_irq(&phba->hbalock);
f4b4c68f 1649 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
a257bf90
JS
1650 spin_unlock_irq(&phba->hbalock);
1651
1652
1653 /*
1654 * Firmware stops when it triggered erratt. That could cause I/Os to
1655 * be dropped by the firmware. Error out the iocbs (I/Os) on the txcmplq
1656 * and let the SCSI layer retry them after re-establishing link.
1657 */
db55fba8 1658 lpfc_sli_abort_fcp_rings(phba);
a257bf90
JS
1659
1660 /*
1661 * There was a firmware error. Take the hba offline and then
1662 * attempt to restart it.
1663 */
618a5230 1664 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
a257bf90
JS
1665 lpfc_offline(phba);
1666
1667 /* Wait for the ER1 bit to clear. */
1668 while (phba->work_hs & HS_FFER1) {
1669 msleep(100);
9940b97b
JS
1670 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1671 phba->work_hs = UNPLUG_ERR;
1672 break;
1673 }
a257bf90
JS
1674 /* If driver is unloading let the worker thread continue */
1675 if (phba->pport->load_flag & FC_UNLOADING) {
1676 phba->work_hs = 0;
1677 break;
1678 }
1679 }
1680
1681 /*
1682 * This is to protect against a race condition in which the
1683 * first write to the host attention register clears the
1684 * host status register.
1685 */
1686 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1687 phba->work_hs = old_host_status & ~HS_FFER1;
1688
3772a991 1689 spin_lock_irq(&phba->hbalock);
a257bf90 1690 phba->hba_flag &= ~DEFER_ERATT;
3772a991 1691 spin_unlock_irq(&phba->hbalock);
a257bf90
JS
1692 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1693 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1694}
1695
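The ER1 wait loop above is an instance of a common driver pattern: poll a hardware status register with a sleep between reads, bailing out early on a failed read or a shutdown flag. A hedged, self-contained sketch of that pattern follows; it is not driver code, and my_wait_bit_clear and MY_ERR_BIT are hypothetical.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define MY_ERR_BIT	0x1	/* hypothetical "error still latched" bit */

/* Returns 0 once the bit clears, -EIO if the device vanishes,
 * -EBUSY if the caller is shutting down and the loop should be abandoned.
 */
static int my_wait_bit_clear(void __iomem *reg, bool (*unloading)(void))
{
	u32 val;

	for (;;) {
		val = readl(reg);
		if (val == ~0U)			/* all-ones read: device gone */
			return -EIO;
		if (!(val & MY_ERR_BIT))	/* condition cleared */
			return 0;
		if (unloading())		/* let the worker thread continue */
			return -EBUSY;
		msleep(100);			/* same 100 ms cadence as above */
	}
}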
3772a991
JS
1696static void
1697lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1698{
1699 struct lpfc_board_event_header board_event;
1700 struct Scsi_Host *shost;
1701
1702 board_event.event_type = FC_REG_BOARD_EVENT;
1703 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1704 shost = lpfc_shost_from_vport(phba->pport);
1705 fc_host_post_vendor_event(shost, fc_get_event_number(),
1706 sizeof(board_event),
1707 (char *) &board_event,
1708 LPFC_NL_VENDOR_ID);
1709}
1710
e59058c4 1711/**
3772a991 1712 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
e59058c4
JS
1713 * @phba: pointer to lpfc hba data structure.
1714 *
1715 * This routine is invoked to handle the following HBA hardware error
1716 * conditions:
1717 * 1 - HBA error attention interrupt
1718 * 2 - DMA ring index out of range
1719 * 3 - Mailbox command came back as unknown
1720 **/
3772a991
JS
1721static void
1722lpfc_handle_eratt_s3(struct lpfc_hba *phba)
dea3101e 1723{
2e0fef85 1724 struct lpfc_vport *vport = phba->pport;
2e0fef85 1725 struct lpfc_sli *psli = &phba->sli;
d2873e4c 1726 uint32_t event_data;
57127f15
JS
1727 unsigned long temperature;
1728 struct temp_event temp_event_data;
92d7f7b0 1729 struct Scsi_Host *shost;
2e0fef85 1730
8d63f375 1731 /* If the pci channel is offline, ignore possible errors,
3772a991
JS
1732 * since we cannot communicate with the pci card anyway.
1733 */
1734 if (pci_channel_offline(phba->pcidev)) {
1735 spin_lock_irq(&phba->hbalock);
1736 phba->hba_flag &= ~DEFER_ERATT;
1737 spin_unlock_irq(&phba->hbalock);
8d63f375 1738 return;
3772a991
JS
1739 }
1740
13815c83
JS
1741 /* If resets are disabled then leave the HBA alone and return */
1742 if (!phba->cfg_enable_hba_reset)
1743 return;
dea3101e 1744
ea2151b4 1745 /* Send an internal error event to mgmt application */
3772a991 1746 lpfc_board_errevt_to_mgmt(phba);
ea2151b4 1747
a257bf90
JS
1748 if (phba->hba_flag & DEFER_ERATT)
1749 lpfc_handle_deferred_eratt(phba);
1750
dcf2a4e0
JS
1751 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1752 if (phba->work_hs & HS_FFER6)
1753 /* Re-establishing Link */
1754 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1755 "1301 Re-establishing Link "
1756 "Data: x%x x%x x%x\n",
1757 phba->work_hs, phba->work_status[0],
1758 phba->work_status[1]);
1759 if (phba->work_hs & HS_FFER8)
1760 /* Device Zeroization */
1761 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1762 "2861 Host Authentication device "
1763 "zeroization Data:x%x x%x x%x\n",
1764 phba->work_hs, phba->work_status[0],
1765 phba->work_status[1]);
58da1ffb 1766
92d7f7b0 1767 spin_lock_irq(&phba->hbalock);
f4b4c68f 1768 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
92d7f7b0 1769 spin_unlock_irq(&phba->hbalock);
dea3101e 1770
1771 /*
1772 * Firmware stops when it triggered erratt with HS_FFER6.
1773 * That could cause I/Os to be dropped by the firmware.
1774 * Error out the iocbs (I/Os) on the txcmplq and let the
1775 * SCSI layer retry them after re-establishing link.
1776 */
db55fba8 1777 lpfc_sli_abort_fcp_rings(phba);
dea3101e 1778
dea3101e 1779 /*
1780 * There was a firmware error. Take the hba offline and then
1781 * attempt to restart it.
1782 */
618a5230 1783 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
dea3101e 1784 lpfc_offline(phba);
41415862 1785 lpfc_sli_brdrestart(phba);
dea3101e 1786 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
46fa311e 1787 lpfc_unblock_mgmt_io(phba);
dea3101e 1788 return;
1789 }
46fa311e 1790 lpfc_unblock_mgmt_io(phba);
57127f15
JS
1791 } else if (phba->work_hs & HS_CRIT_TEMP) {
1792 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1793 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1794 temp_event_data.event_code = LPFC_CRIT_TEMP;
1795 temp_event_data.data = (uint32_t)temperature;
1796
372c187b 1797 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
d7c255b2 1798 "0406 Adapter maximum temperature exceeded "
57127f15
JS
1799 "(%ld), taking this port offline "
1800 "Data: x%x x%x x%x\n",
1801 temperature, phba->work_hs,
1802 phba->work_status[0], phba->work_status[1]);
1803
1804 shost = lpfc_shost_from_vport(phba->pport);
1805 fc_host_post_vendor_event(shost, fc_get_event_number(),
1806 sizeof(temp_event_data),
1807 (char *) &temp_event_data,
1808 SCSI_NL_VID_TYPE_PCI
1809 | PCI_VENDOR_ID_EMULEX);
1810
7af67051 1811 spin_lock_irq(&phba->hbalock);
7af67051
JS
1812 phba->over_temp_state = HBA_OVER_TEMP;
1813 spin_unlock_irq(&phba->hbalock);
09372820 1814 lpfc_offline_eratt(phba);
57127f15 1815
dea3101e 1816 } else {
1817 /* The if clause above forces this code path when the status
9399627f
JS
1818 * failure is a value other than FFER6. Do not call the offline
1819 * twice. This is the adapter hardware error path.
dea3101e 1820 */
372c187b 1821 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
e8b62011 1822 "0457 Adapter Hardware Error "
dea3101e 1823 "Data: x%x x%x x%x\n",
e8b62011 1824 phba->work_hs,
dea3101e 1825 phba->work_status[0], phba->work_status[1]);
1826
d2873e4c 1827 event_data = FC_REG_DUMP_EVENT;
92d7f7b0 1828 shost = lpfc_shost_from_vport(vport);
2e0fef85 1829 fc_host_post_vendor_event(shost, fc_get_event_number(),
d2873e4c
JS
1830 sizeof(event_data), (char *) &event_data,
1831 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1832
09372820 1833 lpfc_offline_eratt(phba);
dea3101e 1834 }
9399627f 1835 return;
dea3101e 1836}
1837
618a5230
JS
1838/**
1839 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1840 * @phba: pointer to lpfc hba data structure.
1841 * @mbx_action: flag for mailbox shutdown action.
fe614acd 1842 * @en_rn_msg: send reset/port recovery message.
618a5230
JS
1843 * This routine is invoked to perform an SLI4 port PCI function reset in
1844 * response to port status register polling attention. It waits for port
1845 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1846 * During this process, interrupt vectors are freed and later requested
1847 * for handling possible port resource change.
1848 **/
1849static int
e10b2022
JS
1850lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1851 bool en_rn_msg)
618a5230
JS
1852{
1853 int rc;
1854 uint32_t intr_mode;
a9978e39 1855 LPFC_MBOXQ_t *mboxq;
618a5230 1856
27d6ac0a 1857 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
65791f1f
JS
1858 LPFC_SLI_INTF_IF_TYPE_2) {
1859 /*
1860 * On an error status condition, the driver needs to wait for
1861 * the port to become ready before performing the reset.
1862 */
1863 rc = lpfc_sli4_pdev_status_reg_wait(phba);
0e916ee7 1864 if (rc)
65791f1f
JS
1865 return rc;
1866 }
0e916ee7 1867
65791f1f
JS
1868 /* need reset: attempt for port recovery */
1869 if (en_rn_msg)
0b3ad32e 1870 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
65791f1f
JS
1871 "2887 Reset Needed: Attempting Port "
1872 "Recovery...\n");
3ba6216a
JS
1873
1874 /* If in no-wait mode, the HBA has been reset and is not
a9978e39
JS
1875 * functional, thus we should clear
1876 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
3ba6216a
JS
1877 */
1878 if (mbx_action == LPFC_MBX_NO_WAIT) {
1879 spin_lock_irq(&phba->hbalock);
1880 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
a9978e39
JS
1881 if (phba->sli.mbox_active) {
1882 mboxq = phba->sli.mbox_active;
1883 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
1884 __lpfc_mbox_cmpl_put(phba, mboxq);
1885 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1886 phba->sli.mbox_active = NULL;
1887 }
3ba6216a
JS
1888 spin_unlock_irq(&phba->hbalock);
1889 }
1890
65791f1f 1891 lpfc_offline_prep(phba, mbx_action);
c00f62e6 1892 lpfc_sli_flush_io_rings(phba);
65791f1f
JS
1893 lpfc_offline(phba);
1894 /* release interrupt for possible resource change */
1895 lpfc_sli4_disable_intr(phba);
5a9eeff5
JS
1896 rc = lpfc_sli_brdrestart(phba);
1897 if (rc) {
372c187b 1898 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5a9eeff5
JS
1899 "6309 Failed to restart board\n");
1900 return rc;
1901 }
65791f1f
JS
1902 /* request and enable interrupt */
1903 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1904 if (intr_mode == LPFC_INTR_ERROR) {
372c187b 1905 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
65791f1f
JS
1906 "3175 Failed to enable interrupt\n");
1907 return -EIO;
618a5230 1908 }
65791f1f
JS
1909 phba->intr_mode = intr_mode;
1910 rc = lpfc_online(phba);
1911 if (rc == 0)
1912 lpfc_unblock_mgmt_io(phba);
1913
618a5230
JS
1914 return rc;
1915}
1916
da0436e9
JS
1917/**
1918 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1919 * @phba: pointer to lpfc hba data structure.
1920 *
1921 * This routine is invoked to handle the SLI4 HBA hardware error attention
1922 * conditions.
1923 **/
1924static void
1925lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1926{
1927 struct lpfc_vport *vport = phba->pport;
1928 uint32_t event_data;
1929 struct Scsi_Host *shost;
2fcee4bf 1930 uint32_t if_type;
2e90f4b5
JS
1931 struct lpfc_register portstat_reg = {0};
1932 uint32_t reg_err1, reg_err2;
1933 uint32_t uerrlo_reg, uemasklo_reg;
65791f1f 1934 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
e10b2022 1935 bool en_rn_msg = true;
946727dc 1936 struct temp_event temp_event_data;
65791f1f
JS
1937 struct lpfc_register portsmphr_reg;
1938 int rc, i;
da0436e9
JS
1939
1940 /* If the pci channel is offline, ignore possible errors, since
1941 * we cannot communicate with the pci card anyway.
1942 */
32a93100 1943 if (pci_channel_offline(phba->pcidev)) {
372c187b 1944 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
32a93100
JS
1945 "3166 pci channel is offline\n");
1946 lpfc_sli4_offline_eratt(phba);
da0436e9 1947 return;
32a93100 1948 }
da0436e9 1949
65791f1f 1950 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
2fcee4bf
JS
1951 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1952 switch (if_type) {
1953 case LPFC_SLI_INTF_IF_TYPE_0:
2e90f4b5
JS
1954 pci_rd_rc1 = lpfc_readl(
1955 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1956 &uerrlo_reg);
1957 pci_rd_rc2 = lpfc_readl(
1958 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1959 &uemasklo_reg);
1960 /* consider PCI bus read error as pci_channel_offline */
1961 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1962 return;
65791f1f
JS
1963 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1964 lpfc_sli4_offline_eratt(phba);
1965 return;
1966 }
372c187b 1967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
65791f1f
JS
1968 "7623 Checking UE recoverable");
1969
1970 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1971 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1972 &portsmphr_reg.word0))
1973 continue;
1974
1975 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1976 &portsmphr_reg);
1977 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1978 LPFC_PORT_SEM_UE_RECOVERABLE)
1979 break;
1980 /* Sleep for 1 sec before checking the semaphore */
1981 msleep(1000);
1982 }
1983
372c187b 1984 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
65791f1f
JS
1985 "4827 smphr_port_status x%x : Waited %dSec",
1986 smphr_port_status, i);
1987
1988 /* Recoverable UE, reset the HBA device */
1989 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1990 LPFC_PORT_SEM_UE_RECOVERABLE) {
1991 for (i = 0; i < 20; i++) {
1992 msleep(1000);
1993 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1994 &portsmphr_reg.word0) &&
1995 (LPFC_POST_STAGE_PORT_READY ==
1996 bf_get(lpfc_port_smphr_port_status,
1997 &portsmphr_reg))) {
1998 rc = lpfc_sli4_port_sta_fn_reset(phba,
1999 LPFC_MBX_NO_WAIT, en_rn_msg);
2000 if (rc == 0)
2001 return;
372c187b
DK
2002 lpfc_printf_log(phba, KERN_ERR,
2003 LOG_TRACE_EVENT,
65791f1f
JS
2004 "4215 Failed to recover UE");
2005 break;
2006 }
2007 }
2008 }
372c187b 2009 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
65791f1f
JS
2010 "7624 Firmware not ready: Failing UE recovery,"
2011 " waited %dSec", i);
8c24a4f6 2012 phba->link_state = LPFC_HBA_ERROR;
2fcee4bf 2013 break;
946727dc 2014
2fcee4bf 2015 case LPFC_SLI_INTF_IF_TYPE_2:
27d6ac0a 2016 case LPFC_SLI_INTF_IF_TYPE_6:
2e90f4b5
JS
2017 pci_rd_rc1 = lpfc_readl(
2018 phba->sli4_hba.u.if_type2.STATUSregaddr,
2019 &portstat_reg.word0);
2020 /* consider PCI bus read error as pci_channel_offline */
6b5151fd 2021 if (pci_rd_rc1 == -EIO) {
372c187b 2022 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6b5151fd
JS
2023 "3151 PCI bus read access failure: x%x\n",
2024 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
32a93100 2025 lpfc_sli4_offline_eratt(phba);
2e90f4b5 2026 return;
6b5151fd 2027 }
2e90f4b5
JS
2028 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2029 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2fcee4bf 2030 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
372c187b
DK
2031 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2032 "2889 Port Overtemperature event, "
2033 "taking port offline Data: x%x x%x\n",
2034 reg_err1, reg_err2);
946727dc 2035
310429ef 2036 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
946727dc
JS
2037 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2038 temp_event_data.event_code = LPFC_CRIT_TEMP;
2039 temp_event_data.data = 0xFFFFFFFF;
2040
2041 shost = lpfc_shost_from_vport(phba->pport);
2042 fc_host_post_vendor_event(shost, fc_get_event_number(),
2043 sizeof(temp_event_data),
2044 (char *)&temp_event_data,
2045 SCSI_NL_VID_TYPE_PCI
2046 | PCI_VENDOR_ID_EMULEX);
2047
2fcee4bf
JS
2048 spin_lock_irq(&phba->hbalock);
2049 phba->over_temp_state = HBA_OVER_TEMP;
2050 spin_unlock_irq(&phba->hbalock);
2051 lpfc_sli4_offline_eratt(phba);
946727dc 2052 return;
2fcee4bf 2053 }
2e90f4b5 2054 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
e10b2022 2055 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
372c187b 2056 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
e10b2022
JS
2057 "3143 Port Down: Firmware Update "
2058 "Detected\n");
2059 en_rn_msg = false;
2060 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2e90f4b5 2061 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
372c187b 2062 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2e90f4b5
JS
2063 "3144 Port Down: Debug Dump\n");
2064 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2065 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
372c187b 2066 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2e90f4b5 2067 "3145 Port Down: Provisioning\n");
618a5230 2068
946727dc
JS
2069 /* If resets are disabled then leave the HBA alone and return */
2070 if (!phba->cfg_enable_hba_reset)
2071 return;
2072
618a5230 2073 /* Check port status register for function reset */
e10b2022
JS
2074 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2075 en_rn_msg);
618a5230
JS
2076 if (rc == 0) {
2077 /* don't report event on forced debug dump */
2078 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2079 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2080 return;
2081 else
2082 break;
2fcee4bf 2083 }
618a5230 2084 /* fall through for not able to recover */
372c187b 2085 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8c24a4f6
JS
2086 "3152 Unrecoverable error\n");
2087 phba->link_state = LPFC_HBA_ERROR;
2fcee4bf
JS
2088 break;
2089 case LPFC_SLI_INTF_IF_TYPE_1:
2090 default:
2091 break;
2092 }
2e90f4b5
JS
2093 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2094 "3123 Report dump event to upper layer\n");
2095 /* Send an internal error event to mgmt application */
2096 lpfc_board_errevt_to_mgmt(phba);
2097
2098 event_data = FC_REG_DUMP_EVENT;
2099 shost = lpfc_shost_from_vport(vport);
2100 fc_host_post_vendor_event(shost, fc_get_event_number(),
2101 sizeof(event_data), (char *) &event_data,
2102 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
da0436e9
JS
2103}
2104
2105/**
2106 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2107 * @phba: pointer to lpfc HBA data structure.
2108 *
2109 * This routine wraps the actual SLI3 or SLI4 HBA error attention handler,
2110 * invoked through the API jump table function pointer in the lpfc_hba struct.
2111 *
2112 * Return codes
af901ca1 2113 * 0 - success.
da0436e9
JS
2114 * Any other value - error.
2115 **/
2116void
2117lpfc_handle_eratt(struct lpfc_hba *phba)
2118{
2119 (*phba->lpfc_handle_eratt)(phba);
2120}
2121
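The one-line wrapper above is the driver's jump-table idiom: an SLI-revision-specific handler is bound to a function pointer in the lpfc_hba structure once at init time (elsewhere in this file), so callers never branch on the revision. A minimal sketch of the idiom with hypothetical my_* names:

struct my_hba {
	int sli_rev;
	void (*handle_eratt)(struct my_hba *);	/* revision-specific handler */
};

static void my_handle_eratt_s3(struct my_hba *hba) { /* SLI3 path */ }
static void my_handle_eratt_s4(struct my_hba *hba) { /* SLI4 path */ }

/* Bind once while setting up the API table ... */
static void my_api_table_setup(struct my_hba *hba)
{
	hba->handle_eratt = (hba->sli_rev == 4) ? my_handle_eratt_s4
						: my_handle_eratt_s3;
}

/* ... then every caller dispatches without knowing the revision. */
static void my_handle_eratt(struct my_hba *hba)
{
	(*hba->handle_eratt)(hba);
}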
e59058c4 2122/**
3621a710 2123 * lpfc_handle_latt - The HBA link event handler
e59058c4
JS
2124 * @phba: pointer to lpfc hba data structure.
2125 *
2126 * This routine is invoked from the worker thread to handle a HBA host
895427bd 2127 * attention link event. SLI3 only.
e59058c4 2128 **/
dea3101e 2129void
2e0fef85 2130lpfc_handle_latt(struct lpfc_hba *phba)
dea3101e 2131{
2e0fef85
JS
2132 struct lpfc_vport *vport = phba->pport;
2133 struct lpfc_sli *psli = &phba->sli;
dea3101e 2134 LPFC_MBOXQ_t *pmb;
2135 volatile uint32_t control;
2136 struct lpfc_dmabuf *mp;
09372820 2137 int rc = 0;
dea3101e 2138
2139 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
09372820
JS
2140 if (!pmb) {
2141 rc = 1;
dea3101e 2142 goto lpfc_handle_latt_err_exit;
09372820 2143 }
dea3101e 2144
2145 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
09372820
JS
2146 if (!mp) {
2147 rc = 2;
dea3101e 2148 goto lpfc_handle_latt_free_pmb;
09372820 2149 }
dea3101e 2150
2151 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
09372820
JS
2152 if (!mp->virt) {
2153 rc = 3;
dea3101e 2154 goto lpfc_handle_latt_free_mp;
09372820 2155 }
dea3101e 2156
6281bfe0 2157 /* Cleanup any outstanding ELS commands */
549e55cd 2158 lpfc_els_flush_all_cmd(phba);
dea3101e 2159
2160 psli->slistat.link_event++;
76a95d75
JS
2161 lpfc_read_topology(phba, pmb, mp);
2162 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2e0fef85 2163 pmb->vport = vport;
0d2b6b83 2164 /* Block ELS IOCBs until we have processed this mbox command */
895427bd 2165 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
0b727fea 2166 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
09372820
JS
2167 if (rc == MBX_NOT_FINISHED) {
2168 rc = 4;
14691150 2169 goto lpfc_handle_latt_free_mbuf;
09372820 2170 }
dea3101e 2171
2172 /* Clear Link Attention in HA REG */
2e0fef85 2173 spin_lock_irq(&phba->hbalock);
dea3101e 2174 writel(HA_LATT, phba->HAregaddr);
2175 readl(phba->HAregaddr); /* flush */
2e0fef85 2176 spin_unlock_irq(&phba->hbalock);
dea3101e 2177
2178 return;
2179
14691150 2180lpfc_handle_latt_free_mbuf:
895427bd 2181 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
14691150 2182 lpfc_mbuf_free(phba, mp->virt, mp->phys);
dea3101e 2183lpfc_handle_latt_free_mp:
2184 kfree(mp);
2185lpfc_handle_latt_free_pmb:
1dcb58e5 2186 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 2187lpfc_handle_latt_err_exit:
2188 /* Enable Link attention interrupts */
2e0fef85 2189 spin_lock_irq(&phba->hbalock);
dea3101e 2190 psli->sli_flag |= LPFC_PROCESS_LA;
2191 control = readl(phba->HCregaddr);
2192 control |= HC_LAINT_ENA;
2193 writel(control, phba->HCregaddr);
2194 readl(phba->HCregaddr); /* flush */
2195
2196 /* Clear Link Attention in HA REG */
2197 writel(HA_LATT, phba->HAregaddr);
2198 readl(phba->HAregaddr); /* flush */
2e0fef85 2199 spin_unlock_irq(&phba->hbalock);
dea3101e 2200 lpfc_linkdown(phba);
2e0fef85 2201 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2202
372c187b
DK
2203 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2204 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
dea3101e 2205
2206 return;
2207}
2208
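lpfc_handle_latt above uses the kernel's goto error-ladder idiom: each allocation step jumps, on failure, to a label that frees exactly what was acquired before it, so the cleanup is written once, in reverse order. A hedged minimal sketch of the shape (hypothetical names, not driver code):

#include <linux/errno.h>
#include <linux/slab.h>

static int my_setup(void **out_a, void **out_b)
{
	void *a, *b;
	int rc;

	a = kmalloc(64, GFP_KERNEL);
	if (!a) {
		rc = -ENOMEM;
		goto err_exit;		/* nothing acquired yet */
	}

	b = kmalloc(64, GFP_KERNEL);
	if (!b) {
		rc = -ENOMEM;
		goto free_a;		/* undo only the first step */
	}

	*out_a = a;
	*out_b = b;
	return 0;

free_a:
	kfree(a);
err_exit:
	return rc;
}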
e59058c4 2209/**
3621a710 2210 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
e59058c4
JS
2211 * @phba: pointer to lpfc hba data structure.
2212 * @vpd: pointer to the vital product data.
2213 * @len: length of the vital product data in bytes.
2214 *
2215 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2216 * an array of characters. In this routine, the ModelName, ProgramType,
2217 * ModelDesc, and other fields of the phba data structure are populated.
2218 *
2219 * Return codes
2220 * 0 - pointer to the VPD passed in is NULL
2221 * 1 - success
2222 **/
3772a991 2223int
2e0fef85 2224lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
dea3101e 2225{
2226 uint8_t lenlo, lenhi;
07da60c1 2227 int Length;
dea3101e 2228 int i, j;
2229 int finished = 0;
2230 int index = 0;
2231
2232 if (!vpd)
2233 return 0;
2234
2235 /* Vital Product */
ed957684 2236 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011 2237 "0455 Vital Product Data: x%x x%x x%x x%x\n",
dea3101e 2238 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2239 (uint32_t) vpd[3]);
74b72a59 2240 while (!finished && (index < (len - 4))) {
dea3101e 2241 switch (vpd[index]) {
2242 case 0x82:
74b72a59 2243 case 0x91:
dea3101e 2244 index += 1;
2245 lenlo = vpd[index];
2246 index += 1;
2247 lenhi = vpd[index];
2248 index += 1;
2249 i = ((((unsigned short)lenhi) << 8) + lenlo);
2250 index += i;
2251 break;
2252 case 0x90:
2253 index += 1;
2254 lenlo = vpd[index];
2255 index += 1;
2256 lenhi = vpd[index];
2257 index += 1;
2258 Length = ((((unsigned short)lenhi) << 8) + lenlo);
74b72a59
JW
2259 if (Length > len - index)
2260 Length = len - index;
dea3101e 2261 while (Length > 0) {
2262 /* Look for Serial Number */
2263 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2264 index += 2;
2265 i = vpd[index];
2266 index += 1;
2267 j = 0;
2268 Length -= (3+i);
2269 while (i--) {
2270 phba->SerialNumber[j++] = vpd[index++];
2271 if (j == 31)
2272 break;
2273 }
2274 phba->SerialNumber[j] = 0;
2275 continue;
2276 }
2277 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2278 phba->vpd_flag |= VPD_MODEL_DESC;
2279 index += 2;
2280 i = vpd[index];
2281 index += 1;
2282 j = 0;
2283 Length -= (3+i);
2284 while (i--) {
2285 phba->ModelDesc[j++] = vpd[index++];
2286 if (j == 255)
2287 break;
2288 }
2289 phba->ModelDesc[j] = 0;
2290 continue;
2291 }
2292 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2293 phba->vpd_flag |= VPD_MODEL_NAME;
2294 index += 2;
2295 i = vpd[index];
2296 index += 1;
2297 j = 0;
2298 Length -= (3+i);
2299 while (i--) {
2300 phba->ModelName[j++] = vpd[index++];
2301 if (j == 79)
2302 break;
2303 }
2304 phba->ModelName[j] = 0;
2305 continue;
2306 }
2307 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2308 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2309 index += 2;
2310 i = vpd[index];
2311 index += 1;
2312 j = 0;
2313 Length -= (3+i);
2314 while (i--) {
2315 phba->ProgramType[j++] = vpd[index++];
2316 if (j == 255)
2317 break;
2318 }
2319 phba->ProgramType[j] = 0;
2320 continue;
2321 }
2322 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2323 phba->vpd_flag |= VPD_PORT;
2324 index += 2;
2325 i = vpd[index];
2326 index += 1;
2327 j = 0;
2328 Length -= (3+i);
2329 while (i--) {
cd1c8301
JS
2330 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2331 (phba->sli4_hba.pport_name_sta ==
2332 LPFC_SLI4_PPNAME_GET)) {
2333 j++;
2334 index++;
2335 } else
2336 phba->Port[j++] = vpd[index++];
2337 if (j == 19)
2338 break;
dea3101e 2339 }
cd1c8301
JS
2340 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2341 (phba->sli4_hba.pport_name_sta ==
2342 LPFC_SLI4_PPNAME_NON))
2343 phba->Port[j] = 0;
dea3101e 2344 continue;
2345 }
2346 else {
2347 index += 2;
2348 i = vpd[index];
2349 index += 1;
2350 index += i;
2351 Length -= (3 + i);
2352 }
2353 }
2354 finished = 0;
2355 break;
2356 case 0x78:
2357 finished = 1;
2358 break;
2359 default:
2360 index++;
2361 break;
2362 }
74b72a59 2363 }
dea3101e 2364
2365 return 1;
2366}
2367
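The parser above walks classic PCI VPD: a tag byte starts each descriptor (0x82/0x91 identifier strings, 0x90 the read-only area, 0x78 the end tag), large tags carry a little-endian 16-bit length, and the 0x90 area is a list of two-character keywords ('SN', 'V1' ... 'V4') each followed by a one-byte length. A stripped-down userspace sketch of the same tag walk, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Print every keyword found in the 0x90 (read-only) VPD area. */
static void vpd_walk(const uint8_t *vpd, int len)
{
	int index = 0;

	while (index < len - 4) {
		uint8_t tag = vpd[index];
		int dlen;

		if (tag == 0x78)		/* end tag */
			return;
		if (tag != 0x82 && tag != 0x90 && tag != 0x91) {
			index++;		/* resync one byte at a time */
			continue;
		}
		dlen = vpd[index + 1] | (vpd[index + 2] << 8); /* LE length */
		index += 3;
		if (tag != 0x90) {
			index += dlen;		/* skip identifier strings */
			continue;
		}
		/* Keyword list: 2-char name, 1-byte length, then the data. */
		int end = index + dlen < len ? index + dlen : len;

		while (index + 3 <= end) {
			printf("keyword %c%c, %d bytes\n",
			       vpd[index], vpd[index + 1], vpd[index + 2]);
			index += 3 + vpd[index + 2];
		}
	}
}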
e59058c4 2368/**
3621a710 2369 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
e59058c4
JS
2370 * @phba: pointer to lpfc hba data structure.
2371 * @mdp: pointer to the data structure to hold the derived model name.
2372 * @descp: pointer to the data structure to hold the derived description.
2373 *
2374 * This routine retrieves HBA's description based on its registered PCI device
2375 * ID. The @descp passed into this function points to an array of 256 chars. It
2376 * shall be returned with the model name, maximum speed, and the host bus type.
2377 * The @mdp passed into this function points to an array of 80 chars. When the
2378 * function returns, the @mdp will be filled with the model name.
2379 **/
dea3101e 2380static void
2e0fef85 2381lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
dea3101e 2382{
2383 lpfc_vpd_t *vp;
fefcb2b6 2384 uint16_t dev_id = phba->pcidev->device;
74b72a59 2385 int max_speed;
84774a4d 2386 int GE = 0;
da0436e9 2387 int oneConnect = 0; /* default is not a oneConnect */
74b72a59 2388 struct {
a747c9ce
JS
2389 char *name;
2390 char *bus;
2391 char *function;
2392 } m = {"<Unknown>", "", ""};
74b72a59
JW
2393
2394 if (mdp && mdp[0] != '\0'
2395 && descp && descp[0] != '\0')
2396 return;
2397
fbd8a6ba
JS
2398 if (phba->lmt & LMT_64Gb)
2399 max_speed = 64;
2400 else if (phba->lmt & LMT_32Gb)
d38dd52c
JS
2401 max_speed = 32;
2402 else if (phba->lmt & LMT_16Gb)
c0c11512
JS
2403 max_speed = 16;
2404 else if (phba->lmt & LMT_10Gb)
74b72a59
JW
2405 max_speed = 10;
2406 else if (phba->lmt & LMT_8Gb)
2407 max_speed = 8;
2408 else if (phba->lmt & LMT_4Gb)
2409 max_speed = 4;
2410 else if (phba->lmt & LMT_2Gb)
2411 max_speed = 2;
4169d868 2412 else if (phba->lmt & LMT_1Gb)
74b72a59 2413 max_speed = 1;
4169d868
JS
2414 else
2415 max_speed = 0;
dea3101e 2416
2417 vp = &phba->vpd;
dea3101e 2418
e4adb204 2419 switch (dev_id) {
06325e74 2420 case PCI_DEVICE_ID_FIREFLY:
12222f4f
JS
2421 m = (typeof(m)){"LP6000", "PCI",
2422 "Obsolete, Unsupported Fibre Channel Adapter"};
06325e74 2423 break;
dea3101e 2424 case PCI_DEVICE_ID_SUPERFLY:
2425 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
12222f4f 2426 m = (typeof(m)){"LP7000", "PCI", ""};
dea3101e 2427 else
12222f4f
JS
2428 m = (typeof(m)){"LP7000E", "PCI", ""};
2429 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
dea3101e 2430 break;
2431 case PCI_DEVICE_ID_DRAGONFLY:
a747c9ce 2432 m = (typeof(m)){"LP8000", "PCI",
12222f4f 2433 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2434 break;
2435 case PCI_DEVICE_ID_CENTAUR:
2436 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
12222f4f 2437 m = (typeof(m)){"LP9002", "PCI", ""};
dea3101e 2438 else
12222f4f
JS
2439 m = (typeof(m)){"LP9000", "PCI", ""};
2440 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
dea3101e 2441 break;
2442 case PCI_DEVICE_ID_RFLY:
a747c9ce 2443 m = (typeof(m)){"LP952", "PCI",
12222f4f 2444 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2445 break;
2446 case PCI_DEVICE_ID_PEGASUS:
a747c9ce 2447 m = (typeof(m)){"LP9802", "PCI-X",
12222f4f 2448 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2449 break;
2450 case PCI_DEVICE_ID_THOR:
a747c9ce 2451 m = (typeof(m)){"LP10000", "PCI-X",
12222f4f 2452 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2453 break;
2454 case PCI_DEVICE_ID_VIPER:
a747c9ce 2455 m = (typeof(m)){"LPX1000", "PCI-X",
12222f4f 2456 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2457 break;
2458 case PCI_DEVICE_ID_PFLY:
a747c9ce 2459 m = (typeof(m)){"LP982", "PCI-X",
12222f4f 2460 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2461 break;
2462 case PCI_DEVICE_ID_TFLY:
a747c9ce 2463 m = (typeof(m)){"LP1050", "PCI-X",
12222f4f 2464 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2465 break;
2466 case PCI_DEVICE_ID_HELIOS:
a747c9ce 2467 m = (typeof(m)){"LP11000", "PCI-X2",
12222f4f 2468 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2469 break;
e4adb204 2470 case PCI_DEVICE_ID_HELIOS_SCSP:
a747c9ce 2471 m = (typeof(m)){"LP11000-SP", "PCI-X2",
12222f4f 2472 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2473 break;
2474 case PCI_DEVICE_ID_HELIOS_DCSP:
a747c9ce 2475 m = (typeof(m)){"LP11002-SP", "PCI-X2",
12222f4f 2476 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2477 break;
2478 case PCI_DEVICE_ID_NEPTUNE:
12222f4f
JS
2479 m = (typeof(m)){"LPe1000", "PCIe",
2480 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2481 break;
2482 case PCI_DEVICE_ID_NEPTUNE_SCSP:
12222f4f
JS
2483 m = (typeof(m)){"LPe1000-SP", "PCIe",
2484 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2485 break;
2486 case PCI_DEVICE_ID_NEPTUNE_DCSP:
12222f4f
JS
2487 m = (typeof(m)){"LPe1002-SP", "PCIe",
2488 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204 2489 break;
dea3101e 2490 case PCI_DEVICE_ID_BMID:
a747c9ce 2491 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
dea3101e 2492 break;
2493 case PCI_DEVICE_ID_BSMB:
12222f4f
JS
2494 m = (typeof(m)){"LP111", "PCI-X2",
2495 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2496 break;
2497 case PCI_DEVICE_ID_ZEPHYR:
a747c9ce 2498 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
dea3101e 2499 break;
e4adb204 2500 case PCI_DEVICE_ID_ZEPHYR_SCSP:
a747c9ce 2501 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
e4adb204
JSEC
2502 break;
2503 case PCI_DEVICE_ID_ZEPHYR_DCSP:
a747c9ce 2504 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
a257bf90 2505 GE = 1;
e4adb204 2506 break;
dea3101e 2507 case PCI_DEVICE_ID_ZMID:
a747c9ce 2508 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
dea3101e 2509 break;
2510 case PCI_DEVICE_ID_ZSMB:
a747c9ce 2511 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
dea3101e 2512 break;
2513 case PCI_DEVICE_ID_LP101:
12222f4f
JS
2514 m = (typeof(m)){"LP101", "PCI-X",
2515 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2516 break;
2517 case PCI_DEVICE_ID_LP10000S:
12222f4f
JS
2518 m = (typeof(m)){"LP10000-S", "PCI",
2519 "Obsolete, Unsupported Fibre Channel Adapter"};
06325e74 2520 break;
e4adb204 2521 case PCI_DEVICE_ID_LP11000S:
12222f4f
JS
2522 m = (typeof(m)){"LP11000-S", "PCI-X2",
2523 "Obsolete, Unsupported Fibre Channel Adapter"};
18a3b596 2524 break;
e4adb204 2525 case PCI_DEVICE_ID_LPE11000S:
12222f4f
JS
2526 m = (typeof(m)){"LPe11000-S", "PCIe",
2527 "Obsolete, Unsupported Fibre Channel Adapter"};
5cc36b3c 2528 break;
b87eab38 2529 case PCI_DEVICE_ID_SAT:
a747c9ce 2530 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2531 break;
2532 case PCI_DEVICE_ID_SAT_MID:
a747c9ce 2533 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2534 break;
2535 case PCI_DEVICE_ID_SAT_SMB:
a747c9ce 2536 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2537 break;
2538 case PCI_DEVICE_ID_SAT_DCSP:
a747c9ce 2539 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2540 break;
2541 case PCI_DEVICE_ID_SAT_SCSP:
a747c9ce 2542 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2543 break;
2544 case PCI_DEVICE_ID_SAT_S:
a747c9ce 2545 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
b87eab38 2546 break;
84774a4d 2547 case PCI_DEVICE_ID_HORNET:
12222f4f
JS
2548 m = (typeof(m)){"LP21000", "PCIe",
2549 "Obsolete, Unsupported FCoE Adapter"};
84774a4d
JS
2550 GE = 1;
2551 break;
2552 case PCI_DEVICE_ID_PROTEUS_VF:
a747c9ce 2553 m = (typeof(m)){"LPev12000", "PCIe IOV",
12222f4f 2554 "Obsolete, Unsupported Fibre Channel Adapter"};
84774a4d
JS
2555 break;
2556 case PCI_DEVICE_ID_PROTEUS_PF:
a747c9ce 2557 m = (typeof(m)){"LPev12000", "PCIe IOV",
12222f4f 2558 "Obsolete, Unsupported Fibre Channel Adapter"};
84774a4d
JS
2559 break;
2560 case PCI_DEVICE_ID_PROTEUS_S:
a747c9ce 2561 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
12222f4f 2562 "Obsolete, Unsupported Fibre Channel Adapter"};
84774a4d 2563 break;
da0436e9
JS
2564 case PCI_DEVICE_ID_TIGERSHARK:
2565 oneConnect = 1;
a747c9ce 2566 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
da0436e9 2567 break;
a747c9ce 2568 case PCI_DEVICE_ID_TOMCAT:
6669f9bb 2569 oneConnect = 1;
a747c9ce
JS
2570 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2571 break;
2572 case PCI_DEVICE_ID_FALCON:
2573 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2574 "EmulexSecure Fibre"};
6669f9bb 2575 break;
98fc5dd9
JS
2576 case PCI_DEVICE_ID_BALIUS:
2577 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
12222f4f 2578 "Obsolete, Unsupported Fibre Channel Adapter"};
98fc5dd9 2579 break;
085c647c 2580 case PCI_DEVICE_ID_LANCER_FC:
c0c11512 2581 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
085c647c 2582 break;
12222f4f
JS
2583 case PCI_DEVICE_ID_LANCER_FC_VF:
2584 m = (typeof(m)){"LPe16000", "PCIe",
2585 "Obsolete, Unsupported Fibre Channel Adapter"};
2586 break;
085c647c
JS
2587 case PCI_DEVICE_ID_LANCER_FCOE:
2588 oneConnect = 1;
079b5c91 2589 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
085c647c 2590 break;
12222f4f
JS
2591 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2592 oneConnect = 1;
2593 m = (typeof(m)){"OCe15100", "PCIe",
2594 "Obsolete, Unsupported FCoE"};
2595 break;
d38dd52c
JS
2596 case PCI_DEVICE_ID_LANCER_G6_FC:
2597 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2598 break;
c238b9b6
JS
2599 case PCI_DEVICE_ID_LANCER_G7_FC:
2600 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2601 break;
f449a3d7
JS
2602 case PCI_DEVICE_ID_LANCER_G7P_FC:
2603 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2604 break;
f8cafd38
JS
2605 case PCI_DEVICE_ID_SKYHAWK:
2606 case PCI_DEVICE_ID_SKYHAWK_VF:
2607 oneConnect = 1;
2608 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2609 break;
5cc36b3c 2610 default:
a747c9ce 2611 m = (typeof(m)){"Unknown", "", ""};
e4adb204 2612 break;
dea3101e 2613 }
74b72a59
JW
2614
2615 if (mdp && mdp[0] == '\0')
2616 snprintf(mdp, 79,"%s", m.name);
c0c11512
JS
2617 /*
2618 * oneConnect HBAs require special processing; they are all initiators
da0436e9
JS
2619 * and we put the port number on the end.
2620 */
2621 if (descp && descp[0] == '\0') {
2622 if (oneConnect)
2623 snprintf(descp, 255,
4169d868 2624 "Emulex OneConnect %s, %s Initiator %s",
a747c9ce 2625 m.name, m.function,
da0436e9 2626 phba->Port);
4169d868
JS
2627 else if (max_speed == 0)
2628 snprintf(descp, 255,
290237d2 2629 "Emulex %s %s %s",
4169d868 2630 m.name, m.bus, m.function);
da0436e9
JS
2631 else
2632 snprintf(descp, 255,
2633 "Emulex %s %d%s %s %s",
a747c9ce
JS
2634 m.name, max_speed, (GE) ? "GE" : "Gb",
2635 m.bus, m.function);
da0436e9 2636 }
dea3101e 2637}
2638
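The long switch above leans on a compact GNU C idiom: m is a variable of an anonymous struct type, and each case assigns all three fields in one statement through a compound literal cast with typeof(m). A small hedged sketch of the same idiom (userspace, gcc/clang; the strings are arbitrary examples):

#include <stdio.h>

int main(void)
{
	/* Anonymous struct type; typeof() lets compound literals name it. */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = { "<Unknown>", "", "" };

	/* One statement replaces three member assignments, as in the switch. */
	m = (typeof(m)){ "LPe32000", "PCIe", "Fibre Channel Adapter" };
	printf("%s %s %s\n", m.name, m.bus, m.function);
	return 0;
}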
e59058c4 2639/**
3621a710 2640 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
e59058c4
JS
2641 * @phba: pointer to lpfc hba data structure.
2642 * @pring: pointer to an IOCB ring.
2643 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2644 *
2645 * This routine posts a given number of IOCBs with the associated DMA buffer
2646 * descriptors specified by the cnt argument to the given IOCB ring.
2647 *
2648 * Return codes
2649 * The number of IOCBs NOT able to be posted to the IOCB ring.
2650 **/
dea3101e 2651int
495a714c 2652lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
dea3101e 2653{
2654 IOCB_t *icmd;
0bd4ca25 2655 struct lpfc_iocbq *iocb;
dea3101e 2656 struct lpfc_dmabuf *mp1, *mp2;
2657
2658 cnt += pring->missbufcnt;
2659
2660 /* While there are buffers to post */
2661 while (cnt > 0) {
2662 /* Allocate buffer for command iocb */
0bd4ca25 2663 iocb = lpfc_sli_get_iocbq(phba);
dea3101e 2664 if (iocb == NULL) {
2665 pring->missbufcnt = cnt;
2666 return cnt;
2667 }
dea3101e 2668 icmd = &iocb->iocb;
2669
2670 /* 2 buffers can be posted per command */
2671 /* Allocate buffer to post */
2672 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2673 if (mp1)
98c9ea5c
JS
2674 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2675 if (!mp1 || !mp1->virt) {
c9475cb0 2676 kfree(mp1);
604a3e30 2677 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2678 pring->missbufcnt = cnt;
2679 return cnt;
2680 }
2681
2682 INIT_LIST_HEAD(&mp1->list);
2683 /* Allocate buffer to post */
2684 if (cnt > 1) {
2685 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2686 if (mp2)
2687 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2688 &mp2->phys);
98c9ea5c 2689 if (!mp2 || !mp2->virt) {
c9475cb0 2690 kfree(mp2);
dea3101e 2691 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2692 kfree(mp1);
604a3e30 2693 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2694 pring->missbufcnt = cnt;
2695 return cnt;
2696 }
2697
2698 INIT_LIST_HEAD(&mp2->list);
2699 } else {
2700 mp2 = NULL;
2701 }
2702
2703 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2704 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2705 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2706 icmd->ulpBdeCount = 1;
2707 cnt--;
2708 if (mp2) {
2709 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2710 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2711 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2712 cnt--;
2713 icmd->ulpBdeCount = 2;
2714 }
2715
2716 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2717 icmd->ulpLe = 1;
2718
3772a991
JS
2719 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2720 IOCB_ERROR) {
dea3101e 2721 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2722 kfree(mp1);
2723 cnt++;
2724 if (mp2) {
2725 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2726 kfree(mp2);
2727 cnt++;
2728 }
604a3e30 2729 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2730 pring->missbufcnt = cnt;
dea3101e 2731 return cnt;
2732 }
dea3101e 2733 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
92d7f7b0 2734 if (mp2)
dea3101e 2735 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
dea3101e 2736 }
2737 pring->missbufcnt = 0;
2738 return 0;
2739}
2740
e59058c4 2741/**
3621a710 2742 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
e59058c4
JS
2743 * @phba: pointer to lpfc hba data structure.
2744 *
2745 * This routine posts initial receive IOCB buffers to the ELS ring. The
2746 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
895427bd 2747 * set to 64 IOCBs. SLI3 only.
e59058c4
JS
2748 *
2749 * Return codes
2750 * 0 - success (currently always success)
2751 **/
dea3101e 2752static int
2e0fef85 2753lpfc_post_rcv_buf(struct lpfc_hba *phba)
dea3101e 2754{
2755 struct lpfc_sli *psli = &phba->sli;
2756
2757 /* Ring 0, ELS / CT buffers */
895427bd 2758 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
dea3101e 2759 /* Ring 2 - FCP no buffers needed */
2760
2761 return 0;
2762}
2763
2764#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2765
e59058c4 2766/**
3621a710 2767 * lpfc_sha_init - Set up initial array of hash table entries
e59058c4
JS
2768 * @HashResultPointer: pointer to an array as hash table.
2769 *
2770 * This routine sets up the initial values to the array of hash table entries
2771 * for the LC HBAs.
2772 **/
dea3101e 2773static void
2774lpfc_sha_init(uint32_t * HashResultPointer)
2775{
2776 HashResultPointer[0] = 0x67452301;
2777 HashResultPointer[1] = 0xEFCDAB89;
2778 HashResultPointer[2] = 0x98BADCFE;
2779 HashResultPointer[3] = 0x10325476;
2780 HashResultPointer[4] = 0xC3D2E1F0;
2781}
2782
e59058c4 2783/**
3621a710 2784 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
e59058c4
JS
2785 * @HashResultPointer: pointer to an initial/result hash table.
2786 * @HashWorkingPointer: pointer to a working hash table.
2787 *
2788 * This routine iterates an initial hash table pointed to by @HashResultPointer
2789 * with the values from the working hash table pointed to by @HashWorkingPointer.
2790 * The results are put back into the initial hash table, returned through
2791 * @HashResultPointer as the result hash table.
2792 **/
dea3101e 2793static void
2794lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2795{
2796 int t;
2797 uint32_t TEMP;
2798 uint32_t A, B, C, D, E;
2799 t = 16;
2800 do {
2801 HashWorkingPointer[t] =
2802 S(1,
2803 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2804 8] ^
2805 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2806 } while (++t <= 79);
2807 t = 0;
2808 A = HashResultPointer[0];
2809 B = HashResultPointer[1];
2810 C = HashResultPointer[2];
2811 D = HashResultPointer[3];
2812 E = HashResultPointer[4];
2813
2814 do {
2815 if (t < 20) {
2816 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2817 } else if (t < 40) {
2818 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2819 } else if (t < 60) {
2820 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2821 } else {
2822 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2823 }
2824 TEMP += S(5, A) + E + HashWorkingPointer[t];
2825 E = D;
2826 D = C;
2827 C = S(30, B);
2828 B = A;
2829 A = TEMP;
2830 } while (++t <= 79);
2831
2832 HashResultPointer[0] += A;
2833 HashResultPointer[1] += B;
2834 HashResultPointer[2] += C;
2835 HashResultPointer[3] += D;
2836 HashResultPointer[4] += E;
2837
2838}
2839
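The S(N,V) macro defined before these routines is a 32-bit rotate-left, and lpfc_sha_init/lpfc_sha_iterate together are the SHA-1 compression function over an 80-word schedule (0x67452301... are the standard initial values, 0x5A827999... the round constants). A tiny hedged userspace check of the rotate macro's behavior:

#include <assert.h>
#include <stdint.h>

#define S(N, V) (((V) << (N)) | ((V) >> (32 - (N))))	/* rotate-left by N */

int main(void)
{
	uint32_t v = 0x80000001u;

	/* Rotating left by 1 wraps the top bit around into bit 0. */
	assert(S(1, v) == 0x00000003u);
	/* Rotations compose modulo 32, so rotl 2 then rotl 30 is lossless. */
	assert(S(30, S(2, v)) == v);
	return 0;
}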
e59058c4 2840/**
3621a710 2841 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
e59058c4
JS
2842 * @RandomChallenge: pointer to the entry of host challenge random number array.
2843 * @HashWorking: pointer to the entry of the working hash array.
2844 *
2845 * This routine calculates the working hash array referred by @HashWorking
2846 * from the challenge random numbers associated with the host, referred by
2847 * @RandomChallenge. The result is put into the entry of the working hash
2848 * array and returned by reference through @HashWorking.
2849 **/
dea3101e 2850static void
2851lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2852{
2853 *HashWorking = (*RandomChallenge ^ *HashWorking);
2854}
2855
e59058c4 2856/**
3621a710 2857 * lpfc_hba_init - Perform special handling for LC HBA initialization
e59058c4
JS
2858 * @phba: pointer to lpfc hba data structure.
2859 * @hbainit: pointer to an array of unsigned 32-bit integers.
2860 *
2861 * This routine performs the special handling for LC HBA initialization.
2862 **/
dea3101e 2863void
2864lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2865{
2866 int t;
2867 uint32_t *HashWorking;
2e0fef85 2868 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
dea3101e 2869
bbfbbbc1 2870 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
dea3101e 2871 if (!HashWorking)
2872 return;
2873
dea3101e 2874 HashWorking[0] = HashWorking[78] = *pwwnn++;
2875 HashWorking[1] = HashWorking[79] = *pwwnn;
2876
2877 for (t = 0; t < 7; t++)
2878 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2879
2880 lpfc_sha_init(hbainit);
2881 lpfc_sha_iterate(hbainit, HashWorking);
2882 kfree(HashWorking);
2883}
2884
e59058c4 2885/**
3621a710 2886 * lpfc_cleanup - Performs vport cleanups before deleting a vport
e59058c4
JS
2887 * @vport: pointer to a virtual N_Port data structure.
2888 *
2889 * This routine performs the necessary cleanups before deleting the @vport.
2890 * It invokes the discovery state machine to perform necessary state
2891 * transitions and to release the ndlps associated with the @vport. Note,
2892 * the physical port is treated as @vport 0.
2893 **/
87af33fe 2894void
2e0fef85 2895lpfc_cleanup(struct lpfc_vport *vport)
dea3101e 2896{
87af33fe 2897 struct lpfc_hba *phba = vport->phba;
dea3101e 2898 struct lpfc_nodelist *ndlp, *next_ndlp;
a8adb832 2899 int i = 0;
dea3101e 2900
87af33fe
JS
2901 if (phba->link_state > LPFC_LINK_DOWN)
2902 lpfc_port_link_failure(vport);
2903
5e633302
GS
2904 /* Clean up VMID resources */
2905 if (lpfc_is_vmid_enabled(phba))
2906 lpfc_vmid_vport_cleanup(vport);
2907
87af33fe 2908 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
58da1ffb
JS
2909 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2910 ndlp->nlp_DID == Fabric_DID) {
2911 /* Just free up ndlp with Fabric_DID for vports */
2912 lpfc_nlp_put(ndlp);
2913 continue;
2914 }
2915
a70e63ee
JS
2916 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
2917 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
eff4a01b
JS
2918 lpfc_nlp_put(ndlp);
2919 continue;
2920 }
2921
e9b11083
JS
2922 /* Fabric Ports not in UNMAPPED state are cleaned up in the
2923 * DEVICE_RM event.
2924 */
2925 if (ndlp->nlp_type & NLP_FABRIC &&
2926 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
87af33fe
JS
2927 lpfc_disc_state_machine(vport, ndlp, NULL,
2928 NLP_EVT_DEVICE_RECOVERY);
e47c9093 2929
e9b11083
JS
2930 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
2931 lpfc_disc_state_machine(vport, ndlp, NULL,
2932 NLP_EVT_DEVICE_RM);
87af33fe
JS
2933 }
2934
a8adb832
JS
2935 /* At this point, ALL ndlps should be gone
2936 * because of the previous NLP_EVT_DEVICE_RM.
2937 * Let's wait for this to happen, if needed.
2938 */
87af33fe 2939 while (!list_empty(&vport->fc_nodes)) {
a8adb832 2940 if (i++ > 3000) {
372c187b
DK
2941 lpfc_printf_vlog(vport, KERN_ERR,
2942 LOG_TRACE_EVENT,
a8adb832 2943 "0233 Nodelist not empty\n");
e47c9093
JS
2944 list_for_each_entry_safe(ndlp, next_ndlp,
2945 &vport->fc_nodes, nlp_listp) {
2946 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
e9b11083
JS
2947 LOG_TRACE_EVENT,
2948 "0282 did:x%x ndlp:x%px "
2949 "refcnt:%d xflags x%x nflag x%x\n",
2950 ndlp->nlp_DID, (void *)ndlp,
2951 kref_read(&ndlp->kref),
2952 ndlp->fc4_xpt_flags,
2953 ndlp->nlp_flag);
e47c9093 2954 }
a8adb832 2955 break;
87af33fe 2956 }
a8adb832
JS
2957
2958 /* Wait for any activity on ndlps to settle */
2959 msleep(10);
87af33fe 2960 }
1151e3ec 2961 lpfc_cleanup_vports_rrqs(vport, NULL);
dea3101e 2962}
2963
e59058c4 2964/**
3621a710 2965 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
e59058c4
JS
2966 * @vport: pointer to a virtual N_Port data structure.
2967 *
2968 * This routine stops all the timers associated with a @vport. This function
2969 * is invoked before disabling or deleting a @vport. Note that the physical
2970 * port is treated as @vport 0.
2971 **/
92d7f7b0
JS
2972void
2973lpfc_stop_vport_timers(struct lpfc_vport *vport)
dea3101e 2974{
92d7f7b0 2975 del_timer_sync(&vport->els_tmofunc);
92494144 2976 del_timer_sync(&vport->delayed_disc_tmo);
92d7f7b0
JS
2977 lpfc_can_disctmo(vport);
2978 return;
dea3101e 2979}
2980
ecfd03c6
JS
2981/**
2982 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2983 * @phba: pointer to lpfc hba data structure.
2984 *
2985 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2986 * caller of this routine should already hold the host lock.
2987 **/
2988void
2989__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2990{
5ac6b303
JS
2991 /* Clear pending FCF rediscovery wait flag */
2992 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2993
ecfd03c6
JS
2994 /* Now, try to stop the timer */
2995 del_timer(&phba->fcf.redisc_wait);
2996}
2997
2998/**
2999 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3000 * @phba: pointer to lpfc hba data structure.
3001 *
3002 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3003 * checks whether the FCF rediscovery wait timer is pending with the host
3004 * lock held before proceeding with disabling the timer and clearing the
3005 * wait timer pending flag.
3006 **/
3007void
3008lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3009{
3010 spin_lock_irq(&phba->hbalock);
3011 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3012 /* FCF rediscovery timer already fired or stopped */
3013 spin_unlock_irq(&phba->hbalock);
3014 return;
3015 }
3016 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
5ac6b303
JS
3017 /* Clear failover in progress flags */
3018 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
ecfd03c6
JS
3019 spin_unlock_irq(&phba->hbalock);
3020}
3021
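The pair above follows the kernel's double-underscore locking convention: __lpfc_sli4_stop_fcf_redisc_wait_timer assumes the caller already holds hbalock, while the plain-named wrapper takes the lock, rechecks the pending flag, and then calls the bare worker. A minimal hedged sketch of the convention with hypothetical names:

#include <linux/spinlock.h>

struct my_ctx {
	spinlock_t lock;
	unsigned int flags;
#define MY_PENDING	0x1
};

/* Bare worker: caller must already hold ctx->lock. */
static void __my_clear_pending(struct my_ctx *ctx)
{
	ctx->flags &= ~MY_PENDING;
}

/* Locked wrapper: the check-then-act stays atomic under one acquisition. */
static void my_clear_pending(struct my_ctx *ctx)
{
	spin_lock_irq(&ctx->lock);
	if (ctx->flags & MY_PENDING)
		__my_clear_pending(ctx);
	spin_unlock_irq(&ctx->lock);
}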
e59058c4 3022/**
3772a991 3023 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
e59058c4
JS
3024 * @phba: pointer to lpfc hba data structure.
3025 *
3026 * This routine stops all the timers associated with a HBA. This function is
3027 * invoked before either putting a HBA offline or unloading the driver.
3028 **/
3772a991
JS
3029void
3030lpfc_stop_hba_timers(struct lpfc_hba *phba)
dea3101e 3031{
cdb42bec
JS
3032 if (phba->pport)
3033 lpfc_stop_vport_timers(phba->pport);
32517fc0 3034 cancel_delayed_work_sync(&phba->eq_delay_work);
317aeb83 3035 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
2e0fef85 3036 del_timer_sync(&phba->sli.mbox_tmo);
92d7f7b0 3037 del_timer_sync(&phba->fabric_block_timer);
9399627f 3038 del_timer_sync(&phba->eratt_poll);
3772a991 3039 del_timer_sync(&phba->hb_tmofunc);
1151e3ec
JS
3040 if (phba->sli_rev == LPFC_SLI_REV4) {
3041 del_timer_sync(&phba->rrq_tmr);
3042 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3043 }
a22d73b6 3044 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3772a991
JS
3045
3046 switch (phba->pci_dev_grp) {
3047 case LPFC_PCI_DEV_LP:
3048 /* Stop any LightPulse device specific driver timers */
3049 del_timer_sync(&phba->fcp_poll_timer);
3050 break;
3051 case LPFC_PCI_DEV_OC:
cc0e5f1c 3052 /* Stop any OneConnect device specific driver timers */
ecfd03c6 3053 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3772a991
JS
3054 break;
3055 default:
372c187b 3056 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3772a991
JS
3057 "0297 Invalid device group (x%x)\n",
3058 phba->pci_dev_grp);
3059 break;
3060 }
2e0fef85 3061 return;
dea3101e 3062}
3063
e59058c4 3064/**
3621a710 3065 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
e59058c4 3066 * @phba: pointer to lpfc hba data structure.
fe614acd 3067 * @mbx_action: flag for mailbox no wait action.
e59058c4
JS
3068 *
3069 * This routine marks a HBA's management interface as blocked. Once the HBA's
3070 * management interface is marked as blocked, all the user space access to
3071 * the HBA, whether they are from sysfs interface or libdfc interface will
3072 * all be blocked. The HBA is set to block the management interface when the
3073 * driver prepares the HBA interface for online or offline.
3074 **/
a6ababd2 3075static void
618a5230 3076lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
a6ababd2
AB
3077{
3078 unsigned long iflag;
6e7288d9
JS
3079 uint8_t actcmd = MBX_HEARTBEAT;
3080 unsigned long timeout;
3081
a6ababd2
AB
3082 spin_lock_irqsave(&phba->hbalock, iflag);
3083 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
618a5230
JS
3084 spin_unlock_irqrestore(&phba->hbalock, iflag);
3085 if (mbx_action == LPFC_MBX_NO_WAIT)
3086 return;
3087 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3088 spin_lock_irqsave(&phba->hbalock, iflag);
a183a15f 3089 if (phba->sli.mbox_active) {
6e7288d9 3090 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
a183a15f
JS
3091 /* Determine how long we might wait for the active mailbox
3092 * command to be gracefully completed by firmware.
3093 */
3094 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3095 phba->sli.mbox_active) * 1000) + jiffies;
3096 }
a6ababd2 3097 spin_unlock_irqrestore(&phba->hbalock, iflag);
a183a15f 3098
6e7288d9
JS
3099 /* Wait for the outstanding mailbox command to complete */
3100 while (phba->sli.mbox_active) {
3101 /* Check active mailbox complete status every 2ms */
3102 msleep(2);
3103 if (time_after(jiffies, timeout)) {
372c187b
DK
3104 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3105 "2813 Mgmt IO is Blocked %x "
3106 "- mbox cmd %x still active\n",
3107 phba->sli.sli_flag, actcmd);
6e7288d9
JS
3108 break;
3109 }
3110 }
a6ababd2
AB
3111}
3112
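The wait loop above uses the standard jiffies deadline idiom: compute an absolute deadline once with msecs_to_jiffies(), poll cheaply, and compare with time_after() so jiffies wraparound is handled correctly. A self-contained hedged sketch (hypothetical my_* names):

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll a condition every 2 ms until it clears or the deadline passes.
 * Returns true if the condition cleared in time, false on timeout.
 */
static bool my_wait_until_idle(bool (*busy)(void *), void *arg,
			       unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (busy(arg)) {
		if (time_after(jiffies, deadline))
			return false;	/* give up; the caller logs and moves on */
		msleep(2);		/* the same 2 ms cadence used above */
	}
	return true;
}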
6b5151fd
JS
3113/**
3114 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3115 * @phba: pointer to lpfc hba data structure.
3116 *
3117 * Allocate RPIs for all active remote nodes. This is needed whenever
3118 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3119 * is to fix up the temporary RPI assignments.
3120 **/
3121void
3122lpfc_sli4_node_prep(struct lpfc_hba *phba)
3123{
3124 struct lpfc_nodelist *ndlp, *next_ndlp;
3125 struct lpfc_vport **vports;
9d3d340d 3126 int i, rpi;
6b5151fd
JS
3127
3128 if (phba->sli_rev != LPFC_SLI_REV4)
3129 return;
3130
3131 vports = lpfc_create_vport_work_array(phba);
9d3d340d
JS
3132 if (vports == NULL)
3133 return;
6b5151fd 3134
9d3d340d
JS
3135 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3136 if (vports[i]->load_flag & FC_UNLOADING)
3137 continue;
3138
3139 list_for_each_entry_safe(ndlp, next_ndlp,
3140 &vports[i]->fc_nodes,
3141 nlp_listp) {
9d3d340d
JS
3142 rpi = lpfc_sli4_alloc_rpi(phba);
3143 if (rpi == LPFC_RPI_ALLOC_ERROR) {
307e3380 3144 /* TODO print log? */
9d3d340d 3145 continue;
6b5151fd 3146 }
9d3d340d 3147 ndlp->nlp_rpi = rpi;
0f154226
JS
3148 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3149 LOG_NODE | LOG_DISCOVERY,
3150 "0009 Assign RPI x%x to ndlp x%px "
307e3380 3151 "DID:x%06x flg:x%x\n",
0f154226 3152 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
307e3380 3153 ndlp->nlp_flag);
6b5151fd
JS
3154 }
3155 }
3156 lpfc_destroy_vport_work_array(phba, vports);
3157}
3158
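/*
 * Illustrative sketch (example only): lpfc_sli4_node_prep() above uses
 * the vport work-array idiom that recurs throughout this file --
 * snapshot the vports, iterate, then release the snapshot.  A minimal
 * skeleton of the idiom, with the per-vport work elided:
 */
#if 0
static void lpfc_example_walk_vports(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports == NULL)
		return;
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		if (vports[i]->load_flag & FC_UNLOADING)
			continue;
		/* per-vport work goes here */
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
#endif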
c490850a
JS
3159/**
3160 * lpfc_create_expedite_pool - create expedite pool
3161 * @phba: pointer to lpfc hba data structure.
3162 *
3163 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3164 * to the expedite pool and marks them as expedite.
3165 **/
3999df75 3166static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
c490850a
JS
3167{
3168 struct lpfc_sli4_hdw_queue *qp;
3169 struct lpfc_io_buf *lpfc_ncmd;
3170 struct lpfc_io_buf *lpfc_ncmd_next;
3171 struct lpfc_epd_pool *epd_pool;
3172 unsigned long iflag;
3173
3174 epd_pool = &phba->epd_pool;
3175 qp = &phba->sli4_hba.hdwq[0];
3176
3177 spin_lock_init(&epd_pool->lock);
3178 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3179 spin_lock(&epd_pool->lock);
3180 INIT_LIST_HEAD(&epd_pool->list);
3181 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3182 &qp->lpfc_io_buf_list_put, list) {
3183 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3184 lpfc_ncmd->expedite = true;
3185 qp->put_io_bufs--;
3186 epd_pool->count++;
3187 if (epd_pool->count >= XRI_BATCH)
3188 break;
3189 }
3190 spin_unlock(&epd_pool->lock);
3191 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3192}
3193
3194/**
3195 * lpfc_destroy_expedite_pool - destroy expedite pool
3196 * @phba: pointer to lpfc hba data structure.
3197 *
3198 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3199 * of HWQ 0 and clears the expedite mark.
3200 **/
3999df75 3201static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
c490850a
JS
3202{
3203 struct lpfc_sli4_hdw_queue *qp;
3204 struct lpfc_io_buf *lpfc_ncmd;
3205 struct lpfc_io_buf *lpfc_ncmd_next;
3206 struct lpfc_epd_pool *epd_pool;
3207 unsigned long iflag;
3208
3209 epd_pool = &phba->epd_pool;
3210 qp = &phba->sli4_hba.hdwq[0];
3211
3212 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3213 spin_lock(&epd_pool->lock);
3214 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3215 &epd_pool->list, list) {
3216 list_move_tail(&lpfc_ncmd->list,
3217 &qp->lpfc_io_buf_list_put);
3218 lpfc_ncmd->expedite = false;
3219 qp->put_io_bufs++;
3220 epd_pool->count--;
3221 }
3222 spin_unlock(&epd_pool->lock);
3223 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3224}
3225
3226/**
3227 * lpfc_create_multixri_pools - create multi-XRI pools
3228 * @phba: pointer to lpfc hba data structure.
3229 *
3230 * This routine initializes the public and private pools per HWQ, then moves
3231 * XRIs from lpfc_io_buf_list_put to the public pool. The high and low
3232 * watermarks are also initialized.
3233 **/
3234void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3235{
3236 u32 i, j;
3237 u32 hwq_count;
3238 u32 count_per_hwq;
3239 struct lpfc_io_buf *lpfc_ncmd;
3240 struct lpfc_io_buf *lpfc_ncmd_next;
3241 unsigned long iflag;
3242 struct lpfc_sli4_hdw_queue *qp;
3243 struct lpfc_multixri_pool *multixri_pool;
3244 struct lpfc_pbl_pool *pbl_pool;
3245 struct lpfc_pvt_pool *pvt_pool;
3246
3247 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3248 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3249 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3250 phba->sli4_hba.io_xri_cnt);
3251
3252 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3253 lpfc_create_expedite_pool(phba);
3254
3255 hwq_count = phba->cfg_hdw_queue;
3256 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3257
3258 for (i = 0; i < hwq_count; i++) {
3259 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3260
3261 if (!multixri_pool) {
3262 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3263 "1238 Failed to allocate memory for "
3264 "multixri_pool\n");
3265
3266 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3267 lpfc_destroy_expedite_pool(phba);
3268
3269 j = 0;
3270 while (j < i) {
3271 qp = &phba->sli4_hba.hdwq[j];
3272 kfree(qp->p_multixri_pool);
3273 j++;
3274 }
3275 phba->cfg_xri_rebalancing = 0;
3276 return;
3277 }
3278
3279 qp = &phba->sli4_hba.hdwq[i];
3280 qp->p_multixri_pool = multixri_pool;
3281
3282 multixri_pool->xri_limit = count_per_hwq;
3283 multixri_pool->rrb_next_hwqid = i;
3284
3285 /* Deal with public free xri pool */
3286 pbl_pool = &multixri_pool->pbl_pool;
3287 spin_lock_init(&pbl_pool->lock);
3288 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3289 spin_lock(&pbl_pool->lock);
3290 INIT_LIST_HEAD(&pbl_pool->list);
3291 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3292 &qp->lpfc_io_buf_list_put, list) {
3293 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3294 qp->put_io_bufs--;
3295 pbl_pool->count++;
3296 }
3297 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3298 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3299 pbl_pool->count, i);
3300 spin_unlock(&pbl_pool->lock);
3301 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3302
3303 /* Deal with private free xri pool */
3304 pvt_pool = &multixri_pool->pvt_pool;
3305 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3306 pvt_pool->low_watermark = XRI_BATCH;
3307 spin_lock_init(&pvt_pool->lock);
3308 spin_lock_irqsave(&pvt_pool->lock, iflag);
3309 INIT_LIST_HEAD(&pvt_pool->list);
3310 pvt_pool->count = 0;
3311 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3312 }
3313}
3314
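/*
 * Worked example (hypothetical numbers): for a port with 2048 IO XRIs
 * and cfg_hdw_queue == 8, lpfc_create_multixri_pools() above computes
 *
 *	count_per_hwq  = 2048 / 8       = 256  (xri_limit per HWQ)
 *	high_watermark = xri_limit / 2  = 128
 *	low_watermark  = XRI_BATCH
 *
 * so each private pool floats between XRI_BATCH and 128 XRIs, with the
 * remainder held in that HWQ's public pool.
 */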
3315/**
3316 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3317 * @phba: pointer to lpfc hba data structure.
3318 *
3319 * This routine returns XRIs from the public/private pools to lpfc_io_buf_list_put.
3320 **/
3999df75 3321static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
c490850a
JS
3322{
3323 u32 i;
3324 u32 hwq_count;
3325 struct lpfc_io_buf *lpfc_ncmd;
3326 struct lpfc_io_buf *lpfc_ncmd_next;
3327 unsigned long iflag;
3328 struct lpfc_sli4_hdw_queue *qp;
3329 struct lpfc_multixri_pool *multixri_pool;
3330 struct lpfc_pbl_pool *pbl_pool;
3331 struct lpfc_pvt_pool *pvt_pool;
3332
3333 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3334 lpfc_destroy_expedite_pool(phba);
3335
c00f62e6
JS
3336 if (!(phba->pport->load_flag & FC_UNLOADING))
3337 lpfc_sli_flush_io_rings(phba);
c66a9197 3338
c490850a
JS
3339 hwq_count = phba->cfg_hdw_queue;
3340
3341 for (i = 0; i < hwq_count; i++) {
3342 qp = &phba->sli4_hba.hdwq[i];
3343 multixri_pool = qp->p_multixri_pool;
3344 if (!multixri_pool)
3345 continue;
3346
3347 qp->p_multixri_pool = NULL;
3348
3349 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3350
3351 /* Deal with public free xri pool */
3352 pbl_pool = &multixri_pool->pbl_pool;
3353 spin_lock(&pbl_pool->lock);
3354
3355 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3356 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3357 pbl_pool->count, i);
3358
3359 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3360 &pbl_pool->list, list) {
3361 list_move_tail(&lpfc_ncmd->list,
3362 &qp->lpfc_io_buf_list_put);
3363 qp->put_io_bufs++;
3364 pbl_pool->count--;
3365 }
3366
3367 INIT_LIST_HEAD(&pbl_pool->list);
3368 pbl_pool->count = 0;
3369
3370 spin_unlock(&pbl_pool->lock);
3371
3372 /* Deal with private free xri pool */
3373 pvt_pool = &multixri_pool->pvt_pool;
3374 spin_lock(&pvt_pool->lock);
3375
3376 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3377 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3378 pvt_pool->count, i);
3379
3380 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3381 &pvt_pool->list, list) {
3382 list_move_tail(&lpfc_ncmd->list,
3383 &qp->lpfc_io_buf_list_put);
3384 qp->put_io_bufs++;
3385 pvt_pool->count--;
3386 }
3387
3388 INIT_LIST_HEAD(&pvt_pool->list);
3389 pvt_pool->count = 0;
3390
3391 spin_unlock(&pvt_pool->lock);
3392 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3393
3394 kfree(multixri_pool);
3395 }
3396}
3397
e59058c4 3398/**
3621a710 3399 * lpfc_online - Initialize and bring a HBA online
e59058c4
JS
3400 * @phba: pointer to lpfc hba data structure.
3401 *
3402 * This routine initializes the HBA and brings a HBA online. During this
3403 * process, the management interface is blocked to prevent user space access
3404 * to the HBA from interfering with the driver initialization.
3405 *
3406 * Return codes
3407 * 0 - successful
3408 * 1 - failed
3409 **/
dea3101e 3410int
2e0fef85 3411lpfc_online(struct lpfc_hba *phba)
dea3101e 3412{
372bd282 3413 struct lpfc_vport *vport;
549e55cd 3414 struct lpfc_vport **vports;
a145fda3 3415 int i, error = 0;
16a3a208 3416 bool vpis_cleared = false;
2e0fef85 3417
dea3101e 3418 if (!phba)
3419 return 0;
372bd282 3420 vport = phba->pport;
dea3101e 3421
2e0fef85 3422 if (!(vport->fc_flag & FC_OFFLINE_MODE))
dea3101e 3423 return 0;
3424
ed957684 3425 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
e8b62011 3426 "0458 Bring Adapter online\n");
dea3101e 3427
618a5230 3428 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
46fa311e 3429
da0436e9
JS
3430 if (phba->sli_rev == LPFC_SLI_REV4) {
3431 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3432 lpfc_unblock_mgmt_io(phba);
3433 return 1;
3434 }
16a3a208
JS
3435 spin_lock_irq(&phba->hbalock);
3436 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3437 vpis_cleared = true;
3438 spin_unlock_irq(&phba->hbalock);
a145fda3
DK
3439
3440 /* Reestablish the local initiator port.
3441 * The offline process destroyed the previous lport.
3442 */
3443 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3444 !phba->nvmet_support) {
3445 error = lpfc_nvme_create_localport(phba->pport);
3446 if (error)
372c187b 3447 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
a145fda3
DK
3448 "6132 NVME restore reg failed "
3449 "on nvmei error x%x\n", error);
3450 }
da0436e9 3451 } else {
895427bd 3452 lpfc_sli_queue_init(phba);
da0436e9
JS
3453 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3454 lpfc_unblock_mgmt_io(phba);
3455 return 1;
3456 }
46fa311e 3457 }
dea3101e 3458
549e55cd 3459 vports = lpfc_create_vport_work_array(phba);
aeb6641f 3460 if (vports != NULL) {
da0436e9 3461 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd
JS
3462 struct Scsi_Host *shost;
3463 shost = lpfc_shost_from_vport(vports[i]);
3464 spin_lock_irq(shost->host_lock);
3465 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3466 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3467 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
16a3a208 3468 if (phba->sli_rev == LPFC_SLI_REV4) {
1c6834a7 3469 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
16a3a208
JS
3470 if ((vpis_cleared) &&
3471 (vports[i]->port_type !=
3472 LPFC_PHYSICAL_PORT))
3473 vports[i]->vpi = 0;
3474 }
549e55cd
JS
3475 spin_unlock_irq(shost->host_lock);
3476 }
aeb6641f
AB
3477 }
3478 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 3479
c490850a
JS
3480 if (phba->cfg_xri_rebalancing)
3481 lpfc_create_multixri_pools(phba);
3482
93a4d6f4
JS
3483 lpfc_cpuhp_add(phba);
3484
46fa311e 3485 lpfc_unblock_mgmt_io(phba);
dea3101e 3486 return 0;
3487}
3488
e59058c4 3489/**
3621a710 3490 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
e59058c4
JS
3491 * @phba: pointer to lpfc hba data structure.
3492 *
3493 * This routine marks a HBA's management interface as not blocked. Once the
3494 * HBA's management interface is marked as not blocked, all user space
3495 * access to the HBA, whether from the sysfs interface or the libdfc
3496 * interface, will be allowed. The HBA is set to block the management interface
3497 * when the driver prepares the HBA interface for online or offline and then
3498 * set to unblock the management interface afterwards.
3499 **/
46fa311e
JS
3500void
3501lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3502{
3503 unsigned long iflag;
3504
2e0fef85
JS
3505 spin_lock_irqsave(&phba->hbalock, iflag);
3506 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3507 spin_unlock_irqrestore(&phba->hbalock, iflag);
46fa311e
JS
3508}
3509
e59058c4 3510/**
3621a710 3511 * lpfc_offline_prep - Prepare a HBA to be brought offline
e59058c4 3512 * @phba: pointer to lpfc hba data structure.
fe614acd 3513 * @mbx_action: flag for mailbox shutdown action.
e59058c4
JS
3514 *
3515 * This routine is invoked to prepare a HBA to be brought offline. It performs
3516 * an unreg_login to all the nodes on all vports and flushes the mailbox
3517 * queue to make it ready to be brought offline.
3518 **/
46fa311e 3519void
618a5230 3520lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
46fa311e 3521{
2e0fef85 3522 struct lpfc_vport *vport = phba->pport;
46fa311e 3523 struct lpfc_nodelist *ndlp, *next_ndlp;
87af33fe 3524 struct lpfc_vport **vports;
72100cc4 3525 struct Scsi_Host *shost;
87af33fe 3526 int i;
dea3101e 3527
2e0fef85 3528 if (vport->fc_flag & FC_OFFLINE_MODE)
46fa311e 3529 return;
dea3101e 3530
618a5230 3531 lpfc_block_mgmt_io(phba, mbx_action);
dea3101e 3532
3533 lpfc_linkdown(phba);
3534
87af33fe
JS
3535 /* Issue an unreg_login to all nodes on all vports */
3536 vports = lpfc_create_vport_work_array(phba);
3537 if (vports != NULL) {
da0436e9 3538 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
a8adb832
JS
3539 if (vports[i]->load_flag & FC_UNLOADING)
3540 continue;
72100cc4
JS
3541 shost = lpfc_shost_from_vport(vports[i]);
3542 spin_lock_irq(shost->host_lock);
c868595d 3543 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
695a814e
JS
3544 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3545 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
72100cc4 3546 spin_unlock_irq(shost->host_lock);
695a814e 3547
87af33fe
JS
3548 shost = lpfc_shost_from_vport(vports[i]);
3549 list_for_each_entry_safe(ndlp, next_ndlp,
3550 &vports[i]->fc_nodes,
3551 nlp_listp) {
0f154226 3552
c6adba15 3553 spin_lock_irq(&ndlp->lock);
87af33fe 3554 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
c6adba15 3555 spin_unlock_irq(&ndlp->lock);
affbe244
JS
3556
3557 lpfc_unreg_rpi(vports[i], ndlp);
6b5151fd
JS
3558 /*
3559 * Whenever an SLI4 port goes offline, free the
401ee0c1
JS
3560 * RPI. Get a new RPI when the adapter port
3561 * comes back online.
6b5151fd 3562 */
be6bb941 3563 if (phba->sli_rev == LPFC_SLI_REV4) {
e9b11083 3564 lpfc_printf_vlog(vports[i], KERN_INFO,
0f154226
JS
3565 LOG_NODE | LOG_DISCOVERY,
3566 "0011 Free RPI x%x on "
f1156125 3567 "ndlp: x%px did x%x\n",
0f154226 3568 ndlp->nlp_rpi, ndlp,
307e3380 3569 ndlp->nlp_DID);
6b5151fd 3570 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
0f154226 3571 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
be6bb941 3572 }
307e3380
JS
3573
3574 if (ndlp->nlp_type & NLP_FABRIC) {
3575 lpfc_disc_state_machine(vports[i], ndlp,
3576 NULL, NLP_EVT_DEVICE_RECOVERY);
e9b11083
JS
3577
3578 /* Don't remove the node unless it
3579 * has been unregistered from the
3580 * transport. If it is still registered,
3581 * let dev_loss take care of the node.
3582 */
3583 if (!(ndlp->fc4_xpt_flags &
3584 (NVME_XPT_REGD | SCSI_XPT_REGD)))
3585 lpfc_disc_state_machine
3586 (vports[i], ndlp,
3587 NULL,
3588 NLP_EVT_DEVICE_RM);
307e3380 3589 }
87af33fe
JS
3590 }
3591 }
3592 }
09372820 3593 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 3594
618a5230 3595 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
f485c18d
DK
3596
3597 if (phba->wq)
3598 flush_workqueue(phba->wq);
46fa311e
JS
3599}
3600
e59058c4 3601/**
3621a710 3602 * lpfc_offline - Bring a HBA offline
e59058c4
JS
3603 * @phba: pointer to lpfc hba data structure.
3604 *
3605 * This routine actually brings a HBA offline. It stops all the timers
3606 * associated with the HBA, brings down the SLI layer, and eventually
3607 * marks the HBA as in offline state for the upper layer protocol.
3608 **/
46fa311e 3609void
2e0fef85 3610lpfc_offline(struct lpfc_hba *phba)
46fa311e 3611{
549e55cd
JS
3612 struct Scsi_Host *shost;
3613 struct lpfc_vport **vports;
3614 int i;
46fa311e 3615
549e55cd 3616 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
46fa311e 3617 return;
688a8863 3618
da0436e9
JS
3619 /* stop port and all timers associated with this hba */
3620 lpfc_stop_port(phba);
4b40d02b
DK
3621
3622 /* Tear down the local and target port registrations. The
3623 * nvme transports need to cleanup.
3624 */
3625 lpfc_nvmet_destroy_targetport(phba);
3626 lpfc_nvme_destroy_localport(phba->pport);
3627
51ef4c26
JS
3628 vports = lpfc_create_vport_work_array(phba);
3629 if (vports != NULL)
da0436e9 3630 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
51ef4c26 3631 lpfc_stop_vport_timers(vports[i]);
09372820 3632 lpfc_destroy_vport_work_array(phba, vports);
92d7f7b0 3633 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
e8b62011 3634 "0460 Bring Adapter offline\n");
dea3101e 3635 /* Bring down the SLI Layer and cleanup. The HBA is offline
3636 now. */
3637 lpfc_sli_hba_down(phba);
92d7f7b0 3638 spin_lock_irq(&phba->hbalock);
7054a606 3639 phba->work_ha = 0;
92d7f7b0 3640 spin_unlock_irq(&phba->hbalock);
549e55cd
JS
3641 vports = lpfc_create_vport_work_array(phba);
3642 if (vports != NULL)
da0436e9 3643 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd 3644 shost = lpfc_shost_from_vport(vports[i]);
549e55cd
JS
3645 spin_lock_irq(shost->host_lock);
3646 vports[i]->work_port_events = 0;
3647 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3648 spin_unlock_irq(shost->host_lock);
3649 }
09372820 3650 lpfc_destroy_vport_work_array(phba, vports);
f0871ab6
JS
3651 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
3652 * in hba_unset
3653 */
3654 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3655 __lpfc_cpuhp_remove(phba);
c490850a
JS
3656
3657 if (phba->cfg_xri_rebalancing)
3658 lpfc_destroy_multixri_pools(phba);
dea3101e 3659}
3660
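/*
 * Illustrative sketch (example only): reset-style callers typically
 * pair the routines in this file as prep -> offline -> online:
 */
#if 0
static int lpfc_example_restart(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	/* ... reset or reconfigure the adapter here ... */
	return lpfc_online(phba);
}
#endif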
e59058c4 3661/**
3621a710 3662 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
e59058c4
JS
3663 * @phba: pointer to lpfc hba data structure.
3664 *
3665 * This routine frees all the SCSI buffers and IOCBs from the driver
3666 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
3667 * the internal resources before the device is removed from the system.
e59058c4 3668 **/
8a9d2e80 3669static void
2e0fef85 3670lpfc_scsi_free(struct lpfc_hba *phba)
dea3101e 3671{
c490850a 3672 struct lpfc_io_buf *sb, *sb_next;
dea3101e 3673
895427bd
JS
3674 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3675 return;
3676
2e0fef85 3677 spin_lock_irq(&phba->hbalock);
a40fc5f0 3678
dea3101e 3679 /* Release all the lpfc_scsi_bufs maintained by this host. */
a40fc5f0
JS
3680
3681 spin_lock(&phba->scsi_buf_list_put_lock);
3682 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3683 list) {
dea3101e 3684 list_del(&sb->list);
771db5c0 3685 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
92d7f7b0 3686 sb->dma_handle);
dea3101e 3687 kfree(sb);
3688 phba->total_scsi_bufs--;
3689 }
a40fc5f0
JS
3690 spin_unlock(&phba->scsi_buf_list_put_lock);
3691
3692 spin_lock(&phba->scsi_buf_list_get_lock);
3693 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3694 list) {
dea3101e 3695 list_del(&sb->list);
771db5c0 3696 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
92d7f7b0 3697 sb->dma_handle);
dea3101e 3698 kfree(sb);
3699 phba->total_scsi_bufs--;
3700 }
a40fc5f0 3701 spin_unlock(&phba->scsi_buf_list_get_lock);
2e0fef85 3702 spin_unlock_irq(&phba->hbalock);
8a9d2e80 3703}
0794d601 3704
895427bd 3705/**
5e5b511d 3706 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
895427bd
JS
3707 * @phba: pointer to lpfc hba data structure.
3708 *
0794d601 3709 * This routine frees all the IO buffers and IOCBs from the driver
895427bd
JS
3710 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
3711 * the internal resources before the device is removed from the system.
3712 **/
c490850a 3713void
5e5b511d 3714lpfc_io_free(struct lpfc_hba *phba)
895427bd 3715{
c490850a 3716 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
5e5b511d
JS
3717 struct lpfc_sli4_hdw_queue *qp;
3718 int idx;
895427bd 3719
5e5b511d
JS
3720 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3721 qp = &phba->sli4_hba.hdwq[idx];
3722 /* Release all the lpfc_nvme_bufs maintained by this host. */
3723 spin_lock(&qp->io_buf_list_put_lock);
3724 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3725 &qp->lpfc_io_buf_list_put,
3726 list) {
3727 list_del(&lpfc_ncmd->list);
3728 qp->put_io_bufs--;
3729 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3730 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
d79c9e9d
JS
3731 if (phba->cfg_xpsgl && !phba->nvmet_support)
3732 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3733 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
5e5b511d
JS
3734 kfree(lpfc_ncmd);
3735 qp->total_io_bufs--;
3736 }
3737 spin_unlock(&qp->io_buf_list_put_lock);
3738
3739 spin_lock(&qp->io_buf_list_get_lock);
3740 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3741 &qp->lpfc_io_buf_list_get,
3742 list) {
3743 list_del(&lpfc_ncmd->list);
3744 qp->get_io_bufs--;
3745 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3746 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
d79c9e9d
JS
3747 if (phba->cfg_xpsgl && !phba->nvmet_support)
3748 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3749 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
5e5b511d
JS
3750 kfree(lpfc_ncmd);
3751 qp->total_io_bufs--;
3752 }
3753 spin_unlock(&qp->io_buf_list_get_lock);
895427bd 3754 }
895427bd 3755}
0794d601 3756
8a9d2e80 3757/**
895427bd 3758 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
8a9d2e80
JS
3759 * @phba: pointer to lpfc hba data structure.
3760 *
3761 * This routine first calculates the sizes of the current els and allocated
3762 * scsi sgl lists, and then goes through all sgls to updates the physical
3763 * XRIs assigned due to port function reset. During port initialization, the
3764 * current els and allocated scsi sgl lists are 0s.
3765 *
3766 * Return codes
3767 * 0 - successful (for now, it always returns 0)
3768 **/
3769int
895427bd 3770lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
8a9d2e80
JS
3771{
3772 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
895427bd 3773 uint16_t i, lxri, xri_cnt, els_xri_cnt;
8a9d2e80 3774 LIST_HEAD(els_sgl_list);
8a9d2e80
JS
3775 int rc;
3776
3777 /*
3778 * update on pci function's els xri-sgl list
3779 */
3780 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
895427bd 3781
8a9d2e80
JS
3782 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3783 /* els xri-sgl expanded */
3784 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3785 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3786 "3157 ELS xri-sgl count increased from "
3787 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3788 els_xri_cnt);
3789 /* allocate the additional els sgls */
3790 for (i = 0; i < xri_cnt; i++) {
3791 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3792 GFP_KERNEL);
3793 if (sglq_entry == NULL) {
372c187b
DK
3794 lpfc_printf_log(phba, KERN_ERR,
3795 LOG_TRACE_EVENT,
8a9d2e80
JS
3796 "2562 Failure to allocate an "
3797 "ELS sgl entry:%d\n", i);
3798 rc = -ENOMEM;
3799 goto out_free_mem;
3800 }
3801 sglq_entry->buff_type = GEN_BUFF_TYPE;
3802 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3803 &sglq_entry->phys);
3804 if (sglq_entry->virt == NULL) {
3805 kfree(sglq_entry);
372c187b
DK
3806 lpfc_printf_log(phba, KERN_ERR,
3807 LOG_TRACE_EVENT,
8a9d2e80
JS
3808 "2563 Failure to allocate an "
3809 "ELS mbuf:%d\n", i);
3810 rc = -ENOMEM;
3811 goto out_free_mem;
3812 }
3813 sglq_entry->sgl = sglq_entry->virt;
3814 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3815 sglq_entry->state = SGL_FREED;
3816 list_add_tail(&sglq_entry->list, &els_sgl_list);
3817 }
a789241e 3818 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
895427bd
JS
3819 list_splice_init(&els_sgl_list,
3820 &phba->sli4_hba.lpfc_els_sgl_list);
a789241e 3821 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8a9d2e80
JS
3822 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3823 /* els xri-sgl shrunk */
3824 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3825 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3826 "3158 ELS xri-sgl count decreased from "
3827 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3828 els_xri_cnt);
a789241e 3829 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
895427bd
JS
3830 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3831 &els_sgl_list);
8a9d2e80
JS
3832 /* release extra els sgls from list */
3833 for (i = 0; i < xri_cnt; i++) {
3834 list_remove_head(&els_sgl_list,
3835 sglq_entry, struct lpfc_sglq, list);
3836 if (sglq_entry) {
895427bd
JS
3837 __lpfc_mbuf_free(phba, sglq_entry->virt,
3838 sglq_entry->phys);
8a9d2e80
JS
3839 kfree(sglq_entry);
3840 }
3841 }
895427bd
JS
3842 list_splice_init(&els_sgl_list,
3843 &phba->sli4_hba.lpfc_els_sgl_list);
a789241e 3844 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8a9d2e80
JS
3845 } else
3846 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3847 "3163 ELS xri-sgl count unchanged: %d\n",
3848 els_xri_cnt);
3849 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3850
3851 /* update xris to els sgls on the list */
3852 sglq_entry = NULL;
3853 sglq_entry_next = NULL;
3854 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
895427bd 3855 &phba->sli4_hba.lpfc_els_sgl_list, list) {
8a9d2e80
JS
3856 lxri = lpfc_sli4_next_xritag(phba);
3857 if (lxri == NO_XRI) {
372c187b
DK
3858 lpfc_printf_log(phba, KERN_ERR,
3859 LOG_TRACE_EVENT,
8a9d2e80
JS
3860 "2400 Failed to allocate xri for "
3861 "ELS sgl\n");
3862 rc = -ENOMEM;
3863 goto out_free_mem;
3864 }
3865 sglq_entry->sli4_lxritag = lxri;
3866 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3867 }
895427bd
JS
3868 return 0;
3869
3870out_free_mem:
3871 lpfc_free_els_sgl_list(phba);
3872 return rc;
3873}
3874
f358dd0c
JS
3875/**
3876 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3877 * @phba: pointer to lpfc hba data structure.
3878 *
3879 * This routine first calculates the sizes of the current els and allocated
3880 * scsi sgl lists, and then goes through all sgls to update the physical
3881 * XRIs assigned due to port function reset. During port initialization, the
3882 * current els and allocated scsi sgl lists are 0s.
3883 *
3884 * Return codes
3885 * 0 - successful (for now, it always returns 0)
3886 **/
3887int
3888lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3889{
3890 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3891 uint16_t i, lxri, xri_cnt, els_xri_cnt;
6c621a22 3892 uint16_t nvmet_xri_cnt;
f358dd0c
JS
3893 LIST_HEAD(nvmet_sgl_list);
3894 int rc;
3895
3896 /*
3897 * update on pci function's nvmet xri-sgl list
3898 */
3899 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
61f3d4bf 3900
6c621a22
JS
3901 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3902 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
f358dd0c
JS
3903 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3904 /* nvmet xri-sgl expanded */
3905 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3906 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3907 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3908 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3909 /* allocate the additional nvmet sgls */
3910 for (i = 0; i < xri_cnt; i++) {
3911 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3912 GFP_KERNEL);
3913 if (sglq_entry == NULL) {
372c187b
DK
3914 lpfc_printf_log(phba, KERN_ERR,
3915 LOG_TRACE_EVENT,
f358dd0c
JS
3916 "6303 Failure to allocate an "
3917 "NVMET sgl entry:%d\n", i);
3918 rc = -ENOMEM;
3919 goto out_free_mem;
3920 }
3921 sglq_entry->buff_type = NVMET_BUFF_TYPE;
3922 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3923 &sglq_entry->phys);
3924 if (sglq_entry->virt == NULL) {
3925 kfree(sglq_entry);
372c187b
DK
3926 lpfc_printf_log(phba, KERN_ERR,
3927 LOG_TRACE_EVENT,
f358dd0c
JS
3928 "6304 Failure to allocate an "
3929 "NVMET buf:%d\n", i);
3930 rc = -ENOMEM;
3931 goto out_free_mem;
3932 }
3933 sglq_entry->sgl = sglq_entry->virt;
3934 memset(sglq_entry->sgl, 0,
3935 phba->cfg_sg_dma_buf_size);
3936 sglq_entry->state = SGL_FREED;
3937 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3938 }
3939 spin_lock_irq(&phba->hbalock);
3940 spin_lock(&phba->sli4_hba.sgl_list_lock);
3941 list_splice_init(&nvmet_sgl_list,
3942 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3943 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3944 spin_unlock_irq(&phba->hbalock);
3945 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3946 /* nvmet xri-sgl shrunk */
3947 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3948 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3949 "6305 NVMET xri-sgl count decreased from "
3950 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3951 nvmet_xri_cnt);
3952 spin_lock_irq(&phba->hbalock);
3953 spin_lock(&phba->sli4_hba.sgl_list_lock);
3954 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3955 &nvmet_sgl_list);
3956 /* release extra nvmet sgls from list */
3957 for (i = 0; i < xri_cnt; i++) {
3958 list_remove_head(&nvmet_sgl_list,
3959 sglq_entry, struct lpfc_sglq, list);
3960 if (sglq_entry) {
3961 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3962 sglq_entry->phys);
3963 kfree(sglq_entry);
3964 }
3965 }
3966 list_splice_init(&nvmet_sgl_list,
3967 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3968 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3969 spin_unlock_irq(&phba->hbalock);
3970 } else
3971 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3972 "6306 NVMET xri-sgl count unchanged: %d\n",
3973 nvmet_xri_cnt);
3974 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3975
3976 /* update xris to nvmet sgls on the list */
3977 sglq_entry = NULL;
3978 sglq_entry_next = NULL;
3979 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3980 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3981 lxri = lpfc_sli4_next_xritag(phba);
3982 if (lxri == NO_XRI) {
372c187b
DK
3983 lpfc_printf_log(phba, KERN_ERR,
3984 LOG_TRACE_EVENT,
f358dd0c
JS
3985 "6307 Failed to allocate xri for "
3986 "NVMET sgl\n");
3987 rc = -ENOMEM;
3988 goto out_free_mem;
3989 }
3990 sglq_entry->sli4_lxritag = lxri;
3991 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3992 }
3993 return 0;
3994
3995out_free_mem:
3996 lpfc_free_nvmet_sgl_list(phba);
3997 return rc;
3998}
3999
5e5b511d
JS
4000int
4001lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4002{
4003 LIST_HEAD(blist);
4004 struct lpfc_sli4_hdw_queue *qp;
c490850a
JS
4005 struct lpfc_io_buf *lpfc_cmd;
4006 struct lpfc_io_buf *iobufp, *prev_iobufp;
5e5b511d
JS
4007 int idx, cnt, xri, inserted;
4008
4009 cnt = 0;
4010 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4011 qp = &phba->sli4_hba.hdwq[idx];
4012 spin_lock_irq(&qp->io_buf_list_get_lock);
4013 spin_lock(&qp->io_buf_list_put_lock);
4014
4015 /* Take everything off the get and put lists */
4016 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4017 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4018 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4019 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4020 cnt += qp->get_io_bufs + qp->put_io_bufs;
4021 qp->get_io_bufs = 0;
4022 qp->put_io_bufs = 0;
4023 qp->total_io_bufs = 0;
4024 spin_unlock(&qp->io_buf_list_put_lock);
4025 spin_unlock_irq(&qp->io_buf_list_get_lock);
4026 }
4027
4028 /*
4029 * Take IO buffers off blist and put on cbuf sorted by XRI.
4030 * This is because POST_SGL takes a sequential range of XRIs
4031 * to post to the firmware.
4032 */
4033 for (idx = 0; idx < cnt; idx++) {
c490850a 4034 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
5e5b511d
JS
4035 if (!lpfc_cmd)
4036 return cnt;
4037 if (idx == 0) {
4038 list_add_tail(&lpfc_cmd->list, cbuf);
4039 continue;
4040 }
4041 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4042 inserted = 0;
4043 prev_iobufp = NULL;
4044 list_for_each_entry(iobufp, cbuf, list) {
4045 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4046 if (prev_iobufp)
4047 list_add(&lpfc_cmd->list,
4048 &prev_iobufp->list);
4049 else
4050 list_add(&lpfc_cmd->list, cbuf);
4051 inserted = 1;
4052 break;
4053 }
4054 prev_iobufp = iobufp;
4055 }
4056 if (!inserted)
4057 list_add_tail(&lpfc_cmd->list, cbuf);
4058 }
4059 return cnt;
4060}
4061
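/*
 * Illustrative sketch (example only): lpfc_io_buf_flush() above keeps
 * @cbuf sorted by XRI with a linear insertion scan.  The same idiom on
 * a hypothetical keyed node type:
 */
#if 0
struct xri_node {
	struct list_head list;
	u16 xri;
};

static void example_sorted_insert(struct list_head *head,
				  struct xri_node *item)
{
	struct xri_node *pos;

	/* Stop at the first node with a larger key */
	list_for_each_entry(pos, head, list) {
		if (item->xri < pos->xri) {
			/* list_add_tail() links @item in before @pos */
			list_add_tail(&item->list, &pos->list);
			return;
		}
	}
	list_add_tail(&item->list, head);	/* largest key so far */
}
#endif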
4062int
4063lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4064{
4065 struct lpfc_sli4_hdw_queue *qp;
c490850a 4066 struct lpfc_io_buf *lpfc_cmd;
5e5b511d
JS
4067 int idx, cnt;
4068
4069 qp = phba->sli4_hba.hdwq;
4070 cnt = 0;
4071 while (!list_empty(cbuf)) {
4072 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4073 list_remove_head(cbuf, lpfc_cmd,
c490850a 4074 struct lpfc_io_buf, list);
5e5b511d
JS
4075 if (!lpfc_cmd)
4076 return cnt;
4077 cnt++;
4078 qp = &phba->sli4_hba.hdwq[idx];
1fbf9742
JS
4079 lpfc_cmd->hdwq_no = idx;
4080 lpfc_cmd->hdwq = qp;
5e5b511d
JS
4081 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4082 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4083 spin_lock(&qp->io_buf_list_put_lock);
4084 list_add_tail(&lpfc_cmd->list,
4085 &qp->lpfc_io_buf_list_put);
4086 qp->put_io_bufs++;
4087 qp->total_io_bufs++;
4088 spin_unlock(&qp->io_buf_list_put_lock);
4089 }
4090 }
4091 return cnt;
4092}
4093
895427bd 4094/**
5e5b511d 4095 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
895427bd
JS
4096 * @phba: pointer to lpfc hba data structure.
4097 *
4098 * This routine first calculates the sizes of the current els and allocated
4099 * scsi sgl lists, and then goes through all sgls to update the physical
4100 * XRIs assigned due to port function reset. During port initialization, the
4101 * current els and allocated scsi sgl lists are 0s.
4102 *
4103 * Return codes
4104 * 0 - successful (for now, it always returns 0)
4105 **/
4106int
5e5b511d 4107lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
895427bd 4108{
c490850a 4109 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
0794d601 4110 uint16_t i, lxri, els_xri_cnt;
5e5b511d
JS
4111 uint16_t io_xri_cnt, io_xri_max;
4112 LIST_HEAD(io_sgl_list);
0794d601 4113 int rc, cnt;
8a9d2e80 4114
895427bd 4115 /*
0794d601 4116 * update on pci function's allocated nvme xri-sgl list
895427bd 4117 */
8a9d2e80 4118
0794d601
JS
4119 /* maximum number of xris available for nvme buffers */
4120 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
5e5b511d
JS
4121 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4122 phba->sli4_hba.io_xri_max = io_xri_max;
895427bd 4123
e8c0a779 4124 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
0794d601
JS
4125 "6074 Current allocated XRI sgl count:%d, "
4126 "maximum XRI count:%d\n",
5e5b511d
JS
4127 phba->sli4_hba.io_xri_cnt,
4128 phba->sli4_hba.io_xri_max);
8a9d2e80 4129
5e5b511d 4130 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
8a9d2e80 4131
5e5b511d 4132 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
0794d601 4133 /* max nvme xri shrunk below the allocated nvme buffers */
5e5b511d
JS
4134 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4135 phba->sli4_hba.io_xri_max;
0794d601 4136 /* release the extra allocated nvme buffers */
5e5b511d
JS
4137 for (i = 0; i < io_xri_cnt; i++) {
4138 list_remove_head(&io_sgl_list, lpfc_ncmd,
c490850a 4139 struct lpfc_io_buf, list);
0794d601 4140 if (lpfc_ncmd) {
771db5c0 4141 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
0794d601
JS
4142 lpfc_ncmd->data,
4143 lpfc_ncmd->dma_handle);
4144 kfree(lpfc_ncmd);
a2fc4aef 4145 }
8a9d2e80 4146 }
5e5b511d 4147 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
8a9d2e80
JS
4148 }
4149
0794d601
JS
4150 /* update xris associated to remaining allocated nvme buffers */
4151 lpfc_ncmd = NULL;
4152 lpfc_ncmd_next = NULL;
5e5b511d 4153 phba->sli4_hba.io_xri_cnt = cnt;
0794d601 4154 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
5e5b511d 4155 &io_sgl_list, list) {
8a9d2e80
JS
4156 lxri = lpfc_sli4_next_xritag(phba);
4157 if (lxri == NO_XRI) {
372c187b
DK
4158 lpfc_printf_log(phba, KERN_ERR,
4159 LOG_TRACE_EVENT,
0794d601
JS
4160 "6075 Failed to allocate xri for "
4161 "nvme buffer\n");
8a9d2e80
JS
4162 rc = -ENOMEM;
4163 goto out_free_mem;
4164 }
0794d601
JS
4165 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4166 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
8a9d2e80 4167 }
5e5b511d 4168 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
dea3101e 4169 return 0;
8a9d2e80
JS
4170
4171out_free_mem:
5e5b511d 4172 lpfc_io_free(phba);
8a9d2e80 4173 return rc;
dea3101e 4174}
4175
0794d601 4176/**
5e5b511d 4177 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
fe614acd
LJ
4178 * @phba: Pointer to lpfc hba data structure.
4179 * @num_to_alloc: The requested number of buffers to allocate.
0794d601
JS
4180 *
4181 * This routine allocates nvme buffers for a device with the SLI-4 interface
4182 * spec. The nvme buffer contains all the necessary information needed to
4183 * initiate an I/O. After allocating up to @num_to_alloc IO buffers and
4184 * putting them on a list, it posts them to the port by using SGL block post.
4185 *
4186 * Return codes:
5e5b511d 4187 * int - number of IO buffers that were allocated and posted.
0794d601
JS
4188 * 0 = failure; fewer than @num_to_alloc indicates a partial failure.
4189 **/
4190int
5e5b511d 4191lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
0794d601 4192{
c490850a 4193 struct lpfc_io_buf *lpfc_ncmd;
0794d601
JS
4194 struct lpfc_iocbq *pwqeq;
4195 uint16_t iotag, lxri = 0;
4196 int bcnt, num_posted;
4197 LIST_HEAD(prep_nblist);
4198 LIST_HEAD(post_nblist);
4199 LIST_HEAD(nvme_nblist);
4200
5e5b511d 4201 phba->sli4_hba.io_xri_cnt = 0;
0794d601 4202 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
7f9989ba 4203 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
0794d601
JS
4204 if (!lpfc_ncmd)
4205 break;
4206 /*
4207 * Get memory from the pci pool to map the virt space to
4208 * pci bus space for an I/O. The DMA buffer includes the
4209 * number of SGE's necessary to support the sg_tablesize.
4210 */
a5c990ee
TM
4211 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4212 GFP_KERNEL,
4213 &lpfc_ncmd->dma_handle);
0794d601
JS
4214 if (!lpfc_ncmd->data) {
4215 kfree(lpfc_ncmd);
4216 break;
4217 }
0794d601 4218
d79c9e9d
JS
4219 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4220 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4221 } else {
4222 /*
4223 * 4K Page alignment is CRITICAL to BlockGuard, double
4224 * check to be sure.
4225 */
4226 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4227 (((unsigned long)(lpfc_ncmd->data) &
4228 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
372c187b
DK
4229 lpfc_printf_log(phba, KERN_ERR,
4230 LOG_TRACE_EVENT,
d79c9e9d
JS
4231 "3369 Memory alignment err: "
4232 "addr=%lx\n",
4233 (unsigned long)lpfc_ncmd->data);
4234 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4235 lpfc_ncmd->data,
4236 lpfc_ncmd->dma_handle);
4237 kfree(lpfc_ncmd);
4238 break;
4239 }
0794d601
JS
4240 }
4241
d79c9e9d
JS
4242 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4243
0794d601
JS
4244 lxri = lpfc_sli4_next_xritag(phba);
4245 if (lxri == NO_XRI) {
4246 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4247 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4248 kfree(lpfc_ncmd);
4249 break;
4250 }
4251 pwqeq = &lpfc_ncmd->cur_iocbq;
4252
4253 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4254 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4255 if (iotag == 0) {
4256 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4257 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4258 kfree(lpfc_ncmd);
372c187b 4259 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0794d601
JS
4260 "6121 Failed to allocate IOTAG for"
4261 " XRI:0x%x\n", lxri);
4262 lpfc_sli4_free_xri(phba, lxri);
4263 break;
4264 }
4265 pwqeq->sli4_lxritag = lxri;
4266 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4267 pwqeq->context1 = lpfc_ncmd;
4268
4269 /* Initialize local short-hand pointers. */
4270 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4271 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4272 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
c2017260 4273 spin_lock_init(&lpfc_ncmd->buf_lock);
0794d601
JS
4274
4275 /* add the nvme buffer to a post list */
4276 list_add_tail(&lpfc_ncmd->list, &post_nblist);
5e5b511d 4277 phba->sli4_hba.io_xri_cnt++;
0794d601
JS
4278 }
4279 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4280 "6114 Allocate %d out of %d requested new NVME "
4281 "buffers\n", bcnt, num_to_alloc);
4282
4283 /* post the list of nvme buffer sgls to port if available */
4284 if (!list_empty(&post_nblist))
5e5b511d 4285 num_posted = lpfc_sli4_post_io_sgl_list(
0794d601
JS
4286 phba, &post_nblist, bcnt);
4287 else
4288 num_posted = 0;
4289
4290 return num_posted;
4291}
4292
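/*
 * Illustrative sketch (example only): the BlockGuard alignment test in
 * lpfc_new_io_buf() above is the usual power-of-two mask check.  With
 * SLI4_PAGE_SIZE == 4096, an address is page aligned iff its low 12
 * bits are zero:
 */
#if 0
static bool example_is_sli4_page_aligned(void *addr)
{
	return ((unsigned long)addr & (SLI4_PAGE_SIZE - 1)) == 0;
}
#endif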
96418b5e
JS
4293static uint64_t
4294lpfc_get_wwpn(struct lpfc_hba *phba)
4295{
4296 uint64_t wwn;
4297 int rc;
4298 LPFC_MBOXQ_t *mboxq;
4299 MAILBOX_t *mb;
4300
96418b5e
JS
4301 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4302 GFP_KERNEL);
4303 if (!mboxq)
4304 return (uint64_t)-1;
4305
4306 /* First get WWN of HBA instance */
4307 lpfc_read_nv(phba, mboxq);
4308 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4309 if (rc != MBX_SUCCESS) {
372c187b 4310 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
96418b5e
JS
4311 "6019 Mailbox failed , mbxCmd x%x "
4312 "READ_NV, mbxStatus x%x\n",
4313 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4314 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4315 mempool_free(mboxq, phba->mbox_mem_pool);
4316 return (uint64_t) -1;
4317 }
4318 mb = &mboxq->u.mb;
4319 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4320 /* wwn is WWPN of HBA instance */
4321 mempool_free(mboxq, phba->mbox_mem_pool);
4322 if (phba->sli_rev == LPFC_SLI_REV4)
4323 return be64_to_cpu(wwn);
4324 else
286871a6 4325 return rol64(wwn, 32);
96418b5e
JS
4326}
4327
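/*
 * Note on the byte order handling above: the port name copied from the
 * READ_NV mailbox is raw big-endian wire data.  On SLI4 all eight
 * bytes are converted at once with be64_to_cpu(); on the SLI3 path the
 * driver instead swaps the two 32-bit halves with rol64(wwn, 32),
 * evidently because SLI3 firmware delivers the two name words in the
 * opposite order.
 */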
5e633302
GS
4328/**
4329 * lpfc_vmid_res_alloc - Allocates resources for VMID
4330 * @phba: pointer to lpfc hba data structure.
4331 * @vport: pointer to vport data structure
4332 *
4333 * This routine allocates the resources needed for the VMID.
4334 *
4335 * Return codes
4336 * 0 on Success
4337 * Non-0 on Failure
4338 */
4339static int
4340lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4341{
4342 /* VMID feature is supported only on SLI4 */
4343 if (phba->sli_rev == LPFC_SLI_REV3) {
4344 phba->cfg_vmid_app_header = 0;
4345 phba->cfg_vmid_priority_tagging = 0;
4346 }
4347
4348 if (lpfc_is_vmid_enabled(phba)) {
4349 vport->vmid =
4350 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4351 GFP_KERNEL);
4352 if (!vport->vmid)
4353 return -ENOMEM;
4354
4355 rwlock_init(&vport->vmid_lock);
4356
4357 /* Set the VMID parameters for the vport */
4358 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4359 vport->vmid_inactivity_timeout =
4360 phba->cfg_vmid_inactivity_timeout;
4361 vport->max_vmid = phba->cfg_max_vmid;
4362 vport->cur_vmid_cnt = 0;
4363
4364 vport->vmid_priority_range = bitmap_zalloc
4365 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4366
4367 if (!vport->vmid_priority_range) {
4368 kfree(vport->vmid);
4369 return -ENOMEM;
4370 }
4371
4372 hash_init(vport->hash_table);
4373 }
4374 return 0;
4375}
4376
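/*
 * Illustrative sketch (example only): the error path in
 * lpfc_vmid_res_alloc() above frees vport->vmid by hand.  The same
 * two-step allocation written in the conventional goto-unwind style:
 */
#if 0
static int lpfc_example_vmid_alloc(struct lpfc_hba *phba,
				   struct lpfc_vport *vport)
{
	vport->vmid = kcalloc(phba->cfg_max_vmid,
			      sizeof(struct lpfc_vmid), GFP_KERNEL);
	if (!vport->vmid)
		return -ENOMEM;

	vport->vmid_priority_range =
		bitmap_zalloc(LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
	if (!vport->vmid_priority_range)
		goto out_free_vmid;

	return 0;

out_free_vmid:
	kfree(vport->vmid);
	vport->vmid = NULL;
	return -ENOMEM;
}
#endif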
e59058c4 4377/**
3621a710 4378 * lpfc_create_port - Create an FC port
e59058c4
JS
4379 * @phba: pointer to lpfc hba data structure.
4380 * @instance: a unique integer ID to this FC port.
4381 * @dev: pointer to the device data structure.
4382 *
4383 * This routine creates a FC port for the upper layer protocol. The FC port
4384 * can be created on top of either a physical port or a virtual port provided
4385 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4386 * and associates it with the FC port before adding the shost to the SCSI
4387 * layer.
4388 *
4389 * Return codes
4390 * @vport - pointer to the virtual N_Port data structure.
4391 * NULL - port create failed.
4392 **/
2e0fef85 4393struct lpfc_vport *
3de2a653 4394lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
47a8617c 4395{
2e0fef85 4396 struct lpfc_vport *vport;
895427bd 4397 struct Scsi_Host *shost = NULL;
c90b4480 4398 struct scsi_host_template *template;
2e0fef85 4399 int error = 0;
96418b5e
JS
4400 int i;
4401 uint64_t wwn;
4402 bool use_no_reset_hba = false;
56bc8028 4403 int rc;
96418b5e 4404
56bc8028
JS
4405 if (lpfc_no_hba_reset_cnt) {
4406 if (phba->sli_rev < LPFC_SLI_REV4 &&
4407 dev == &phba->pcidev->dev) {
4408 /* Reset the port first */
4409 lpfc_sli_brdrestart(phba);
4410 rc = lpfc_sli_chipset_init(phba);
4411 if (rc)
4412 return NULL;
4413 }
4414 wwn = lpfc_get_wwpn(phba);
4415 }
96418b5e
JS
4416
4417 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4418 if (wwn == lpfc_no_hba_reset[i]) {
372c187b
DK
4419 lpfc_printf_log(phba, KERN_ERR,
4420 LOG_TRACE_EVENT,
96418b5e
JS
4421 "6020 Setting use_no_reset port=%llx\n",
4422 wwn);
4423 use_no_reset_hba = true;
4424 break;
4425 }
4426 }
47a8617c 4427
c90b4480
JS
4428 /* Seed template for SCSI host registration */
4429 if (dev == &phba->pcidev->dev) {
4430 template = &phba->port_template;
4431
4432 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4433 /* Seed physical port template */
4434 memcpy(template, &lpfc_template, sizeof(*template));
4435
7c30bb62 4436 if (use_no_reset_hba)
c90b4480 4437 /* template is for a no reset SCSI Host */
c90b4480 4438 template->eh_host_reset_handler = NULL;
c90b4480
JS
4439
4440 /* Template for all vports this physical port creates */
4441 memcpy(&phba->vport_template, &lpfc_template,
4442 sizeof(*template));
c90b4480
JS
4443 phba->vport_template.shost_attrs = lpfc_vport_attrs;
4444 phba->vport_template.eh_bus_reset_handler = NULL;
4445 phba->vport_template.eh_host_reset_handler = NULL;
4446 phba->vport_template.vendor_id = 0;
4447
4448 /* Initialize the host templates with updated value */
4449 if (phba->sli_rev == LPFC_SLI_REV4) {
4450 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4451 phba->vport_template.sg_tablesize =
4452 phba->cfg_scsi_seg_cnt;
4453 } else {
4454 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4455 phba->vport_template.sg_tablesize =
4456 phba->cfg_sg_seg_cnt;
4457 }
4458
895427bd 4459 } else {
c90b4480
JS
4460 /* NVMET is for physical port only */
4461 memcpy(template, &lpfc_template_nvme,
4462 sizeof(*template));
895427bd 4463 }
c90b4480
JS
4464 } else {
4465 template = &phba->vport_template;
ea4142f6 4466 }
c90b4480
JS
4467
4468 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
2e0fef85
JS
4469 if (!shost)
4470 goto out;
47a8617c 4471
2e0fef85
JS
4472 vport = (struct lpfc_vport *) shost->hostdata;
4473 vport->phba = phba;
2e0fef85 4474 vport->load_flag |= FC_LOADING;
92d7f7b0 4475 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7f5f3d0d 4476 vport->fc_rscn_flush = 0;
3de2a653 4477 lpfc_get_vport_cfgparam(vport);
895427bd 4478
f6e84790
JS
4479 /* Adjust value in vport */
4480 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4481
2e0fef85
JS
4482 shost->unique_id = instance;
4483 shost->max_id = LPFC_MAX_TARGET;
3de2a653 4484 shost->max_lun = vport->cfg_max_luns;
2e0fef85
JS
4485 shost->this_id = -1;
4486 shost->max_cmd_len = 16;
6a828b0f 4487
da0436e9 4488 if (phba->sli_rev == LPFC_SLI_REV4) {
77ffd346
JS
4489 if (!phba->cfg_fcp_mq_threshold ||
4490 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4491 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4492
4493 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4494 phba->cfg_fcp_mq_threshold);
6a828b0f 4495
28baac74 4496 shost->dma_boundary =
cb5172ea 4497 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
d79c9e9d
JS
4498
4499 if (phba->cfg_xpsgl && !phba->nvmet_support)
4500 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4501 else
4502 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
ace44e48
JS
4503 } else
4504 /* SLI-3 has a limited number of hardware queues (3),
4505 * thus there is only one for FCP processing.
4506 */
4507 shost->nr_hw_queues = 1;
81301a9b 4508
47a8617c 4509 /*
2e0fef85
JS
4510 * Set initial can_queue value since 0 is no longer supported and
4511 * scsi_add_host will fail. This will be adjusted later based on the
4512 * max xri value determined in hba setup.
47a8617c 4513 */
2e0fef85 4514 shost->can_queue = phba->cfg_hba_queue_depth - 10;
3de2a653 4515 if (dev != &phba->pcidev->dev) {
92d7f7b0
JS
4516 shost->transportt = lpfc_vport_transport_template;
4517 vport->port_type = LPFC_NPIV_PORT;
4518 } else {
4519 shost->transportt = lpfc_transport_template;
4520 vport->port_type = LPFC_PHYSICAL_PORT;
4521 }
47a8617c 4522
c90b4480
JS
4523 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4524 "9081 CreatePort TMPLATE type %x TBLsize %d "
4525 "SEGcnt %d/%d\n",
4526 vport->port_type, shost->sg_tablesize,
4527 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4528
5e633302
GS
4529 /* Allocate the resources for VMID */
4530 rc = lpfc_vmid_res_alloc(phba, vport);
4531
4532 if (rc)
4533 goto out;
4534
2e0fef85
JS
4535 /* Initialize all internally managed lists. */
4536 INIT_LIST_HEAD(&vport->fc_nodes);
da0436e9 4537 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2e0fef85 4538 spin_lock_init(&vport->work_port_lock);
47a8617c 4539
f22eb4d3 4540 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
47a8617c 4541
f22eb4d3 4542 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
92494144 4543
f22eb4d3 4544 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
92494144 4545
aa6ff309
JS
4546 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4547 lpfc_setup_bg(phba, shost);
4548
d139b9bd 4549 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2e0fef85
JS
4550 if (error)
4551 goto out_put_shost;
47a8617c 4552
523128e5 4553 spin_lock_irq(&phba->port_list_lock);
2e0fef85 4554 list_add_tail(&vport->listentry, &phba->port_list);
523128e5 4555 spin_unlock_irq(&phba->port_list_lock);
2e0fef85 4556 return vport;
47a8617c 4557
2e0fef85 4558out_put_shost:
5e633302
GS
4559 kfree(vport->vmid);
4560 bitmap_free(vport->vmid_priority_range);
2e0fef85
JS
4561 scsi_host_put(shost);
4562out:
4563 return NULL;
47a8617c
JS
4564}
4565
e59058c4 4566/**
3621a710 4567 * destroy_port - destroy an FC port
e59058c4
JS
4568 * @vport: pointer to an lpfc virtual N_Port data structure.
4569 *
4570 * This routine destroys a FC port from the upper layer protocol. All the
4571 * resources associated with the port are released.
4572 **/
2e0fef85
JS
4573void
4574destroy_port(struct lpfc_vport *vport)
47a8617c 4575{
92d7f7b0
JS
4576 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4577 struct lpfc_hba *phba = vport->phba;
47a8617c 4578
858c9f6c 4579 lpfc_debugfs_terminate(vport);
92d7f7b0
JS
4580 fc_remove_host(shost);
4581 scsi_remove_host(shost);
47a8617c 4582
523128e5 4583 spin_lock_irq(&phba->port_list_lock);
92d7f7b0 4584 list_del_init(&vport->listentry);
523128e5 4585 spin_unlock_irq(&phba->port_list_lock);
47a8617c 4586
92d7f7b0 4587 lpfc_cleanup(vport);
47a8617c 4588 return;
47a8617c
JS
4589}
4590
e59058c4 4591/**
3621a710 4592 * lpfc_get_instance - Get a unique integer ID
e59058c4
JS
4593 *
4594 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4595 * uses the kernel idr facility to perform the task.
4596 *
4597 * Return codes:
4598 * instance - a unique integer ID allocated as the new instance.
4599 * -1 - lpfc get instance failed.
4600 **/
92d7f7b0
JS
4601int
4602lpfc_get_instance(void)
4603{
ab516036
TH
4604 int ret;
4605
4606 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4607 return ret < 0 ? -1 : ret;
47a8617c
JS
4608}
4609
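/*
 * Illustrative sketch (example only): the ID handed out by
 * lpfc_get_instance() above comes from the lpfc_hba_index idr, so a
 * teardown path releases it with idr_remove().  Hypothetical pairing:
 */
#if 0
static int lpfc_example_instance_lifecycle(void)
{
	int instance = lpfc_get_instance();

	if (instance == -1)
		return -ENOMEM;
	/* ... use @instance as the unique FC port ID ... */
	idr_remove(&lpfc_hba_index, instance);
	return 0;
}
#endif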
e59058c4 4610/**
3621a710 4611 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
e59058c4
JS
4612 * @shost: pointer to SCSI host data structure.
4613 * @time: elapsed time of the scan in jiffies.
4614 *
4615 * This routine is called by the SCSI layer with a SCSI host to determine
4616 * whether the host scan is finished.
4617 *
4618 * Note: there is no scan_start function as adapter initialization will have
4619 * asynchronously kicked off the link initialization.
4620 *
4621 * Return codes
4622 * 0 - SCSI host scan is not over yet.
4623 * 1 - SCSI host scan is over.
4624 **/
47a8617c
JS
4625int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4626{
2e0fef85
JS
4627 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4628 struct lpfc_hba *phba = vport->phba;
858c9f6c 4629 int stat = 0;
47a8617c 4630
858c9f6c
JS
4631 spin_lock_irq(shost->host_lock);
4632
51ef4c26 4633 if (vport->load_flag & FC_UNLOADING) {
858c9f6c
JS
4634 stat = 1;
4635 goto finished;
4636 }
256ec0d0 4637 if (time >= msecs_to_jiffies(30 * 1000)) {
2e0fef85 4638 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
4639 "0461 Scanning longer than 30 "
4640 "seconds. Continuing initialization\n");
858c9f6c 4641 stat = 1;
47a8617c 4642 goto finished;
2e0fef85 4643 }
256ec0d0
JS
4644 if (time >= msecs_to_jiffies(15 * 1000) &&
4645 phba->link_state <= LPFC_LINK_DOWN) {
2e0fef85 4646 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
4647 "0465 Link down longer than 15 "
4648 "seconds. Continuing initialization\n");
858c9f6c 4649 stat = 1;
47a8617c 4650 goto finished;
2e0fef85 4651 }
47a8617c 4652
2e0fef85 4653 if (vport->port_state != LPFC_VPORT_READY)
858c9f6c 4654 goto finished;
2e0fef85 4655 if (vport->num_disc_nodes || vport->fc_prli_sent)
858c9f6c 4656 goto finished;
256ec0d0 4657 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
858c9f6c 4658 goto finished;
2e0fef85 4659 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
858c9f6c
JS
4660 goto finished;
4661
4662 stat = 1;
47a8617c
JS
4663
4664finished:
858c9f6c
JS
4665 spin_unlock_irq(shost->host_lock);
4666 return stat;
92d7f7b0 4667}
47a8617c 4668
3999df75 4669static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
cd71348a
JS
4670{
4671 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4672 struct lpfc_hba *phba = vport->phba;
4673
4674 fc_host_supported_speeds(shost) = 0;
a1e4d3d8
DK
4675 /*
4676 * Avoid reporting supported link speed for FCoE as it can't be
4677 * controlled via FCoE.
4678 */
4679 if (phba->hba_flag & HBA_FCOE_MODE)
4680 return;
4681
1dc5ec24
JS
4682 if (phba->lmt & LMT_128Gb)
4683 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
cd71348a
JS
4684 if (phba->lmt & LMT_64Gb)
4685 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4686 if (phba->lmt & LMT_32Gb)
4687 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4688 if (phba->lmt & LMT_16Gb)
4689 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4690 if (phba->lmt & LMT_10Gb)
4691 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4692 if (phba->lmt & LMT_8Gb)
4693 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4694 if (phba->lmt & LMT_4Gb)
4695 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4696 if (phba->lmt & LMT_2Gb)
4697 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4698 if (phba->lmt & LMT_1Gb)
4699 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4700}
4701
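/*
 * Illustrative sketch (example only): the LMT_* to FC_PORTSPEED_*
 * mapping above could equally be table driven; a loop over
 * ARRAY_SIZE(example_speed_map) would then replace the if-chain.
 */
#if 0
static const struct {
	u32 lmt_bit;
	u32 portspeed;
} example_speed_map[] = {
	{ LMT_128Gb, FC_PORTSPEED_128GBIT },
	{ LMT_64Gb,  FC_PORTSPEED_64GBIT },
	{ LMT_32Gb,  FC_PORTSPEED_32GBIT },
	{ LMT_16Gb,  FC_PORTSPEED_16GBIT },
	{ LMT_10Gb,  FC_PORTSPEED_10GBIT },
	{ LMT_8Gb,   FC_PORTSPEED_8GBIT },
	{ LMT_4Gb,   FC_PORTSPEED_4GBIT },
	{ LMT_2Gb,   FC_PORTSPEED_2GBIT },
	{ LMT_1Gb,   FC_PORTSPEED_1GBIT },
};
#endif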
e59058c4 4702/**
3621a710 4703 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
e59058c4
JS
4704 * @shost: pointer to SCSI host data structure.
4705 *
4706 * This routine initializes a given SCSI host attributes on a FC port. The
4707 * SCSI host can be either on top of a physical port or a virtual port.
4708 **/
92d7f7b0
JS
4709void lpfc_host_attrib_init(struct Scsi_Host *shost)
4710{
4711 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4712 struct lpfc_hba *phba = vport->phba;
47a8617c 4713 /*
2e0fef85 4714 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
47a8617c
JS
4715 */
4716
2e0fef85
JS
4717 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4718 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
47a8617c
JS
4719 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4720
4721 memset(fc_host_supported_fc4s(shost), 0,
2e0fef85 4722 sizeof(fc_host_supported_fc4s(shost)));
47a8617c
JS
4723 fc_host_supported_fc4s(shost)[2] = 1;
4724 fc_host_supported_fc4s(shost)[7] = 1;
4725
92d7f7b0
JS
4726 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4727 sizeof fc_host_symbolic_name(shost));
47a8617c 4728
cd71348a 4729 lpfc_host_supported_speeds_set(shost);
47a8617c
JS
4730
4731 fc_host_maxframe_size(shost) =
2e0fef85
JS
4732 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4733 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
47a8617c 4734
0af5d708
MC
4735 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4736
47a8617c
JS
4737 /* This value is also unchanging */
4738 memset(fc_host_active_fc4s(shost), 0,
2e0fef85 4739 sizeof(fc_host_active_fc4s(shost)));
47a8617c
JS
4740 fc_host_active_fc4s(shost)[2] = 1;
4741 fc_host_active_fc4s(shost)[7] = 1;
4742
92d7f7b0 4743 fc_host_max_npiv_vports(shost) = phba->max_vpi;
47a8617c 4744 spin_lock_irq(shost->host_lock);
51ef4c26 4745 vport->load_flag &= ~FC_LOADING;
47a8617c 4746 spin_unlock_irq(shost->host_lock);
47a8617c 4747}

/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port; it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port; it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine through
 * the API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
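
/*
 * Illustrative sketch with hypothetical types: the wrapper above dispatches
 * through a function pointer installed at probe time, so common code never
 * has to branch on the SLI revision directly.
 */
struct example_hba {
	void (*stop_port)(struct example_hba *hba); /* s3 or s4 variant */
};

static void example_stop(struct example_hba *hba)
{
	hba->stop_port(hba); /* resolves to the SLI3 or SLI4 routine */
}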

/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when the wait for FCF table rediscovery times
 * out. If new FCF record(s) have been discovered during the wait period,
 * a new FCF event is added to the FCoE async event list and the worker
 * thread is woken up for processing from the worker thread context.
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}
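
/*
 * Illustrative sketch with a hypothetical struct: the callback above uses
 * the standard timer_list pattern - timer_setup() registers the handler,
 * and from_timer() (a container_of() wrapper) recovers the owning object
 * inside it.
 */
struct example_obj {
	struct timer_list wait_tmo;
};

static void example_tmo(struct timer_list *t)
{
	struct example_obj *obj = from_timer(obj, t, wait_tmo);

	/* act on obj, typically by flagging work for a worker thread */
}

static void example_start(struct example_obj *obj)
{
	timer_setup(&obj->wait_tmo, example_tmo, 0);
	mod_timer(&obj->wait_tmo, jiffies + msecs_to_jiffies(2000));
}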

/**
 * lpfc_vmid_poll - VMID timeout detection
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when there has been no I/O by a VM for the
 * specified amount of time. When this situation is detected, the VMID has
 * to be deregistered from the switch and all the local resources freed. The
 * VMID will be reassigned to the VM once the I/O begins.
 **/
static void
lpfc_vmid_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
	u32 wake_up = 0;

	/* check if there is a need to issue QFPA */
	if (phba->pport->vmid_priority_tagging) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
	}

	/* Is the vmid inactivity timer enabled? */
	if (phba->pport->vmid_inactivity_timeout ||
	    phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
		wake_up = 1;
		phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
	}

	if (wake_up)
		lpfc_worker_wake_up(phba);

	/* restart the timer for the next iteration */
	mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
							LPFC_VMID_TIMER));
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code.
 **/
static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
	case LPFC_ASYNC_LINK_FAULT_LR_LRR:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0398 Unknown link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		break;
	}
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

/**
 * lpfc_sli_port_speed_get - Get an SLI3 FC port's link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an SLI3 FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			link_speed = 1000;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			link_speed = 2000;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			link_speed = 4000;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			link_speed = 8000;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			link_speed = 10000;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			link_speed = 16000;
			break;
		default:
			link_speed = 0;
		}
	} else {
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
				phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}

/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Mbps for the link speed.
 *
 * Return: link speed in terms of Mbps.
 **/
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_ASYNC_LINK_SPEED_ZERO:
			port_speed = 0;
			break;
		case LPFC_ASYNC_LINK_SPEED_10MBPS:
			port_speed = 10;
			break;
		case LPFC_ASYNC_LINK_SPEED_100MBPS:
			port_speed = 100;
			break;
		case LPFC_ASYNC_LINK_SPEED_1GBPS:
			port_speed = 1000;
			break;
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			port_speed = 10000;
			break;
		case LPFC_ASYNC_LINK_SPEED_20GBPS:
			port_speed = 20000;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			port_speed = 25000;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			port_speed = 40000;
			break;
		case LPFC_ASYNC_LINK_SPEED_100GBPS:
			port_speed = 100000;
			break;
		default:
			port_speed = 0;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_FC_LA_SPEED_UNKNOWN:
			port_speed = 0;
			break;
		case LPFC_FC_LA_SPEED_1G:
			port_speed = 1000;
			break;
		case LPFC_FC_LA_SPEED_2G:
			port_speed = 2000;
			break;
		case LPFC_FC_LA_SPEED_4G:
			port_speed = 4000;
			break;
		case LPFC_FC_LA_SPEED_8G:
			port_speed = 8000;
			break;
		case LPFC_FC_LA_SPEED_10G:
			port_speed = 10000;
			break;
		case LPFC_FC_LA_SPEED_16G:
			port_speed = 16000;
			break;
		case LPFC_FC_LA_SPEED_32G:
			port_speed = 32000;
			break;
		case LPFC_FC_LA_SPEED_64G:
			port_speed = 64000;
			break;
		case LPFC_FC_LA_SPEED_128G:
			port_speed = 128000;
			break;
		default:
			port_speed = 0;
		}
		break;
	default:
		port_speed = 0;
	}
	return port_speed;
}
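
/*
 * Illustrative worked example, not driver code: a 16G FC link-attention
 * event parses as lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
 * LPFC_FC_LA_SPEED_16G) == 16000 Mbps. Unrecognized speed codes
 * deliberately fall back to 0 rather than guessing.
 */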

/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;

	/* Parse port fault information field */
	lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}
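
/*
 * Illustrative sketch, not driver code: the allocation failures above use
 * the usual kernel unwind pattern - each failure jumps to a label that
 * frees only what was already allocated, in reverse order.
 */
static int example_alloc_chain(void)
{
	void *a, *b;

	a = kmalloc(64, GFP_KERNEL);
	if (!a)
		return -ENOMEM;
	b = kmalloc(64, GFP_KERNEL);
	if (!b)
		goto out_free_a;	/* undo only the first allocation */
	kfree(b);
	kfree(a);
	return 0;

out_free_a:
	kfree(a);
	return -ENOMEM;
}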

/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
 * topology.
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Read topology link speed.
 *
 * Return: link speed in terms of Read topology.
 **/
static uint8_t
lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
{
	uint8_t port_speed;

	switch (speed_code) {
	case LPFC_FC_LA_SPEED_1G:
		port_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_FC_LA_SPEED_2G:
		port_speed = LPFC_LINK_SPEED_2GHZ;
		break;
	case LPFC_FC_LA_SPEED_4G:
		port_speed = LPFC_LINK_SPEED_4GHZ;
		break;
	case LPFC_FC_LA_SPEED_8G:
		port_speed = LPFC_LINK_SPEED_8GHZ;
		break;
	case LPFC_FC_LA_SPEED_16G:
		port_speed = LPFC_LINK_SPEED_16GHZ;
		break;
	case LPFC_FC_LA_SPEED_32G:
		port_speed = LPFC_LINK_SPEED_32GHZ;
		break;
	case LPFC_FC_LA_SPEED_64G:
		port_speed = LPFC_LINK_SPEED_64GHZ;
		break;
	case LPFC_FC_LA_SPEED_128G:
		port_speed = LPFC_LINK_SPEED_128GHZ;
		break;
	case LPFC_FC_LA_SPEED_256G:
		port_speed = LPFC_LINK_SPEED_256GHZ;
		break;
	default:
		port_speed = 0;
		break;
	}

	return port_speed;
}

#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Did port __idx report an error? */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"

static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
					bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
	phba->fc_linkspeed =
		 lpfc_async_link_speed_to_read_top(
				phba,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2910 Async FC Trunking Event - Speed:%d\n"
			"\tLogical speed:%d "
			"port0: %s port1: %s port2: %s port3: %s\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.logical_speed,
			trunk_link_status(0), trunk_link_status(1),
			trunk_link_status(2), trunk_link_status(3));

	if (port_fault)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3202 trunk error:0x%x (%s) seen on port0:%s "
				/*
				 * SLI-4: We have only 0xA error codes
				 * defined as of now. print an appropriate
				 * message in case driver needs to be updated.
				 */
				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
				"UNDEFINED. update driver." : trunk_errmsg[err],
				trunk_port_fault(0), trunk_port_fault(1),
				trunk_port_fault(2), trunk_port_fault(3));
}
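
/*
 * Illustrative sketch: the two macros above depend on preprocessor token
 * pasting - "##__idx" splices the port index into a field name. A minimal
 * version of the same trick, against a hypothetical "trunk" variable:
 */
#define EXAMPLE_LINK_STATE(__idx)	(trunk.link##__idx.state)
/* EXAMPLE_LINK_STATE(2) expands to (trunk.link2.state) */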

/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
		lpfc_update_trunk_link_status(phba, acqe_fc);
		return;
	}

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
					bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
		bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
		bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
		bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
		bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
		bf_get(lpfc_acqe_link_fault, acqe_fc);

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_LINK_DOWN)
		phba->sli4_hba.link_state.logical_speed = 0;
	else if (!phba->sli4_hba.conf_trunk)
		phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);

		switch (phba->sli4_hba.link_state.status) {
		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
			phba->link_flag |= LS_MDS_LINK_DOWN;
			break;
		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
			phba->link_flag |= LS_MDS_LOOPBACK;
			break;
		default:
			break;
		}

		/* Initialize completion status */
		mb = &pmb->u.mb;
		mb->mbxStatus = MBX_SUCCESS;

		/* Parse port fault information field */
		lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);

		/* Parse and translate link attention fields */
		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
		la->eventTag = acqe_fc->event_tag;

		if (phba->sli4_hba.link_state.status ==
		    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_UNEXP_WWPN);
		} else {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_LINK_DOWN);
		}
		/* Invoke the mailbox command callback function */
		lpfc_mbx_cmpl_read_topology(phba, pmb);

		return;
	}

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int rc, i;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Type:%d, Event Data: x%08x "
			"x%08x x%08x x%08x\n", evt_type,
			acqe_sli->event_data1, acqe_sli->event_data2,
			acqe_sli->reserved, acqe_sli->trailer);

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* port name is empty */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3296 "
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? " not" : "");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? " not" : "");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}

		/* Issue READ_CONFIG mbox command to refresh supported speeds */
		rc = lpfc_sli4_read_config(phba);
		if (rc) {
			phba->lmt = 0;
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"3194 Unable to retrieve supported "
					"speeds, rc = 0x%x\n", rc);
		}
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				shost = lpfc_shost_from_vport(vports[i]);
				lpfc_host_supported_speeds_set(shost);
			}
		}
		lpfc_destroy_vport_work_array(phba, vports);

		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
		/* Misconfigured WWN. Reports that the SLI Port is configured
		 * to use FA-WWN, but the attached device doesn't support it.
		 * No driver action is required.
		 * Event Data1 - N.A, Event Data2 - N.A
		 */
		lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
			     "2699 Misconfigured FA-WWN - Attached device does "
			     "not support FA-WWN\n");
		break;
	case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
		/* EEPROM failure. No driver action is required */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2518 EEPROM failure - "
				"Event Data1: x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Unrecognized SLI event, type: 0x%x",
				evt_type);
		break;
	}
}

/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return NULL;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}
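
/*
 * Illustrative sketch, not driver code: lpfc_create_vport_work_array()
 * returns a NULL-terminated snapshot of the active vports; callers walk it
 * up to max_vports or the first NULL and then release the snapshot, as the
 * CVL loop above does.
 */
static void example_for_each_vport(struct lpfc_hba *phba,
				   void (*fn)(struct lpfc_vport *vport))
{
	struct lpfc_vport **vports = lpfc_create_vport_work_array(phba);
	int i;

	if (!vports)
		return;
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
		fn(vports[i]);	/* act on one vport in the snapshot */
	lpfc_destroy_vport_work_array(phba, vports);
}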

/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event.
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2549 FCF (x%x) disconnected from network, "
				"tag:x%x\n", acqe_fip->index,
				acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_TRACE_EVENT,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR,
				LOG_TRACE_EVENT,
				"2718 Clear Virtual Link Received for VPI 0x%x"
				" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
		    active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(&ndlp->lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_TRACE_EVENT,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"fail through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be retry on the
				 * currently registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}
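
/*
 * Illustrative sketch, not driver code: the FCF flag handling above follows
 * a common pattern - test-and-set the state bit under hbalock, then drop
 * the lock before issuing mailbox commands or other slow work.
 */
static bool example_try_claim_failover(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
		/* failover already in progress; let the owner finish */
		spin_unlock_irq(&phba->hbalock);
		return false;
	}
	phba->fcf.fcf_flag |= FCF_ACVL_DISC;	/* claim the work */
	spin_unlock_irq(&phba->hbalock);
	return true;	/* caller may now issue the rediscover mailbox */
}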

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}

/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* First, declare the async event has been handled */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Now, handle all the async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
				       iflags);

		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}

		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
		spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	}
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
}
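
/*
 * Illustrative sketch with a hypothetical event type: the loop above drains
 * a work list by detaching one entry under the lock, processing it with the
 * lock dropped, and re-taking the lock before the next list_empty() check.
 */
struct example_evt {
	struct list_head list;
};

static void example_drain(struct list_head *queue, spinlock_t *lock)
{
	struct example_evt *evt;
	unsigned long iflags;

	spin_lock_irqsave(lock, iflags);
	while (!list_empty(queue)) {
		evt = list_first_entry(queue, struct example_evt, list);
		list_del(&evt->list);
		spin_unlock_irqrestore(lock, iflags);
		/* process and free evt without holding the lock */
		spin_lock_irqsave(lock, iflags);
	}
	spin_unlock_irqrestore(lock, iflags);
}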

/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}

/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}

/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}

/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1401 Failed to enable pci device\n");
	return -ENODEV;
}

/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Release PCI resource and disable PCI device */
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);

	return;
}

/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}

	/* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	} else {
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_sli_flush_io_rings(phba);
	}
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}
dea3101e 6342
/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCI SR-IOV extended capability of the physical
 * function to determine the maximum number of virtual functions the
 * device supports.
 *
 * Return codes
 *	the number of virtual functions supported
 *	0 - no SR-IOV capability found
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, a failure return from the
 * pci_enable_sriov() API call is not considered an error condition for
 * most of the devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}

/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	atomic_set(&phba->dbg_log_idx, 0);
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));

	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX heartbeat timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* EA polling mode timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);

	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);

	INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
			  lpfc_idle_stat_delay_work);

	return 0;
}

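/*
 * Editor's illustrative sketch: each timer_setup() call above registers a
 * callback that the timer core invokes with a pointer to the timer
 * itself; from_timer() recovers the enclosing structure. Hypothetical
 * names, not part of lpfc.
 */
#if 0
struct example_ctx {
	struct timer_list tmo;	/* embedded timer */
};

static void example_timeout(struct timer_list *t)
{
	/* Map the timer_list back to its containing structure. */
	struct example_ctx *ctx = from_timer(ctx, t, tmo);

	(void)ctx;		/* handle the timeout for ctx here */
}

static void example_arm(struct example_ctx *ctx)
{
	timer_setup(&ctx->tmo, example_timeout, 0);
	mod_timer(&ctx->tmo, jiffies + msecs_to_jiffies(1000));
}
#endif
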
/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	int rc, entry_sz;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	/* Set up phase-1 common device driver resources */

	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.sli3_ring)
		phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	if (phba->sli_rev == LPFC_SLI_REV4)
		entry_sz = sizeof(struct sli4_sge);
	else
		entry_sz = sizeof(struct ulp_bde64);

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * entry_sz);

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);
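
	/*
	 * Editor's worked example (illustrative figures only): on an SLI-3
	 * port a BDE (struct ulp_bde64) is 12 bytes, so with a
	 * cfg_sg_seg_cnt of 64 the non-BG path above sizes the pool
	 * buffer as:
	 *
	 *	sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)
	 *		+ (64 + 2) * 12  =  cmnd + rsp + 792 bytes
	 *
	 * and cfg_total_seg_cnt becomes 64 + 2 = 66 (data BDEs plus the
	 * two reserved for the FCP cmnd and rsp). The exact totals depend
	 * on the struct sizes in the headers being built against.
	 */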

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	phba->lpfc_sg_dma_buf_pool =
		dma_pool_create("lpfc_sg_dma_buf_pool",
				&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_sg_dma_buf_pool)
		goto fail_free_mem;

	phba->lpfc_cmd_rsp_buf_pool =
		dma_pool_create("lpfc_cmd_rsp_buf_pool",
				&phba->pcidev->dev,
				sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp),
				BPL_ALIGN_SZ, 0);

	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto fail_free_dma_buf_pool;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

fail_free_dma_buf_pool:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
fail_free_mem:
	lpfc_mem_free(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}

/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;
	int rc, i, max_buf_size;
	int longs;
	int extra;
	uint64_t wwn;
	u32 if_type;
	u32 if_fam;

	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
	phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	/* Before proceed, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/* Allocate all driver workqueues here */

	/* The lpfc_wq workqueue for deferred irq use */
	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);

	/*
	 * Initialize timers used by driver
	 */

	timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);

	/* FCF rediscover timer */
	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);

	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/*
	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
	 * we will associate a new ring, for each EQ/CQ/WQ tuple.
	 * The WQ create will allocate the ring.
	 */

	/* Initialize buffer queue management fields */
	INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/* for VMID idle timeout if VMID is enabled */
	if (lpfc_is_vmid_enabled(phba))
		timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);

	/*
	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
	 */
	/* Initialize the Abort buffer list used by driver */
	spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Initialize the Abort nvme buffer list used by driver */
		spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
		spin_lock_init(&phba->sli4_hba.t_active_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
	}

	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
	spin_lock_init(&phba->sli4_hba.asynce_list_lock);
	spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* Initialize mboxq lists. If the early init routines fail
	 * these lists need to be correctly initialized.
	 */
	INIT_LIST_HEAD(&phba->sli.mboxq);
	INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);

	/* initialize optic_state to 0xFF */
	phba->sli4_hba.lnk_info.optic_state = 0xff;

	/* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* IF Type 2 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc)) {
			rc = -ENODEV;
			goto out_free_mem;
		}
		phba->temp_sensor_support = 1;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device. */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;
	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}

	/* Check for NVMET being configured */
	phba->nvmet_support = 0;
	if (lpfc_enable_nvmet_cnt) {

		/* First get WWN of HBA instance */
		lpfc_read_nv(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6016 Mailbox failed , mbxCmd x%x "
					"READ_NV, mbxStatus x%x\n",
					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe));
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
		mb = &mboxq->u.mb;
		memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(uint64_t));
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwnn.u.name = wwn;
		memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
		       sizeof(uint64_t));
		/* wwn is WWPN of HBA instance */
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwpn.u.name = wwn;

		/* Check to see if it matches any module parameter */
		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
			if (wwn == lpfc_enable_nvmet[i]) {
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
				if (lpfc_nvmet_mem_alloc(phba))
					break;

				phba->nvmet_support = 1; /* a match */

				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6017 NVME Target %016llx\n",
						wwn);
#else
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"6021 Can't enable NVME Target."
						" NVME_TARGET_FC infrastructure"
						" is not in kernel\n");
#endif
				/* Not supported for NVMET */
				phba->cfg_xri_rebalancing = 0;
				if (phba->irq_chann_mode == NHT_MODE) {
					phba->cfg_irq_chann =
						phba->sli4_hba.num_present_cpu;
					phba->cfg_hdw_queue =
						phba->sli4_hba.num_present_cpu;
					phba->irq_chann_mode = NORMAL_MODE;
				}
				break;
			}
		}
	}

	lpfc_nvme_mod_param_dep(phba);

	/*
	 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters come
	 * back in conflict.
	 */
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if_fam = bf_get(lpfc_sli_intf_sli_family,
				&phba->sli4_hba.sli_intf);
		if (phba->sli4_hba.extents_in_use &&
		    phba->sli4_hba.rpi_hdrs_in_use) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2999 Unsupported SLI4 Parameters "
					"Extents and RPI headers enabled.\n");
			if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
			    if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
				mempool_free(mboxq, phba->mbox_mem_pool);
				rc = -EIO;
				goto out_free_bsmbx;
			}
		}
		if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
		      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}

	/*
	 * 1 for cmd, 1 for rsp, NVME adds an extra one
	 * for boundary conditions in its max_sgl_segment template.
	 */
	extra = 2;
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		extra++;

	/*
	 * It doesn't matter what family our adapter is in, we are
	 * limited to 2 Pages, 512 SGEs, for our SGL.
	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
	 */
	max_buf_size = (2 * SLI4_PAGE_SIZE);

	/*
	 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be calculated.
	 */
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		/* Both cfg_enable_bg and cfg_external_dif code paths */

		/*
		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
		 * the FCP rsp, and a SGE. Since we have no control
		 * over how many protection segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), just allocate enough SGEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt
		 * to minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp) + max_buf_size;

		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;

		/*
		 * If supporting DIF, reduce the seg count for scsi to
		 * allow room for the DIF sges.
		 */
		if (phba->cfg_enable_bg &&
		    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
			phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
		else
			phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;

	} else {
		/*
		 * The scsi_buf for a regular I/O holds the FCP cmnd,
		 * the FCP rsp, a SGE for each, and a SGE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
				sizeof(struct fcp_rsp) +
				((phba->cfg_sg_seg_cnt + extra) *
				sizeof(struct sli4_sge));

		/* Total SGEs for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
		phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;

		/*
		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
		 * need to post 1 page for the SGL.
		 */
	}

	if (phba->cfg_xpsgl && !phba->nvmet_support)
		phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
	else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
				SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);

	phba->border_sge_num = phba->cfg_sg_dma_buf_size /
			       sizeof(struct sli4_sge);

	/* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6300 Reducing NVME sg segment "
					"cnt to %d\n",
					LPFC_MAX_NVME_SEG_CNT);
			phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
		} else
			phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9087 sg_seg_cnt:%d dmabuf_size:%d "
			"total:%d scsi:%d nvme:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
			phba->cfg_nvme_seg_cnt);

	if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
		i = phba->cfg_sg_dma_buf_size;
	else
		i = SLI4_PAGE_SIZE;

	phba->lpfc_sg_dma_buf_pool =
			dma_pool_create("lpfc_sg_dma_buf_pool",
					&phba->pcidev->dev,
					phba->cfg_sg_dma_buf_size,
					i, 0);
	if (!phba->lpfc_sg_dma_buf_pool)
		goto out_free_bsmbx;

	phba->lpfc_cmd_rsp_buf_pool =
			dma_pool_create("lpfc_cmd_rsp_buf_pool",
					&phba->pcidev->dev,
					sizeof(struct fcp_cmnd) +
					sizeof(struct fcp_rsp),
					i, 0);
	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto out_free_sg_dma_buf;

	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Verify OAS is supported */
	lpfc_sli4_oas_verify(phba);

	/* Verify RAS support on adapter */
	lpfc_sli4_ras_init(phba);

	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;

	/* Initialize sgl lists per host */
	lpfc_init_sgl_list(phba);

	/* Allocate and initialize active sgl array */
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1430 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2759 Failed allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}

	phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
					    sizeof(struct lpfc_hba_eq_hdl),
					    GFP_KERNEL);
	if (!phba->sli4_hba.hba_eq_hdl) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2572 Failed allocate memory for "
				"fast-path per-EQ handle array\n");
		rc = -ENOMEM;
		goto out_free_fcf_rr_bmask;
	}

	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
					sizeof(struct lpfc_vector_map_info),
					GFP_KERNEL);
	if (!phba->sli4_hba.cpu_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3327 Failed allocate memory for msi-x "
				"interrupt vector mapping\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_hdl;
	}

	phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
	if (!phba->sli4_hba.eq_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3321 Failed allocation for per_cpu stats\n");
		rc = -ENOMEM;
		goto out_free_hba_cpu_map;
	}

	phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
					   sizeof(*phba->sli4_hba.idle_stat),
					   GFP_KERNEL);
	if (!phba->sli4_hba.idle_stat) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3390 Failed allocation for idle_stat\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_info;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
	if (!phba->sli4_hba.c_stat) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3332 Failed allocating per cpu hdwq stats\n");
		rc = -ENOMEM;
		goto out_free_hba_idle_stat;
	}
#endif

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
out_free_hba_idle_stat:
	kfree(phba->sli4_hba.idle_stat);
#endif
out_free_hba_eq_info:
	free_percpu(phba->sli4_hba.eq_info);
out_free_hba_cpu_map:
	kfree(phba->sli4_hba.cpu_map);
out_free_hba_eq_hdl:
	kfree(phba->sli4_hba.hba_eq_hdl);
out_free_fcf_rr_bmask:
	kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_free_cmd_rsp_buf:
	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;
out_free_sg_dma_buf:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}

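/*
 * Editor's illustrative sketch: the READ_NV handling above copies an
 * 8-byte world wide name into a u64 and byte-swaps it with cpu_to_be64()
 * so it can be compared numerically against the lpfc_enable_nvmet module
 * parameter, mirroring the driver's own usage. Hypothetical helper, not
 * part of lpfc.
 */
#if 0
static u64 example_wwn_to_u64(const u8 name[8])
{
	u64 wwn;

	memcpy(&wwn, name, sizeof(wwn));	/* raw on-wire bytes */
	return cpu_to_be64(wwn);		/* normalize for comparison */
}
#endif
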
/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	free_percpu(phba->sli4_hba.eq_info);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	free_percpu(phba->sli4_hba.c_stat);
#endif
	kfree(phba->sli4_hba.idle_stat);

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_possible_cpu = 0;
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;
	cpumask_clear(&phba->sli4_hba.irq_aff_mask);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.hba_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);
	lpfc_free_nvmet_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}

/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	phba->lpfc_hba_init_link = lpfc_hba_init_link;
	phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}

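/*
 * Editor's illustrative sketch: the jump table above lets callers invoke
 * phba->lpfc_stop_port(phba) without knowing whether the bound routine is
 * the SLI-3 (_s3) or SLI-4 (_s4) variant. A minimal version of the same
 * pattern, with hypothetical names:
 */
#if 0
struct example_ops {
	void (*stop_port)(struct lpfc_hba *phba);
};

static void example_bind(struct example_ops *ops, uint8_t dev_grp)
{
	/* Select the revision-specific implementation once at init time. */
	ops->stop_port = (dev_grp == LPFC_PCI_DEV_LP) ?
			 lpfc_stop_port_s3 : lpfc_stop_port_s4;
}
#endif
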
/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
}

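/*
 * Editor's illustrative sketch: kthread_run() returns either a task
 * pointer or an ERR_PTR() value, never NULL, which is why the code above
 * tests IS_ERR(). A worker created this way typically loops until
 * kthread_stop() is called. Hypothetical worker, not part of lpfc.
 */
#if 0
static int example_do_work(void *data)
{
	while (!kthread_should_stop()) {
		/* process pending work for the adapter passed in @data */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif
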
/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	if (phba->wq) {
		flush_workqueue(phba->wq);
		destroy_workqueue(phba->wq);
		phba->wq = NULL;
	}

	/* Stop kernel worker thread */
	if (phba->worker_thread)
		kthread_stop(phba->worker_thread);
}

/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}

/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of requested iocbs
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host.  */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, iocb_count);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}

/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
void
lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;

	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
}

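/*
 * Editor's illustrative sketch: list_for_each_entry_safe() caches the
 * next node before the loop body runs, which is what makes the
 * list_del() + kfree() pattern above safe while iterating. Hypothetical
 * node type, not part of lpfc.
 */
#if 0
struct example_node {
	struct list_head list;
};

static void example_drain(struct list_head *head)
{
	struct example_node *pos, *next;

	list_for_each_entry_safe(pos, next, head, list) {
		list_del(&pos->list);	/* safe: @next was already saved */
		kfree(pos);
	}
}
#endif
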
/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/
static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(sglq_list);

	/* Retrieve all els sgls from driver list */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* Now free the sgl list */
	lpfc_free_sgl_list(phba, &sglq_list);
}

/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's nvmet sgl list and memory.
 **/
static void
lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	LIST_HEAD(sglq_list);

	/* Retrieve all nvmet sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}

	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls over again.
	 */
	phba->sli4_hba.nvmet_xri_cnt = 0;
}

/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;
	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}

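/*
 * Editor's note with sketch: the open-coded size multiplication above is
 * equivalent to a kcalloc() of max_xri pointer slots; kcalloc() also
 * fails cleanly on multiplication overflow. A hypothetical equivalent:
 */
#if 0
static int example_init_active_list(struct lpfc_hba *phba)
{
	phba->sli4_hba.lpfc_sglq_active_list =
		kcalloc(phba->sli4_hba.max_cfg_param.max_xri,
			sizeof(struct lpfc_sglq *), GFP_KERNEL);
	return phba->sli4_hba.lpfc_sglq_active_list ? 0 : -ENOMEM;
}
#endif
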
/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}

/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 *
 **/
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* nvme xri-buffer book keeping */
	phba->sli4_hba.io_xri_cnt = 0;
}

/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents. This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
 * and should be called only when interrupts are disabled.
 *
 * Return codes
 *	0 - successful
 *	-ERROR - otherwise.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}

/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba. This single region
 * provides support for up to 64 rpis. The region is used globally
 * by the device.
 *
 * Returns:
 *	A valid rpi hdr on success.
 *	A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required. Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block. The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/* Reached full RPI range */
	if (curr_rpi_range == rpi_limit)
		return NULL;

	/*
	 * First allocate the protocol header region for the port. The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}

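/*
 * Editor's illustrative sketch: dma_alloc_coherent() typically returns a
 * region aligned to the smallest page order covering the request, but the
 * port's 4K alignment requirement above is still verified explicitly with
 * IS_ALIGNED(). Hypothetical helper, not part of lpfc.
 */
#if 0
static void *example_alloc_aligned_4k(struct device *dev, dma_addr_t *phys)
{
	void *virt = dma_alloc_coherent(dev, SZ_4K, phys, GFP_KERNEL);

	if (virt && !IS_ALIGNED(*phys, SZ_4K)) {
		dma_free_coherent(dev, SZ_4K, virt, *phys);
		return NULL;	/* device cannot use a misaligned region */
	}
	return virt;
}
#endif
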
/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;

	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}
 exit:
	/* There are no rpis available to the port now. */
	phba->sli4_hba.next_rpi = 0;
}

/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}

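/*
 * Editor's illustrative sketch: board numbers come from the lpfc_hba_index
 * idr (lpfc_hba_free() below returns them with idr_remove());
 * lpfc_get_instance() is presumed to wrap idr_alloc() roughly like this
 * hypothetical helper:
 */
#if 0
static int example_get_instance(struct idr *idr)
{
	/* Allocate the lowest free id >= 0; returns negative errno on failure. */
	return idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
}
#endif
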
/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		kfree(phba->sli4_hba.hdwq);

	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	/* Free memory allocated with sli3 rings */
	kfree(phba->sli.sli3_ring);
	phba->sli.sli3_ring = NULL;

	kfree(phba);
	return;
}

/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;

	if (phba->nvmet_support) {
		/* Only 1 vport (pport) will support NVME target */
		phba->targetport = NULL;
		phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
				"6076 NVME Target Found\n");
	}

	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	/*
	 * At this point we are fully registered with PSA. In addition,
	 * any initial discovery should be completed.
	 */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {

		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
}

/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
	destroy_port(vport);

	return;
}

/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	uint32_t old_mask;
	uint32_t old_guard;

	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");

		old_mask = phba->cfg_prot_mask;
		old_guard = phba->cfg_prot_guard;

		/* Only allow supported values */
		phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
			SHOST_DIX_TYPE0_PROTECTION |
			SHOST_DIX_TYPE1_PROTECTION);
		phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
					 SHOST_DIX_GUARD_CRC);

		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
		if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
			phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;

		if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
			if ((old_mask != phba->cfg_prot_mask) ||
				(old_guard != phba->cfg_prot_guard))
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1475 Registering BlockGuard with the "
					"SCSI layer: mask %d guard %d\n",
					phba->cfg_prot_mask,
					phba->cfg_prot_guard);

			scsi_host_set_prot(shost, phba->cfg_prot_mask);
			scsi_host_set_guard(shost, phba->cfg_prot_guard);
		} else
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1479 Not Registering BlockGuard with the SCSI "
				"layer, Bad protection parameters: %d %d\n",
				old_mask, old_guard);
	}
}

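/*
 * Editor's illustrative sketch: scsi_host_set_prot() advertises which
 * DIF/DIX protection types the LLD handles and scsi_host_set_guard()
 * advertises the checksum flavors, exactly as done above. A minimal
 * registration for T10 DIF Type 1 with CRC guard might look like this
 * (hypothetical helper, not part of lpfc):
 */
#if 0
static void example_register_prot(struct Scsi_Host *shost)
{
	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
				  SHOST_DIX_TYPE1_PROTECTION);
	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
}
#endif
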
7923/**
7924 * lpfc_post_init_setup - Perform necessary device post initialization setup.
7925 * @phba: pointer to lpfc hba data structure.
7926 *
7927 * This routine is invoked to perform all the necessary post initialization
7928 * setup for the device.
7929 **/
7930static void
7931lpfc_post_init_setup(struct lpfc_hba *phba)
7932{
7933 struct Scsi_Host *shost;
7934 struct lpfc_adapter_event_header adapter_event;
7935
7936 /* Get the default values for Model Name and Description */
7937 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7938
7939 /*
7940 * hba setup may have changed the hba_queue_depth so we need to
7941 * adjust the value of can_queue.
7942 */
7943 shost = pci_get_drvdata(phba->pcidev);
7944 shost->can_queue = phba->cfg_hba_queue_depth - 10;
3772a991
JS
7945
7946 lpfc_host_attrib_init(shost);
7947
7948 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7949 spin_lock_irq(shost->host_lock);
7950 lpfc_poll_start_timer(phba);
7951 spin_unlock_irq(shost->host_lock);
7952 }
7953
7954 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7955 "0428 Perform SCSI scan\n");
7956 /* Send board arrival event to upper layer */
7957 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7958 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7959 fc_host_post_vendor_event(shost, fc_get_event_number(),
7960 sizeof(adapter_event),
7961 (char *) &adapter_event,
7962 LPFC_NL_VENDOR_ID);
7963 return;
7964}

/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 * 	0 - successful
 * 	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error;

	if (!pdev)
		return -ENODEV;

	/* Set the device DMA mask size */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;
	error = -ENODEV;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
					       &phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
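
/*
 * A minimal sketch of the DMA-mask fallback used above: prefer 64-bit
 * addressing and drop to 32-bit only if the platform rejects it. This is
 * standard dma-mapping API usage; "pdev" here stands in for any struct
 * pci_dev, and the error code chosen on failure is up to the caller.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;		(no usable DMA addressing)
 */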

/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}

/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
		case LPFC_SLI_INTF_IF_TYPE_6:
			/* Final checks.  The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}
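
/*
 * Timing note (a sketch, not driver code): the POST poll above runs at most
 * 3000 iterations with msleep(10) between reads, i.e. roughly
 * 3000 * 10 ms = 30 seconds, which is where the "up to 30 seconds" comment
 * comes from. The same bounded-poll shape, with a hypothetical ready()
 * predicate, is simply:
 *
 *	for (i = 0; i < 3000; i++) {
 *		if (ready())
 *			break;
 *		msleep(10);
 *	}
 */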

/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: sli if type to operate on.
 *
 * This routine is invoked to set up SLI4 BAR1 register memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_SLIPORT_IF0_SMPHR;
		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISR0;
		phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_IMR0;
		phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISCR0;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_CQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_EQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_MQ_DOORBELL;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_err(&phba->pcidev->dev,
			"FATAL - unsupported SLI4 interface type - %d\n",
			if_type);
		break;
	}
}

/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
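
/*
 * Layout sketch: each virtual function owns one LPFC_VFR_PAGE_SIZE-sized
 * page of doorbells in BAR2, so, illustratively, for vf == 2 the RQ
 * doorbell above resolves to:
 *
 *	drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL
 *
 * i.e. a fixed per-queue offset inside the per-VF doorbell page.
 */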

/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
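
/*
 * Address-split sketch for the bootstrap mailbox above: the aligned
 * physical address is posted as two 30-bit halves, the low half carrying
 * bits [33:4] and the high half bits [63:34]; bits [3:0] are implicitly
 * zero thanks to the 16-byte alignment. In other words:
 *
 *	lo30 = (u32)((phys >> 4)  & 0x3fffffff);	bits 33..4
 *	hi30 = (u32)((phys >> 34) & 0x3fffffff);	bits 63..34
 *	addr_lo = (lo30 << 2) | LPFC_BMBX_BIT1_ADDR_LO;
 *	addr_hi = (hi30 << 2) | LPFC_BMBX_BIT1_ADDR_HI;
 *
 * The <<2 re-opens room for the hi/lo marker bit each register carries.
 */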

/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources. This routine requires
 * the caller to ensure all mailbox commands are recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}

static const char * const lpfc_topo_to_str[] = {
	"Loop then P2P",
	"Loopback",
	"P2P Only",
	"Unsupported",
	"Loop Only",
	"Unsupported",
	"P2P then Loop",
};

#define	LINK_FLAGS_DEF	0x0
#define	LINK_FLAGS_P2P	0x1
#define	LINK_FLAGS_LOOP	0x2
/**
 * lpfc_map_topology - Map the topology read from READ_CONFIG
 * @phba: pointer to lpfc hba data structure.
 * @rd_config: pointer to read config data
 *
 * This routine is invoked to map the topology values as read
 * from the read config mailbox command. If the persistent
 * topology feature is supported, the firmware will provide the
 * saved topology information to be used in INIT_LINK
 **/
static void
lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
{
	u8 ptv, tf, pt;

	ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
	tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
	pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
			ptv, tf, pt);
	if (!ptv) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2019 FW does not support persistent topology "
				"Using driver parameter defined value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
		return;
	}
	/* FW supports persistent topology - override module parameter value */
	phba->hba_flag |= HBA_PERSISTENT_TOPO;
	switch (phba->pcidev->device) {
	case PCI_DEVICE_ID_LANCER_G7_FC:
	case PCI_DEVICE_ID_LANCER_G6_FC:
		if (!tf) {
			phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
					? FLAGS_TOPOLOGY_MODE_LOOP
					: FLAGS_TOPOLOGY_MODE_PT_PT);
		} else {
			phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
		}
		break;
	default:	/* G5 */
		if (tf) {
			/* If topology failover set - pt is '0' or '1' */
			phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
					      FLAGS_TOPOLOGY_MODE_LOOP_PT);
		} else {
			phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
					? FLAGS_TOPOLOGY_MODE_PT_PT
					: FLAGS_TOPOLOGY_MODE_LOOP);
		}
		break;
	}
	if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2020 Using persistent topology value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
	} else {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2021 Invalid topology values from FW "
				"Using driver parameter defined value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
	}
}
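
/*
 * Decode summary for lpfc_map_topology() above (a sketch derived from the
 * code, where ptv = persistent topology valid, tf = topology failover and
 * pt = port type):
 *
 *	ptv tf pt	G6/G7 result		G5/default result
 *	 0   -  -	driver parameter	driver parameter
 *	 1   0  p2p	PT_PT			PT_PT
 *	 1   0  loop	LOOP			LOOP
 *	 1   1  0/1	invalid, driver parm	pt ? PT_LOOP : LOOP_PT
 */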

/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
 * allocation for the port.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint16_t forced_link_speed;
	uint32_t if_type, qmin;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2012 Mailbox failed , mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
			phba->bbcredit_support = 1;
			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
		}

		phba->sli4_hba.conf_trunk =
			bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		/* Reduce resource usage in kdump environment */
		if (is_kdump_kernel() &&
		    phba->sli4_hba.max_cfg_param.max_xri > 512)
			phba->sli4_hba.max_cfg_param.max_xri = 512;
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		/* Limit the max we support */
		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_map_topology(phba, rd_config);
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi,
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->sli4_hba.max_cfg_param.max_cq,
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->sli4_hba.max_cfg_param.max_rq,
				phba->lmt);

		/*
		 * Calculate queue resources based on how
		 * many WQ/CQ/EQs are available.
		 */
		qmin = phba->sli4_hba.max_cfg_param.max_wq;
		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_cq;
		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_eq;
		/*
		 * What's left after this can go toward NVME / FCP.
		 * The minus 4 accounts for ELS, NVME LS, MBOX
		 * plus one extra. When configured for
		 * NVMET, FCP io channel WQs are not created.
		 */
		qmin -= 4;

		/* Check to see if there is enough for NVME */
		if ((phba->cfg_irq_chann > qmin) ||
		    (phba->cfg_hdw_queue > qmin)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2005 Reducing Queues - "
					"FW resource limitation: "
					"WQ %d CQ %d EQ %d: min %d: "
					"IRQ %d HDWQ %d\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->sli4_hba.max_cfg_param.max_cq,
					phba->sli4_hba.max_cfg_param.max_eq,
					qmin, phba->cfg_irq_chann,
					phba->cfg_hdw_queue);

			if (phba->cfg_irq_chann > qmin)
				phba->cfg_irq_chann = qmin;
			if (phba->cfg_hdw_queue > qmin)
				phba->cfg_hdw_queue = qmin;
		}
	}

	if (rc)
		goto read_cfg_out;

	/* Update link speed if forced link speed is supported */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
		forced_link_speed =
			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
		if (forced_link_speed) {
			phba->hba_flag |= HBA_FORCED_LINK_SPEED;

			switch (forced_link_speed) {
			case LINK_SPEED_1G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_1G;
				break;
			case LINK_SPEED_2G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_2G;
				break;
			case LINK_SPEED_4G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_4G;
				break;
			case LINK_SPEED_8G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_8G;
				break;
			case LINK_SPEED_10G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_10G;
				break;
			case LINK_SPEED_16G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_16G;
				break;
			case LINK_SPEED_32G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_32G;
				break;
			case LINK_SPEED_64G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_64G;
				break;
			case 0xffff:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
				break;
			default:
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"0047 Unrecognized link "
						"speed : %d\n",
						forced_link_speed);
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
			}
		}
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3026 Mailbox failed , mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* search for fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
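
/*
 * Sizing sketch: the queue clamp in lpfc_sli4_read_config() reduces to
 * min(max_wq, max_cq, max_eq) minus the four reserved queues (ELS, NVME LS,
 * MBOX, plus one spare). With hypothetical firmware limits of 128 WQs,
 * 160 CQs and 64 EQs:
 *
 *	qmin = min(128, min(160, 64)) - 4;	resolves to 60
 *
 * so cfg_irq_chann and cfg_hdw_queue would each be capped at 60.
 */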

/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0.  This routine has no function for other
 * if_types.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	return rc;
}

/**
 * lpfc_sli4_queue_verify - Verify and update EQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs.
 * After this routine is called the counts will be set to valid values that
 * adhere to the constraints of the system's interrupt vectors and the port's
 * queue resources.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	if (phba->nvmet_support) {
		if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
			phba->cfg_hdw_queue, phba->cfg_irq_chann,
			phba->cfg_nvmet_mrq);

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
	return 0;
}

static int
lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
{
	struct lpfc_queue *qdesc;
	u32 wqesize;
	int cpu;

	cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
	/* Create Fast Path IO CQs */
	if (phba->enab_exp_wqcq_pages)
		/* Increase the CQ size when WQEs contain an embedded cdb */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      LPFC_CQE_EXP_COUNT, cpu);

	else
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0499 Failed allocate fast-path IO CQ (%d)\n",
				idx);
		return 1;
	}
	qdesc->qe_valid = 1;
	qdesc->hdwq = idx;
	qdesc->chann = cpu;
	phba->sli4_hba.hdwq[idx].io_cq = qdesc;

	/* Create Fast Path IO WQs */
	if (phba->enab_exp_wqcq_pages) {
		/* Increase the WQ size when WQEs contain an embedded cdb */
		wqesize = (phba->fcp_embed_io) ?
			LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
					      wqesize,
					      LPFC_WQE_EXP_COUNT, cpu);
	} else
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount, cpu);

	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0503 Failed allocate fast-path IO WQ (%d)\n",
				idx);
		return 1;
	}
	qdesc->hdwq = idx;
	qdesc->chann = cpu;
	phba->sli4_hba.hdwq[idx].io_wq = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	return 0;
}
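
/*
 * Sizing note (a sketch of the decision above, not additional driver
 * logic): with expanded WQ/CQ pages enabled, the WQ entry size follows
 * the embedded-CDB format,
 *
 *	wqesize = fcp_embed_io ? LPFC_WQE128_SIZE : wq_esize;
 *
 * trading memory for deeper rings (LPFC_WQE_EXP_COUNT / LPFC_CQE_EXP_COUNT
 * entries) on ports that support the expanded page layout.
 */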

/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as place holder.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int idx, cpu, eqcpu;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_vector_map_info *eqcpup;
	struct lpfc_eq_intr_info *eqi;

	/*
	 * Create HBA Record arrays.
	 * Both NVME and FCP will share that same vectors / EQs
	 */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	if (!phba->sli4_hba.hdwq) {
		phba->sli4_hba.hdwq = kcalloc(
			phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
			GFP_KERNEL);
		if (!phba->sli4_hba.hdwq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6427 Failed allocate memory for "
					"fast-path Hardware Queue array\n");
			goto out_error;
		}
		/* Prepare hardware queues to take IO buffers */
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			qp = &phba->sli4_hba.hdwq[idx];
			spin_lock_init(&qp->io_buf_list_get_lock);
			spin_lock_init(&qp->io_buf_list_put_lock);
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->get_io_bufs = 0;
			qp->put_io_bufs = 0;
			qp->total_io_bufs = 0;
			spin_lock_init(&qp->abts_io_buf_list_lock);
			INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
			qp->abts_scsi_io_bufs = 0;
			qp->abts_nvme_io_bufs = 0;
			INIT_LIST_HEAD(&qp->sgl_list);
			INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
			spin_lock_init(&qp->hdwq_lock);
		}
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		if (phba->nvmet_support) {
			phba->sli4_hba.nvmet_cqset = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_cqset) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3121 Fail allocate memory for "
					"fast-path CQ set array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_hdr) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3122 Fail allocate memory for "
					"fast-path RQ set hdr array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_data = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3124 Fail allocate memory for "
					"fast-path RQ set data array\n");
				goto out_error;
			}
		}
	}

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Create HBA Event Queues (EQs) */
	for_each_present_cpu(cpu) {
		/* We only want to create 1 EQ per vector, even though
		 * multiple CPUs might be using that vector, so only
		 * select the CPUs that are LPFC_CPU_FIRST_IRQ.
		 */
		cpup = &phba->sli4_hba.cpu_map[cpu];
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		/* Get a ptr to the Hardware Queue associated with this CPU */
		qp = &phba->sli4_hba.hdwq[cpup->hdwq];

		/* Allocate an EQ */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0497 Failed allocate EQ (%d)\n",
					cpup->hdwq);
			goto out_error;
		}
		qdesc->qe_valid = 1;
		qdesc->hdwq = cpup->hdwq;
		qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
		qdesc->last_cpu = qdesc->chann;

		/* Save the allocated EQ in the Hardware Queue */
		qp->hba_eq = qdesc;

		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
		list_add(&qdesc->cpu_list, &eqi->list);
	}

	/* Now we need to populate the other Hardware Queues, that share
	 * an IRQ vector, with the associated EQ ptr.
	 */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Check for EQ already allocated in previous loop */
		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
			continue;

		/* Check for multiple CPUs per hdwq */
		qp = &phba->sli4_hba.hdwq[cpup->hdwq];
		if (qp->hba_eq)
			continue;

		/* We need to share an EQ for this hdwq */
		eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
		eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
		qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
	}

	/* Allocate IO Path SLI4 CQ/WQs */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		if (lpfc_alloc_io_wq_cq(phba, idx))
			goto out_error;
	}

	if (phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			cpu = lpfc_find_cpu_handle(phba, idx,
						   LPFC_FIND_BY_HDWQ);
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      phba->sli4_hba.cq_ecount,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3142 Failed allocate NVME "
						"CQ Set (%d)\n", idx);
				goto out_error;
			}
			qdesc->qe_valid = 1;
			qdesc->hdwq = idx;
			qdesc->chann = cpu;
			phba->sli4_hba.nvmet_cqset[idx] = qdesc;
		}
	}

	/*
	 * Create Slow Path Completion Queues (CQs)
	 */

	cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	qdesc->chann = cpu;
	phba->sli4_hba.els_cq = qdesc;


	/*
	 * Create Slow Path Work Queues (WQs)
	 */

	/* Create Mailbox Command Queue */

	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_error;
	}
	qdesc->chann = cpu;
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create ELS Work Queues
	 */

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_error;
	}
	qdesc->chann = cpu;
	phba->sli4_hba.els_wq = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Create NVME LS Complete Queue */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6079 Failed allocate NVME LS CQ\n");
			goto out_error;
		}
		qdesc->chann = cpu;
		qdesc->qe_valid = 1;
		phba->sli4_hba.nvmels_cq = qdesc;

		/* Create NVME LS Work Queue */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6080 Failed allocate NVME LS WQ\n");
			goto out_error;
		}
		qdesc->chann = cpu;
		phba->sli4_hba.nvmels_wq = qdesc;
		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	}

	/*
	 * Create Receive Queue (RQ)
	 */

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0506 Failed allocate receive HRQ\n");
		goto out_error;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0507 Failed allocate receive DRQ\n");
		goto out_error;
	}
	phba->sli4_hba.dat_rq = qdesc;

	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
	    phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			cpu = lpfc_find_cpu_handle(phba, idx,
						   LPFC_FIND_BY_HDWQ);
			/* Create NVMET Receive Queue for header */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3146 Failed allocate "
						"receive HRQ\n");
				goto out_error;
			}
			qdesc->hdwq = idx;
			phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;

			/* Only needed for header of RQ pair */
			qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
						   GFP_KERNEL,
						   cpu_to_node(cpu));
			if (qdesc->rqbp == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6131 Failed allocate "
						"Header RQBP\n");
				goto out_error;
			}

			/* Put list in known state in case driver load fails. */
			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);

			/* Create NVMET Receive Queue for data */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3156 Failed allocate "
						"receive DRQ\n");
				goto out_error;
			}
			qdesc->hdwq = idx;
			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
		}
	}

	/* Clear NVME stats */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
			       sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
		}
	}

	/* Clear SCSI stats */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
			       sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
		}
	}

	return 0;

out_error:
	lpfc_sli4_queue_destroy(phba);
	return -ENOMEM;
}

static inline void
__lpfc_sli4_release_queue(struct lpfc_queue **qp)
{
	if (*qp != NULL) {
		lpfc_sli4_queue_free(*qp);
		*qp = NULL;
	}
}

static inline void
lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
{
	int idx;

	if (*qs == NULL)
		return;

	for (idx = 0; idx < max; idx++)
		__lpfc_sli4_release_queue(&(*qs)[idx]);

	kfree(*qs);
	*qs = NULL;
}

static inline void
lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_queue *eq;
	uint32_t idx;

	hdwq = phba->sli4_hba.hdwq;

	/* Loop thru all Hardware Queues */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		/* Free the CQ/WQ corresponding to the Hardware Queue */
		lpfc_sli4_queue_free(hdwq[idx].io_cq);
		lpfc_sli4_queue_free(hdwq[idx].io_wq);
		hdwq[idx].hba_eq = NULL;
		hdwq[idx].io_cq = NULL;
		hdwq[idx].io_wq = NULL;
		if (phba->cfg_xpsgl && !phba->nvmet_support)
			lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
		lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
	}
	/* Loop thru all IRQ vectors */
	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		/* Free the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		lpfc_sli4_queue_free(eq);
		phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
	}
}
9489
da0436e9
JS
9490/**
9491 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
9492 * @phba: pointer to lpfc hba data structure.
9493 *
 9494 * This routine is invoked to release all the SLI4 queues used for the FCoE
 9495 * HBA operation.
da0436e9 9501 **/
5350d872 9502void
da0436e9
JS
9503lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
9504{
4645f7b5
JS
9505 /*
9506 * Set FREE_INIT before beginning to free the queues.
 9507 * Wait until all users of the queues acknowledge the
 9508 * release by clearing FREE_WAIT.
9509 */
9510 spin_lock_irq(&phba->hbalock);
9511 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
9512 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
9513 spin_unlock_irq(&phba->hbalock);
9514 msleep(20);
9515 spin_lock_irq(&phba->hbalock);
9516 }
9517 spin_unlock_irq(&phba->hbalock);
9518
93a4d6f4
JS
9519 lpfc_sli4_cleanup_poll_list(phba);
9520
895427bd 9521 /* Release HBA eqs */
cdb42bec 9522 if (phba->sli4_hba.hdwq)
6a828b0f 9523 lpfc_sli4_release_hdwq(phba);
895427bd 9524
bcb24f65
JS
9525 if (phba->nvmet_support) {
9526 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
9527 phba->cfg_nvmet_mrq);
2d7dbc4c 9528
bcb24f65
JS
9529 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
9530 phba->cfg_nvmet_mrq);
9531 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
9532 phba->cfg_nvmet_mrq);
9533 }
2d7dbc4c 9534
895427bd
JS
9535 /* Release mailbox command work queue */
9536 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
9537
9538 /* Release ELS work queue */
9539 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
9540
 9541 /* Release NVME LS work queue */
9542 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
9543
9544 /* Release unsolicited receive queue */
9545 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
9546 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
9547
9548 /* Release ELS complete queue */
9549 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
9550
9551 /* Release NVME LS complete queue */
9552 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
9553
9554 /* Release mailbox command complete queue */
9555 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
9556
9557 /* Everything on this list has been freed */
9558 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
4645f7b5
JS
9559
9560 /* Done with freeing the queues */
9561 spin_lock_irq(&phba->hbalock);
9562 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9563 spin_unlock_irq(&phba->hbalock);
895427bd
JS
9564}
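
/*
 * A minimal sketch of the consumer side of the FREE_INIT/FREE_WAIT
 * handshake used by lpfc_sli4_queue_destroy() above.  The helper names
 * are illustrative only (the real queue users live elsewhere in the
 * driver), and this assumes a single queue user at a time, since the
 * flag is a bit, not a reference count.  The protocol itself follows
 * from the destroy loop: teardown sets LPFC_QUEUE_FREE_INIT and then
 * waits for the user to drop LPFC_QUEUE_FREE_WAIT.
 */
static int lpfc_queue_user_enter_sketch(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT) {
		/* Teardown already started; do not touch the queues */
		spin_unlock_irq(&phba->hbalock);
		return -EAGAIN;
	}
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}

static void lpfc_queue_user_exit_sketch(struct lpfc_hba *phba)
{
	/* Allow a waiting lpfc_sli4_queue_destroy() to proceed */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
	spin_unlock_irq(&phba->hbalock);
}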
9565
895427bd
JS
9566int
9567lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9568{
9569 struct lpfc_rqb *rqbp;
9570 struct lpfc_dmabuf *h_buf;
9571 struct rqb_dmabuf *rqb_buffer;
9572
9573 rqbp = rq->rqbp;
9574 while (!list_empty(&rqbp->rqb_buffer_list)) {
9575 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9576 struct lpfc_dmabuf, list);
9577
9578 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9579 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9580 rqbp->buffer_count--;
67d12733 9581 }
895427bd
JS
9582 return 1;
9583}
67d12733 9584
895427bd
JS
9585static int
9586lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
9587 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
9588 int qidx, uint32_t qtype)
9589{
9590 struct lpfc_sli_ring *pring;
9591 int rc;
9592
9593 if (!eq || !cq || !wq) {
372c187b 9594 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
895427bd
JS
9595 "6085 Fast-path %s (%d) not allocated\n",
9596 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
9597 return -ENOMEM;
9598 }
9599
 9600 /* Create the CQ first */
9601 rc = lpfc_cq_create(phba, cq, eq,
9602 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
9603 if (rc) {
372c187b
DK
9604 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9605 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9606 qidx, (uint32_t)rc);
895427bd 9607 return rc;
67d12733
JS
9608 }
9609
895427bd 9610 if (qtype != LPFC_MBOX) {
cdb42bec 9611 /* Setup cq_map for fast lookup */
895427bd
JS
9612 if (cq_map)
9613 *cq_map = cq->queue_id;
da0436e9 9614
895427bd
JS
9615 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9616 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9617 qidx, cq->queue_id, qidx, eq->queue_id);
da0436e9 9618
895427bd
JS
9619 /* create the wq */
9620 rc = lpfc_wq_create(phba, wq, cq, qtype);
9621 if (rc) {
372c187b 9622 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
c835c085 9623 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
895427bd
JS
9624 qidx, (uint32_t)rc);
9625 /* no need to tear down cq - caller will do so */
9626 return rc;
9627 }
da0436e9 9628
895427bd
JS
9629 /* Bind this CQ/WQ to the NVME ring */
9630 pring = wq->pring;
9631 pring->sli.sli4.wqp = (void *)wq;
9632 cq->pring = pring;
da0436e9 9633
895427bd
JS
9634 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9635 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9636 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
9637 } else {
9638 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
9639 if (rc) {
372c187b
DK
9640 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9641 "0539 Failed setup of slow-path MQ: "
9642 "rc = 0x%x\n", rc);
895427bd
JS
9643 /* no need to tear down cq - caller will do so */
9644 return rc;
9645 }
da0436e9 9646
895427bd
JS
9647 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9648 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9649 phba->sli4_hba.mbx_wq->queue_id,
9650 phba->sli4_hba.mbx_cq->queue_id);
67d12733 9651 }
da0436e9 9652
895427bd 9653 return 0;
da0436e9
JS
9654}
9655
6a828b0f
JS
9656/**
9657 * lpfc_setup_cq_lookup - Setup the CQ lookup table
9658 * @phba: pointer to lpfc hba data structure.
9659 *
9660 * This routine will populate the cq_lookup table by all
9661 * available CQ queue_id's.
9662 **/
3999df75 9663static void
6a828b0f
JS
9664lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9665{
9666 struct lpfc_queue *eq, *childq;
6a828b0f
JS
9667 int qidx;
9668
6a828b0f
JS
9669 memset(phba->sli4_hba.cq_lookup, 0,
9670 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
657add4e 9671 /* Loop thru all IRQ vectors */
6a828b0f 9672 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
657add4e
JS
9673 /* Get the EQ corresponding to the IRQ vector */
9674 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
6a828b0f
JS
9675 if (!eq)
9676 continue;
657add4e 9677 /* Loop through all CQs associated with that EQ */
6a828b0f
JS
9678 list_for_each_entry(childq, &eq->child_list, list) {
9679 if (childq->queue_id > phba->sli4_hba.cq_max)
9680 continue;
c00f62e6 9681 if (childq->subtype == LPFC_IO)
6a828b0f
JS
9682 phba->sli4_hba.cq_lookup[childq->queue_id] =
9683 childq;
9684 }
9685 }
9686}
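
/*
 * What the table above buys the fast path, as a minimal sketch: given
 * the completing CQ id pulled from an EQE, the interrupt handler can
 * resolve the CQ with one bounds-checked array index instead of
 * walking eq->child_list.  The helper name is illustrative only.
 */
static struct lpfc_queue *
lpfc_cq_lookup_sketch(struct lpfc_hba *phba, uint16_t cqid)
{
	if (!phba->sli4_hba.cq_lookup || cqid > phba->sli4_hba.cq_max)
		return NULL;	/* caller falls back to the list walk */
	return phba->sli4_hba.cq_lookup[cqid];
}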
9687
da0436e9
JS
9688/**
9689 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
9690 * @phba: pointer to lpfc hba data structure.
9691 *
9692 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
9693 * operation.
9694 *
9695 * Return codes
af901ca1 9696 * 0 - successful
25985edc 9697 * -ENOMEM - No available memory
d439d286 9698 * -EIO - The mailbox failed to complete successfully.
da0436e9
JS
9699 **/
9700int
9701lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9702{
962bc51b
JS
9703 uint32_t shdr_status, shdr_add_status;
9704 union lpfc_sli4_cfg_shdr *shdr;
657add4e 9705 struct lpfc_vector_map_info *cpup;
cdb42bec 9706 struct lpfc_sli4_hdw_queue *qp;
962bc51b 9707 LPFC_MBOXQ_t *mboxq;
657add4e 9708 int qidx, cpu;
cb733e35 9709 uint32_t length, usdelay;
895427bd 9710 int rc = -ENOMEM;
962bc51b
JS
9711
9712 /* Check for dual-ULP support */
9713 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9714 if (!mboxq) {
372c187b 9715 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
962bc51b
JS
9716 "3249 Unable to allocate memory for "
9717 "QUERY_FW_CFG mailbox command\n");
9718 return -ENOMEM;
9719 }
9720 length = (sizeof(struct lpfc_mbx_query_fw_config) -
9721 sizeof(struct lpfc_sli4_cfg_mhdr));
9722 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9723 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9724 length, LPFC_SLI4_MBX_EMBED);
9725
9726 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9727
9728 shdr = (union lpfc_sli4_cfg_shdr *)
9729 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9730 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9731 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9732 if (shdr_status || shdr_add_status || rc) {
372c187b 9733 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
962bc51b
JS
9734 "3250 QUERY_FW_CFG mailbox failed with status "
9735 "x%x add_status x%x, mbx status x%x\n",
9736 shdr_status, shdr_add_status, rc);
304ee432 9737 mempool_free(mboxq, phba->mbox_mem_pool);
962bc51b
JS
9738 rc = -ENXIO;
9739 goto out_error;
9740 }
9741
9742 phba->sli4_hba.fw_func_mode =
9743 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9744 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9745 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
8b017a30
JS
9746 phba->sli4_hba.physical_port =
9747 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
962bc51b
JS
9748 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9749 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9750 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9751 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9752
304ee432 9753 mempool_free(mboxq, phba->mbox_mem_pool);
da0436e9
JS
9754
9755 /*
67d12733 9756 * Set up HBA Event Queues (EQs)
da0436e9 9757 */
cdb42bec 9758 qp = phba->sli4_hba.hdwq;
da0436e9 9759
67d12733 9760 /* Set up HBA event queue */
cdb42bec 9761 if (!qp) {
372c187b 9762 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2e90f4b5 9763 "3147 Fast-path EQs not allocated\n");
1b51197d 9764 rc = -ENOMEM;
67d12733 9765 goto out_error;
2e90f4b5 9766 }
657add4e
JS
9767
9768 /* Loop thru all IRQ vectors */
6a828b0f 9769 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
657add4e
JS
9770 /* Create HBA Event Queues (EQs) in order */
9771 for_each_present_cpu(cpu) {
9772 cpup = &phba->sli4_hba.cpu_map[cpu];
9773
 9774 /* Look for the CPU that's using that vector with
9775 * LPFC_CPU_FIRST_IRQ set.
9776 */
9777 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9778 continue;
9779 if (qidx != cpup->eq)
9780 continue;
9781
9782 /* Create an EQ for that vector */
9783 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9784 phba->cfg_fcp_imax);
9785 if (rc) {
372c187b 9786 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
657add4e
JS
9787 "0523 Failed setup of fast-path"
9788 " EQ (%d), rc = 0x%x\n",
9789 cpup->eq, (uint32_t)rc);
9790 goto out_destroy;
9791 }
9792
9793 /* Save the EQ for that vector in the hba_eq_hdl */
9794 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9795 qp[cpup->hdwq].hba_eq;
9796
9797 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9798 "2584 HBA EQ setup: queue[%d]-id=%d\n",
9799 cpup->eq,
9800 qp[cpup->hdwq].hba_eq->queue_id);
da0436e9 9801 }
67d12733
JS
9802 }
9803
657add4e 9804 /* Loop thru all Hardware Queues */
cdb42bec 9805 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
657add4e
JS
9806 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9807 cpup = &phba->sli4_hba.cpu_map[cpu];
9808
9809 /* Create the CQ/WQ corresponding to the Hardware Queue */
cdb42bec 9810 rc = lpfc_create_wq_cq(phba,
657add4e 9811 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
c00f62e6
JS
9812 qp[qidx].io_cq,
9813 qp[qidx].io_wq,
9814 &phba->sli4_hba.hdwq[qidx].io_cq_map,
9815 qidx,
9816 LPFC_IO);
cdb42bec 9817 if (rc) {
372c187b 9818 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
895427bd 9819 "0535 Failed to setup fastpath "
c00f62e6 9820 "IO WQ/CQ (%d), rc = 0x%x\n",
895427bd 9821 qidx, (uint32_t)rc);
cdb42bec 9822 goto out_destroy;
895427bd 9823 }
67d12733 9824 }
895427bd 9825
da0436e9 9826 /*
895427bd 9827 * Set up Slow Path Complete Queues (CQs)
da0436e9
JS
9828 */
9829
895427bd 9830 /* Set up slow-path MBOX CQ/MQ */
da0436e9 9831
895427bd 9832 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
372c187b 9833 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
895427bd
JS
9834 "0528 %s not allocated\n",
9835 phba->sli4_hba.mbx_cq ?
d1f525aa 9836 "Mailbox WQ" : "Mailbox CQ");
1b51197d 9837 rc = -ENOMEM;
895427bd 9838 goto out_destroy;
da0436e9 9839 }
da0436e9 9840
cdb42bec 9841 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
d1f525aa
JS
9842 phba->sli4_hba.mbx_cq,
9843 phba->sli4_hba.mbx_wq,
9844 NULL, 0, LPFC_MBOX);
da0436e9 9845 if (rc) {
372c187b 9846 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
895427bd
JS
9847 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9848 (uint32_t)rc);
9849 goto out_destroy;
da0436e9 9850 }
2d7dbc4c
JS
9851 if (phba->nvmet_support) {
9852 if (!phba->sli4_hba.nvmet_cqset) {
372c187b 9853 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2d7dbc4c
JS
9854 "3165 Fast-path NVME CQ Set "
9855 "array not allocated\n");
9856 rc = -ENOMEM;
9857 goto out_destroy;
9858 }
9859 if (phba->cfg_nvmet_mrq > 1) {
9860 rc = lpfc_cq_create_set(phba,
9861 phba->sli4_hba.nvmet_cqset,
cdb42bec 9862 qp,
2d7dbc4c
JS
9863 LPFC_WCQ, LPFC_NVMET);
9864 if (rc) {
372c187b 9865 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2d7dbc4c
JS
9866 "3164 Failed setup of NVME CQ "
9867 "Set, rc = 0x%x\n",
9868 (uint32_t)rc);
9869 goto out_destroy;
9870 }
9871 } else {
9872 /* Set up NVMET Receive Complete Queue */
9873 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
cdb42bec 9874 qp[0].hba_eq,
2d7dbc4c
JS
9875 LPFC_WCQ, LPFC_NVMET);
9876 if (rc) {
372c187b 9877 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2d7dbc4c
JS
9878 "6089 Failed setup NVMET CQ: "
9879 "rc = 0x%x\n", (uint32_t)rc);
9880 goto out_destroy;
9881 }
81b96eda
JS
9882 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9883
2d7dbc4c
JS
9884 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9885 "6090 NVMET CQ setup: cq-id=%d, "
9886 "parent eq-id=%d\n",
9887 phba->sli4_hba.nvmet_cqset[0]->queue_id,
cdb42bec 9888 qp[0].hba_eq->queue_id);
2d7dbc4c
JS
9889 }
9890 }
da0436e9 9891
895427bd
JS
9892 /* Set up slow-path ELS WQ/CQ */
9893 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
372c187b 9894 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
895427bd
JS
9895 "0530 ELS %s not allocated\n",
9896 phba->sli4_hba.els_cq ? "WQ" : "CQ");
1b51197d 9897 rc = -ENOMEM;
895427bd 9898 goto out_destroy;
da0436e9 9899 }
cdb42bec
JS
9900 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9901 phba->sli4_hba.els_cq,
9902 phba->sli4_hba.els_wq,
9903 NULL, 0, LPFC_ELS);
da0436e9 9904 if (rc) {
372c187b 9905 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
cdb42bec
JS
9906 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9907 (uint32_t)rc);
895427bd 9908 goto out_destroy;
da0436e9
JS
9909 }
9910 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9911 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9912 phba->sli4_hba.els_wq->queue_id,
9913 phba->sli4_hba.els_cq->queue_id);
9914
cdb42bec 9915 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
895427bd
JS
9916 /* Set up NVME LS Complete Queue */
9917 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
372c187b 9918 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
895427bd
JS
9919 "6091 LS %s not allocated\n",
9920 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9921 rc = -ENOMEM;
9922 goto out_destroy;
9923 }
cdb42bec
JS
9924 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9925 phba->sli4_hba.nvmels_cq,
9926 phba->sli4_hba.nvmels_wq,
9927 NULL, 0, LPFC_NVME_LS);
895427bd 9928 if (rc) {
372c187b 9929 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
cdb42bec
JS
9930 "0526 Failed setup of NVVME LS WQ/CQ: "
9931 "rc = 0x%x\n", (uint32_t)rc);
895427bd
JS
9932 goto out_destroy;
9933 }
9934
9935 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9936 "6096 ELS WQ setup: wq-id=%d, "
9937 "parent cq-id=%d\n",
9938 phba->sli4_hba.nvmels_wq->queue_id,
9939 phba->sli4_hba.nvmels_cq->queue_id);
9940 }
9941
2d7dbc4c
JS
9942 /*
9943 * Create NVMET Receive Queue (RQ)
9944 */
9945 if (phba->nvmet_support) {
9946 if ((!phba->sli4_hba.nvmet_cqset) ||
9947 (!phba->sli4_hba.nvmet_mrq_hdr) ||
9948 (!phba->sli4_hba.nvmet_mrq_data)) {
372c187b 9949 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2d7dbc4c
JS
9950 "6130 MRQ CQ Queues not "
9951 "allocated\n");
9952 rc = -ENOMEM;
9953 goto out_destroy;
9954 }
9955 if (phba->cfg_nvmet_mrq > 1) {
9956 rc = lpfc_mrq_create(phba,
9957 phba->sli4_hba.nvmet_mrq_hdr,
9958 phba->sli4_hba.nvmet_mrq_data,
9959 phba->sli4_hba.nvmet_cqset,
9960 LPFC_NVMET);
9961 if (rc) {
372c187b 9962 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2d7dbc4c
JS
9963 "6098 Failed setup of NVMET "
9964 "MRQ: rc = 0x%x\n",
9965 (uint32_t)rc);
9966 goto out_destroy;
9967 }
9968
9969 } else {
9970 rc = lpfc_rq_create(phba,
9971 phba->sli4_hba.nvmet_mrq_hdr[0],
9972 phba->sli4_hba.nvmet_mrq_data[0],
9973 phba->sli4_hba.nvmet_cqset[0],
9974 LPFC_NVMET);
9975 if (rc) {
372c187b 9976 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2d7dbc4c
JS
9977 "6057 Failed setup of NVMET "
9978 "Receive Queue: rc = 0x%x\n",
9979 (uint32_t)rc);
9980 goto out_destroy;
9981 }
9982
9983 lpfc_printf_log(
9984 phba, KERN_INFO, LOG_INIT,
9985 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9986 "dat-rq-id=%d parent cq-id=%d\n",
9987 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9988 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9989 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9990
9991 }
9992 }
9993
da0436e9 9994 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
372c187b 9995 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
da0436e9 9996 "0540 Receive Queue not allocated\n");
1b51197d 9997 rc = -ENOMEM;
895427bd 9998 goto out_destroy;
da0436e9 9999 }
73d91e50 10000
da0436e9 10001 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
4d9ab994 10002 phba->sli4_hba.els_cq, LPFC_USOL);
da0436e9 10003 if (rc) {
372c187b 10004 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
da0436e9 10005 "0541 Failed setup of Receive Queue: "
a2fc4aef 10006 "rc = 0x%x\n", (uint32_t)rc);
895427bd 10007 goto out_destroy;
da0436e9 10008 }
73d91e50 10009
da0436e9
JS
10010 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10011 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
10012 "parent cq-id=%d\n",
10013 phba->sli4_hba.hdr_rq->queue_id,
10014 phba->sli4_hba.dat_rq->queue_id,
4d9ab994 10015 phba->sli4_hba.els_cq->queue_id);
1ba981fd 10016
cb733e35
JS
10017 if (phba->cfg_fcp_imax)
10018 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
10019 else
10020 usdelay = 0;
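	/*
	 * Worked example, assuming LPFC_SEC_TO_USEC is 1000000: with
	 * cfg_fcp_imax set to 150000 interrupts/sec, usdelay becomes
	 * 1000000 / 150000 = 6 us of coalescing delay per EQ.
	 */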
10021
6a828b0f 10022 for (qidx = 0; qidx < phba->cfg_irq_chann;
cdb42bec 10023 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
0cf07f84 10024 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
cb733e35 10025 usdelay);
43140ca6 10026
6a828b0f
JS
10027 if (phba->sli4_hba.cq_max) {
10028 kfree(phba->sli4_hba.cq_lookup);
10029 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
10030 sizeof(struct lpfc_queue *), GFP_KERNEL);
10031 if (!phba->sli4_hba.cq_lookup) {
372c187b 10032 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6a828b0f
JS
10033 "0549 Failed setup of CQ Lookup table: "
10034 "size 0x%x\n", phba->sli4_hba.cq_max);
fad28e3d 10035 rc = -ENOMEM;
895427bd 10036 goto out_destroy;
1ba981fd 10037 }
6a828b0f 10038 lpfc_setup_cq_lookup(phba);
1ba981fd 10039 }
da0436e9
JS
10040 return 0;
10041
895427bd
JS
10042out_destroy:
10043 lpfc_sli4_queue_unset(phba);
da0436e9
JS
10044out_error:
10045 return rc;
10046}
10047
10048/**
10049 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
10050 * @phba: pointer to lpfc hba data structure.
10051 *
 10052 * This routine is invoked to unset all the SLI4 queues used for the FCoE
 10053 * HBA operation.
da0436e9
JS
10059 **/
10060void
10061lpfc_sli4_queue_unset(struct lpfc_hba *phba)
10062{
cdb42bec 10063 struct lpfc_sli4_hdw_queue *qp;
657add4e 10064 struct lpfc_queue *eq;
895427bd 10065 int qidx;
da0436e9
JS
10066
10067 /* Unset mailbox command work queue */
895427bd
JS
10068 if (phba->sli4_hba.mbx_wq)
10069 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
10070
10071 /* Unset NVME LS work queue */
10072 if (phba->sli4_hba.nvmels_wq)
10073 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
10074
da0436e9 10075 /* Unset ELS work queue */
019c0d66 10076 if (phba->sli4_hba.els_wq)
895427bd
JS
10077 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
10078
da0436e9 10079 /* Unset unsolicited receive queue */
895427bd
JS
10080 if (phba->sli4_hba.hdr_rq)
10081 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
10082 phba->sli4_hba.dat_rq);
10083
da0436e9 10084 /* Unset mailbox command complete queue */
895427bd
JS
10085 if (phba->sli4_hba.mbx_cq)
10086 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
10087
da0436e9 10088 /* Unset ELS complete queue */
895427bd
JS
10089 if (phba->sli4_hba.els_cq)
10090 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
10091
10092 /* Unset NVME LS complete queue */
10093 if (phba->sli4_hba.nvmels_cq)
10094 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
10095
bcb24f65
JS
10096 if (phba->nvmet_support) {
10097 /* Unset NVMET MRQ queue */
10098 if (phba->sli4_hba.nvmet_mrq_hdr) {
10099 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
10100 lpfc_rq_destroy(
10101 phba,
2d7dbc4c
JS
10102 phba->sli4_hba.nvmet_mrq_hdr[qidx],
10103 phba->sli4_hba.nvmet_mrq_data[qidx]);
bcb24f65 10104 }
2d7dbc4c 10105
bcb24f65
JS
10106 /* Unset NVMET CQ Set complete queue */
10107 if (phba->sli4_hba.nvmet_cqset) {
10108 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
10109 lpfc_cq_destroy(
10110 phba, phba->sli4_hba.nvmet_cqset[qidx]);
10111 }
2d7dbc4c
JS
10112 }
10113
cdb42bec
JS
10114 /* Unset fast-path SLI4 queues */
10115 if (phba->sli4_hba.hdwq) {
657add4e 10116 /* Loop thru all Hardware Queues */
cdb42bec 10117 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
657add4e 10118 /* Destroy the CQ/WQ corresponding to Hardware Queue */
cdb42bec 10119 qp = &phba->sli4_hba.hdwq[qidx];
c00f62e6
JS
10120 lpfc_wq_destroy(phba, qp->io_wq);
10121 lpfc_cq_destroy(phba, qp->io_cq);
657add4e
JS
10122 }
10123 /* Loop thru all IRQ vectors */
10124 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10125 /* Destroy the EQ corresponding to the IRQ vector */
10126 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10127 lpfc_eq_destroy(phba, eq);
cdb42bec
JS
10128 }
10129 }
895427bd 10130
6a828b0f
JS
10131 kfree(phba->sli4_hba.cq_lookup);
10132 phba->sli4_hba.cq_lookup = NULL;
10133 phba->sli4_hba.cq_max = 0;
da0436e9
JS
10134}
10135
10136/**
10137 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
10138 * @phba: pointer to lpfc hba data structure.
10139 *
10140 * This routine is invoked to allocate and set up a pool of completion queue
10141 * events. The body of the completion queue event is a completion queue entry
10142 * CQE. For now, this pool is used for the interrupt service routine to queue
10143 * the following HBA completion queue events for the worker thread to process:
10144 * - Mailbox asynchronous events
10145 * - Receive queue completion unsolicited events
10146 * Later, this can be used for all the slow-path events.
10147 *
10148 * Return codes
af901ca1 10149 * 0 - successful
25985edc 10150 * -ENOMEM - No available memory
da0436e9
JS
10151 **/
10152static int
10153lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
10154{
10155 struct lpfc_cq_event *cq_event;
10156 int i;
10157
10158 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
10159 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
10160 if (!cq_event)
10161 goto out_pool_create_fail;
10162 list_add_tail(&cq_event->list,
10163 &phba->sli4_hba.sp_cqe_event_pool);
10164 }
10165 return 0;
10166
10167out_pool_create_fail:
10168 lpfc_sli4_cq_event_pool_destroy(phba);
10169 return -ENOMEM;
10170}
10171
10172/**
10173 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
10174 * @phba: pointer to lpfc hba data structure.
10175 *
10176 * This routine is invoked to free the pool of completion queue events at
 10177 * driver unload time. Note that it is the responsibility of the driver
10178 * cleanup routine to free all the outstanding completion-queue events
10179 * allocated from this pool back into the pool before invoking this routine
10180 * to destroy the pool.
10181 **/
10182static void
10183lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
10184{
10185 struct lpfc_cq_event *cq_event, *next_cq_event;
10186
10187 list_for_each_entry_safe(cq_event, next_cq_event,
10188 &phba->sli4_hba.sp_cqe_event_pool, list) {
10189 list_del(&cq_event->list);
10190 kfree(cq_event);
10191 }
10192}
10193
10194/**
10195 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
10196 * @phba: pointer to lpfc hba data structure.
10197 *
10198 * This routine is the lock free version of the API invoked to allocate a
10199 * completion-queue event from the free pool.
10200 *
10201 * Return: Pointer to the newly allocated completion-queue event if successful
10202 * NULL otherwise.
10203 **/
10204struct lpfc_cq_event *
10205__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10206{
10207 struct lpfc_cq_event *cq_event = NULL;
10208
10209 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
10210 struct lpfc_cq_event, list);
10211 return cq_event;
10212}
10213
10214/**
10215 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
10216 * @phba: pointer to lpfc hba data structure.
10217 *
10218 * This routine is the lock version of the API invoked to allocate a
10219 * completion-queue event from the free pool.
10220 *
10221 * Return: Pointer to the newly allocated completion-queue event if successful
10222 * NULL otherwise.
10223 **/
10224struct lpfc_cq_event *
10225lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
10226{
10227 struct lpfc_cq_event *cq_event;
10228 unsigned long iflags;
10229
10230 spin_lock_irqsave(&phba->hbalock, iflags);
10231 cq_event = __lpfc_sli4_cq_event_alloc(phba);
10232 spin_unlock_irqrestore(&phba->hbalock, iflags);
10233 return cq_event;
10234}
10235
10236/**
10237 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10238 * @phba: pointer to lpfc hba data structure.
10239 * @cq_event: pointer to the completion queue event to be freed.
10240 *
10241 * This routine is the lock free version of the API invoked to release a
10242 * completion-queue event back into the free pool.
10243 **/
10244void
10245__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10246 struct lpfc_cq_event *cq_event)
10247{
10248 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
10249}
10250
10251/**
10252 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
10253 * @phba: pointer to lpfc hba data structure.
10254 * @cq_event: pointer to the completion queue event to be freed.
10255 *
10256 * This routine is the lock version of the API invoked to release a
10257 * completion-queue event back into the free pool.
10258 **/
10259void
10260lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
10261 struct lpfc_cq_event *cq_event)
10262{
10263 unsigned long iflags;
10264 spin_lock_irqsave(&phba->hbalock, iflags);
10265 __lpfc_sli4_cq_event_release(phba, cq_event);
10266 spin_unlock_irqrestore(&phba->hbalock, iflags);
10267}
10268
10269/**
10270 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
10271 * @phba: pointer to lpfc hba data structure.
10272 *
 10273 * This routine is invoked to free all the pending completion-queue events
 10274 * back into the free pool for device reset.
10275 **/
10276static void
10277lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10278{
e7dab164
JS
10279 LIST_HEAD(cq_event_list);
10280 struct lpfc_cq_event *cq_event;
da0436e9
JS
10281 unsigned long iflags;
10282
10283 /* Retrieve all the pending WCQEs from pending WCQE lists */
e7dab164 10284
da0436e9 10285 /* Pending ELS XRI abort events */
e7dab164 10286 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
da0436e9 10287 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
e7dab164
JS
10288 &cq_event_list);
10289 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10290
da0436e9 10291 /* Pending async events */
e7dab164 10292 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
da0436e9 10293 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
e7dab164
JS
10294 &cq_event_list);
10295 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
da0436e9 10296
e7dab164
JS
10297 while (!list_empty(&cq_event_list)) {
10298 list_remove_head(&cq_event_list, cq_event,
10299 struct lpfc_cq_event, list);
10300 lpfc_sli4_cq_event_release(phba, cq_event);
da0436e9
JS
10301 }
10302}
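
/*
 * A minimal round-trip sketch for the event pool APIs above, with an
 * illustrative function name.  The ISR-side list handling and CQE copy
 * are elided; only the alloc/release pairing is shown.
 */
static void lpfc_cq_event_roundtrip_sketch(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* Producer (ISR) side: take an event from the free pool */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event)
		return;		/* pool exhausted; the event is dropped */

	/* ... copy the CQE payload and queue it to a work list ... */

	/* Consumer (worker) side: return the event to the free pool */
	lpfc_sli4_cq_event_release(phba, cq_event);
}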
10303
10304/**
10305 * lpfc_pci_function_reset - Reset pci function.
10306 * @phba: pointer to lpfc hba data structure.
10307 *
 10308 * This routine is invoked to request a PCI function reset. It will destroy
10309 * all resources assigned to the PCI function which originates this request.
10310 *
10311 * Return codes
af901ca1 10312 * 0 - successful
25985edc 10313 * -ENOMEM - No available memory
d439d286 10314 * -EIO - The mailbox failed to complete successfully.
da0436e9
JS
10315 **/
10316int
10317lpfc_pci_function_reset(struct lpfc_hba *phba)
10318{
10319 LPFC_MBOXQ_t *mboxq;
2fcee4bf 10320 uint32_t rc = 0, if_type;
da0436e9 10321 uint32_t shdr_status, shdr_add_status;
2f6fa2c9
JS
10322 uint32_t rdy_chk;
10323 uint32_t port_reset = 0;
da0436e9 10324 union lpfc_sli4_cfg_shdr *shdr;
2fcee4bf 10325 struct lpfc_register reg_data;
2b81f942 10326 uint16_t devid;
da0436e9 10327
2fcee4bf
JS
10328 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10329 switch (if_type) {
10330 case LPFC_SLI_INTF_IF_TYPE_0:
10331 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10332 GFP_KERNEL);
10333 if (!mboxq) {
372c187b 10334 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2fcee4bf
JS
10335 "0494 Unable to allocate memory for "
10336 "issuing SLI_FUNCTION_RESET mailbox "
10337 "command\n");
10338 return -ENOMEM;
10339 }
da0436e9 10340
2fcee4bf
JS
10341 /* Setup PCI function reset mailbox-ioctl command */
10342 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10343 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
10344 LPFC_SLI4_MBX_EMBED);
10345 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10346 shdr = (union lpfc_sli4_cfg_shdr *)
10347 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10348 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10349 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10350 &shdr->response);
304ee432 10351 mempool_free(mboxq, phba->mbox_mem_pool);
2fcee4bf 10352 if (shdr_status || shdr_add_status || rc) {
372c187b 10353 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2fcee4bf
JS
10354 "0495 SLI_FUNCTION_RESET mailbox "
10355 "failed with status x%x add_status x%x,"
10356 " mbx status x%x\n",
10357 shdr_status, shdr_add_status, rc);
10358 rc = -ENXIO;
10359 }
10360 break;
10361 case LPFC_SLI_INTF_IF_TYPE_2:
27d6ac0a 10362 case LPFC_SLI_INTF_IF_TYPE_6:
2f6fa2c9
JS
10363wait:
10364 /*
10365 * Poll the Port Status Register and wait for RDY for
10366 * up to 30 seconds. If the port doesn't respond, treat
10367 * it as an error.
10368 */
77d093fb 10369 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
2f6fa2c9
JS
10370 if (lpfc_readl(phba->sli4_hba.u.if_type2.
10371 STATUSregaddr, &reg_data.word0)) {
10372 rc = -ENODEV;
10373 goto out;
10374 }
10375 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
10376 break;
10377 msleep(20);
10378 }
10379
10380 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
10381 phba->work_status[0] = readl(
10382 phba->sli4_hba.u.if_type2.ERR1regaddr);
10383 phba->work_status[1] = readl(
10384 phba->sli4_hba.u.if_type2.ERR2regaddr);
372c187b 10385 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2f6fa2c9
JS
10386 "2890 Port not ready, port status reg "
10387 "0x%x error 1=0x%x, error 2=0x%x\n",
10388 reg_data.word0,
10389 phba->work_status[0],
10390 phba->work_status[1]);
10391 rc = -ENODEV;
10392 goto out;
10393 }
10394
10395 if (!port_reset) {
10396 /*
10397 * Reset the port now
10398 */
2fcee4bf
JS
10399 reg_data.word0 = 0;
10400 bf_set(lpfc_sliport_ctrl_end, &reg_data,
10401 LPFC_SLIPORT_LITTLE_ENDIAN);
10402 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
10403 LPFC_SLIPORT_INIT_PORT);
10404 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10405 CTRLregaddr);
8fcb8acd 10406 /* flush */
2b81f942
JS
10407 pci_read_config_word(phba->pcidev,
10408 PCI_DEVICE_ID, &devid);
2fcee4bf 10409
2f6fa2c9
JS
10410 port_reset = 1;
10411 msleep(20);
10412 goto wait;
10413 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
10414 rc = -ENODEV;
10415 goto out;
2fcee4bf
JS
10416 }
10417 break;
2f6fa2c9 10418
2fcee4bf
JS
10419 case LPFC_SLI_INTF_IF_TYPE_1:
10420 default:
10421 break;
da0436e9 10422 }
2fcee4bf 10423
73d91e50 10424out:
2fcee4bf 10425 /* Catch the not-ready port failure after a port reset. */
2f6fa2c9 10426 if (rc) {
372c187b 10427 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
229adb0e 10428 "3317 HBA not functional: IP Reset Failed "
2f6fa2c9 10429 "try: echo fw_reset > board_mode\n");
2fcee4bf 10430 rc = -ENODEV;
229adb0e 10431 }
2fcee4bf 10432
da0436e9
JS
10433 return rc;
10434}
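
/*
 * The RDY poll above, factored out as a minimal sketch for clarity.
 * The helper name and the -ETIMEDOUT return are illustrative (the
 * in-line code reports -ENODEV); the 1500 x 20 ms budget matches the
 * loop above, roughly 30 seconds.
 */
static int lpfc_sliport_wait_rdy_sketch(struct lpfc_hba *phba)
{
	struct lpfc_register reg_data;
	int i;

	for (i = 0; i < 1500; i++) {
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &reg_data.word0))
			return -ENODEV;	/* register read failed */
		if (bf_get(lpfc_sliport_status_rdy, &reg_data))
			return 0;	/* port is ready */
		msleep(20);
	}
	return -ETIMEDOUT;
}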
10435
da0436e9
JS
10436/**
10437 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
10438 * @phba: pointer to lpfc hba data structure.
10439 *
10440 * This routine is invoked to set up the PCI device memory space for device
10441 * with SLI-4 interface spec.
10442 *
10443 * Return codes
af901ca1 10444 * 0 - successful
da0436e9
JS
10445 * other values - error
10446 **/
10447static int
10448lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10449{
f30e1bfd 10450 struct pci_dev *pdev = phba->pcidev;
da0436e9 10451 unsigned long bar0map_len, bar1map_len, bar2map_len;
3a487ff7 10452 int error;
2fcee4bf 10453 uint32_t if_type;
da0436e9 10454
f30e1bfd 10455 if (!pdev)
56de8357 10456 return -ENODEV;
da0436e9
JS
10457
10458 /* Set the device DMA mask size */
56de8357
HR
10459 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10460 if (error)
10461 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10462 if (error)
f30e1bfd 10463 return error;
da0436e9 10464
2fcee4bf
JS
10465 /*
10466 * The BARs and register set definitions and offset locations are
10467 * dependent on the if_type.
10468 */
10469 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10470 &phba->sli4_hba.sli_intf.word0)) {
3a487ff7 10471 return -ENODEV;
2fcee4bf
JS
10472 }
10473
10474 /* There is no SLI3 failback for SLI4 devices. */
10475 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10476 LPFC_SLI_INTF_VALID) {
372c187b 10477 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2fcee4bf
JS
10478 "2894 SLI_INTF reg contents invalid "
10479 "sli_intf reg 0x%x\n",
10480 phba->sli4_hba.sli_intf.word0);
3a487ff7 10481 return -ENODEV;
2fcee4bf
JS
10482 }
10483
10484 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10485 /*
 10486 * Get the bus address of SLI4 device BAR regions and the
 10487 * number of bytes required by each mapping. The mapping of the
 10488 * particular PCI BAR regions is dependent on the type of
10489 * SLI4 device.
da0436e9 10490 */
f5ca6f2e
JS
10491 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10492 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10493 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
2fcee4bf
JS
10494
10495 /*
10496 * Map SLI4 PCI Config Space Register base to a kernel virtual
10497 * addr
10498 */
10499 phba->sli4_hba.conf_regs_memmap_p =
10500 ioremap(phba->pci_bar0_map, bar0map_len);
10501 if (!phba->sli4_hba.conf_regs_memmap_p) {
10502 dev_printk(KERN_ERR, &pdev->dev,
10503 "ioremap failed for SLI4 PCI config "
10504 "registers.\n");
3a487ff7 10505 return -ENODEV;
2fcee4bf 10506 }
f5ca6f2e 10507 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
2fcee4bf
JS
10508 /* Set up BAR0 PCI config space register memory map */
10509 lpfc_sli4_bar0_register_memmap(phba, if_type);
1dfb5a47
JS
10510 } else {
10511 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10512 bar0map_len = pci_resource_len(pdev, 1);
27d6ac0a 10513 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
2fcee4bf
JS
10514 dev_printk(KERN_ERR, &pdev->dev,
10515 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
3a487ff7 10516 return -ENODEV;
2fcee4bf
JS
10517 }
10518 phba->sli4_hba.conf_regs_memmap_p =
da0436e9 10519 ioremap(phba->pci_bar0_map, bar0map_len);
2fcee4bf
JS
10520 if (!phba->sli4_hba.conf_regs_memmap_p) {
10521 dev_printk(KERN_ERR, &pdev->dev,
10522 "ioremap failed for SLI4 PCI config "
10523 "registers.\n");
3a487ff7 10524 return -ENODEV;
2fcee4bf
JS
10525 }
10526 lpfc_sli4_bar0_register_memmap(phba, if_type);
da0436e9
JS
10527 }
10528
e4b9794e
JS
10529 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10530 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10531 /*
10532 * Map SLI4 if type 0 HBA Control Register base to a
10533 * kernel virtual address and setup the registers.
10534 */
10535 phba->pci_bar1_map = pci_resource_start(pdev,
10536 PCI_64BIT_BAR2);
10537 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10538 phba->sli4_hba.ctrl_regs_memmap_p =
10539 ioremap(phba->pci_bar1_map,
10540 bar1map_len);
10541 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10542 dev_err(&pdev->dev,
10543 "ioremap failed for SLI4 HBA "
10544 "control registers.\n");
10545 error = -ENOMEM;
10546 goto out_iounmap_conf;
10547 }
10548 phba->pci_bar2_memmap_p =
10549 phba->sli4_hba.ctrl_regs_memmap_p;
27d6ac0a 10550 lpfc_sli4_bar1_register_memmap(phba, if_type);
e4b9794e
JS
10551 } else {
10552 error = -ENOMEM;
2fcee4bf
JS
10553 goto out_iounmap_conf;
10554 }
da0436e9
JS
10555 }
10556
27d6ac0a
JS
10557 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10558 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10559 /*
10560 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
10561 * virtual address and setup the registers.
10562 */
10563 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10564 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10565 phba->sli4_hba.drbl_regs_memmap_p =
10566 ioremap(phba->pci_bar1_map, bar1map_len);
10567 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10568 dev_err(&pdev->dev,
10569 "ioremap failed for SLI4 HBA doorbell registers.\n");
3a487ff7 10570 error = -ENOMEM;
27d6ac0a
JS
10571 goto out_iounmap_conf;
10572 }
10573 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10574 lpfc_sli4_bar1_register_memmap(phba, if_type);
10575 }
10576
e4b9794e
JS
10577 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10578 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10579 /*
10580 * Map SLI4 if type 0 HBA Doorbell Register base to
10581 * a kernel virtual address and setup the registers.
10582 */
10583 phba->pci_bar2_map = pci_resource_start(pdev,
10584 PCI_64BIT_BAR4);
10585 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10586 phba->sli4_hba.drbl_regs_memmap_p =
10587 ioremap(phba->pci_bar2_map,
10588 bar2map_len);
10589 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10590 dev_err(&pdev->dev,
10591 "ioremap failed for SLI4 HBA"
10592 " doorbell registers.\n");
10593 error = -ENOMEM;
10594 goto out_iounmap_ctrl;
10595 }
10596 phba->pci_bar4_memmap_p =
10597 phba->sli4_hba.drbl_regs_memmap_p;
10598 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10599 if (error)
10600 goto out_iounmap_all;
10601 } else {
10602 error = -ENOMEM;
2fcee4bf 10603 goto out_iounmap_all;
e4b9794e 10604 }
da0436e9
JS
10605 }
10606
1351e69f
JS
10607 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10608 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10609 /*
10610 * Map SLI4 if type 6 HBA DPP Register base to a kernel
10611 * virtual address and setup the registers.
10612 */
10613 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10614 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10615 phba->sli4_hba.dpp_regs_memmap_p =
10616 ioremap(phba->pci_bar2_map, bar2map_len);
10617 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10618 dev_err(&pdev->dev,
10619 "ioremap failed for SLI4 HBA dpp registers.\n");
3a487ff7 10620 error = -ENOMEM;
1351e69f
JS
10621 goto out_iounmap_ctrl;
10622 }
10623 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10624 }
10625
b71413dd 10626 /* Set up the EQ/CQ register handling functions now */
27d6ac0a
JS
10627 switch (if_type) {
10628 case LPFC_SLI_INTF_IF_TYPE_0:
10629 case LPFC_SLI_INTF_IF_TYPE_2:
b71413dd 10630 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
32517fc0
JS
10631 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10632 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
27d6ac0a
JS
10633 break;
10634 case LPFC_SLI_INTF_IF_TYPE_6:
10635 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
32517fc0
JS
10636 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10637 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
27d6ac0a
JS
10638 break;
10639 default:
10640 break;
b71413dd
JS
10641 }
10642
da0436e9
JS
10643 return 0;
10644
10645out_iounmap_all:
10646 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10647out_iounmap_ctrl:
10648 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10649out_iounmap_conf:
10650 iounmap(phba->sli4_hba.conf_regs_memmap_p);
3a487ff7 10651
da0436e9
JS
10652 return error;
10653}
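
/*
 * Each BAR handled above repeats one map-and-check pattern; this is a
 * minimal sketch of that pattern with an illustrative helper name.
 * Callers remain responsible for iounmap() on the error unwind paths,
 * exactly as the out_iounmap_* labels above do.
 */
static void __iomem *
lpfc_map_bar_sketch(struct pci_dev *pdev, int bar)
{
	resource_size_t start = pci_resource_start(pdev, bar);
	resource_size_t len = pci_resource_len(pdev, bar);

	if (!start)
		return NULL;	/* BAR not implemented for this if_type */
	return ioremap(start, len);
}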
10654
10655/**
10656 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
10657 * @phba: pointer to lpfc hba data structure.
10658 *
10659 * This routine is invoked to unset the PCI device memory space for device
10660 * with SLI-4 interface spec.
10661 **/
10662static void
10663lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10664{
2e90f4b5
JS
10665 uint32_t if_type;
10666 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
da0436e9 10667
2e90f4b5
JS
10668 switch (if_type) {
10669 case LPFC_SLI_INTF_IF_TYPE_0:
10670 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10671 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10672 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10673 break;
10674 case LPFC_SLI_INTF_IF_TYPE_2:
10675 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10676 break;
27d6ac0a
JS
10677 case LPFC_SLI_INTF_IF_TYPE_6:
10678 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10679 iounmap(phba->sli4_hba.conf_regs_memmap_p);
0b439194
JS
10680 if (phba->sli4_hba.dpp_regs_memmap_p)
10681 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
27d6ac0a 10682 break;
2e90f4b5
JS
10683 case LPFC_SLI_INTF_IF_TYPE_1:
10684 default:
10685 dev_printk(KERN_ERR, &phba->pcidev->dev,
10686 "FATAL - unsupported SLI4 interface type - %d\n",
10687 if_type);
10688 break;
10689 }
da0436e9
JS
10690}
10691
10692/**
10693 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
10694 * @phba: pointer to lpfc hba data structure.
10695 *
10696 * This routine is invoked to enable the MSI-X interrupt vectors to device
45ffac19 10697 * with SLI-3 interface specs.
da0436e9
JS
10698 *
10699 * Return codes
af901ca1 10700 * 0 - successful
da0436e9
JS
10701 * other values - error
10702 **/
10703static int
10704lpfc_sli_enable_msix(struct lpfc_hba *phba)
10705{
45ffac19 10706 int rc;
da0436e9
JS
10707 LPFC_MBOXQ_t *pmb;
10708
10709 /* Set up MSI-X multi-message vectors */
45ffac19
CH
10710 rc = pci_alloc_irq_vectors(phba->pcidev,
10711 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
10712 if (rc < 0) {
da0436e9
JS
10713 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10714 "0420 PCI enable MSI-X failed (%d)\n", rc);
029165ac 10715 goto vec_fail_out;
da0436e9 10716 }
45ffac19 10717
da0436e9
JS
10718 /*
10719 * Assign MSI-X vectors to interrupt handlers
10720 */
10721
10722 /* vector-0 is associated to slow-path handler */
45ffac19 10723 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
ed243d37 10724 &lpfc_sli_sp_intr_handler, 0,
da0436e9
JS
10725 LPFC_SP_DRIVER_HANDLER_NAME, phba);
10726 if (rc) {
10727 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10728 "0421 MSI-X slow-path request_irq failed "
10729 "(%d)\n", rc);
10730 goto msi_fail_out;
10731 }
10732
10733 /* vector-1 is associated to fast-path handler */
45ffac19 10734 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
ed243d37 10735 &lpfc_sli_fp_intr_handler, 0,
da0436e9
JS
10736 LPFC_FP_DRIVER_HANDLER_NAME, phba);
10737
10738 if (rc) {
10739 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10740 "0429 MSI-X fast-path request_irq failed "
10741 "(%d)\n", rc);
10742 goto irq_fail_out;
10743 }
10744
10745 /*
10746 * Configure HBA MSI-X attention conditions to messages
10747 */
10748 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10749
10750 if (!pmb) {
10751 rc = -ENOMEM;
372c187b 10752 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
da0436e9
JS
10753 "0474 Unable to allocate memory for issuing "
10754 "MBOX_CONFIG_MSI command\n");
10755 goto mem_fail_out;
10756 }
10757 rc = lpfc_config_msi(phba, pmb);
10758 if (rc)
10759 goto mbx_fail_out;
10760 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10761 if (rc != MBX_SUCCESS) {
10762 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
10763 "0351 Config MSI mailbox command failed, "
10764 "mbxCmd x%x, mbxStatus x%x\n",
10765 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
10766 goto mbx_fail_out;
10767 }
10768
10769 /* Free memory allocated for mailbox command */
10770 mempool_free(pmb, phba->mbox_mem_pool);
10771 return rc;
10772
10773mbx_fail_out:
10774 /* Free memory allocated for mailbox command */
10775 mempool_free(pmb, phba->mbox_mem_pool);
10776
10777mem_fail_out:
10778 /* free the irq already requested */
45ffac19 10779 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
da0436e9
JS
10780
10781irq_fail_out:
10782 /* free the irq already requested */
45ffac19 10783 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
da0436e9
JS
10784
10785msi_fail_out:
10786 /* Unconfigure MSI-X capability structure */
45ffac19 10787 pci_free_irq_vectors(phba->pcidev);
029165ac
AG
10788
10789vec_fail_out:
da0436e9
JS
10790 return rc;
10791}
10792
da0436e9
JS
10793/**
10794 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
10795 * @phba: pointer to lpfc hba data structure.
10796 *
10797 * This routine is invoked to enable the MSI interrupt mode to device with
10798 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
10799 * enable the MSI vector. The device driver is responsible for calling the
 10800 * request_irq() to register the MSI vector with an interrupt handler, which
10801 * is done in this function.
10802 *
10803 * Return codes
af901ca1 10804 * 0 - successful
da0436e9
JS
10805 * other values - error
10806 */
10807static int
10808lpfc_sli_enable_msi(struct lpfc_hba *phba)
10809{
10810 int rc;
10811
10812 rc = pci_enable_msi(phba->pcidev);
10813 if (!rc)
10814 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10815 "0462 PCI enable MSI mode success.\n");
10816 else {
10817 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10818 "0471 PCI enable MSI mode failed (%d)\n", rc);
10819 return rc;
10820 }
10821
10822 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
ed243d37 10823 0, LPFC_DRIVER_NAME, phba);
da0436e9
JS
10824 if (rc) {
10825 pci_disable_msi(phba->pcidev);
10826 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10827 "0478 MSI request_irq failed (%d)\n", rc);
10828 }
10829 return rc;
10830}
10831
da0436e9
JS
10832/**
10833 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
10834 * @phba: pointer to lpfc hba data structure.
fe614acd 10835 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
da0436e9
JS
10836 *
10837 * This routine is invoked to enable device interrupt and associate driver's
10838 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 10839 * spec. Depending on the interrupt mode configured for the driver, the driver
 10840 * will try to fall back from the configured interrupt mode to an interrupt
10841 * mode which is supported by the platform, kernel, and device in the order
10842 * of:
10843 * MSI-X -> MSI -> IRQ.
10844 *
10845 * Return codes
af901ca1 10846 * 0 - successful
da0436e9
JS
10847 * other values - error
10848 **/
10849static uint32_t
10850lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10851{
10852 uint32_t intr_mode = LPFC_INTR_ERROR;
10853 int retval;
10854
d2f2547e
JS
10855 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
10856 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10857 if (retval)
10858 return intr_mode;
10859 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
10860
da0436e9 10861 if (cfg_mode == 2) {
d2f2547e
JS
10862 /* Now, try to enable MSI-X interrupt mode */
10863 retval = lpfc_sli_enable_msix(phba);
da0436e9 10864 if (!retval) {
d2f2547e
JS
10865 /* Indicate initialization to MSI-X mode */
10866 phba->intr_type = MSIX;
10867 intr_mode = 2;
da0436e9
JS
10868 }
10869 }
10870
10871 /* Fallback to MSI if MSI-X initialization failed */
10872 if (cfg_mode >= 1 && phba->intr_type == NONE) {
10873 retval = lpfc_sli_enable_msi(phba);
10874 if (!retval) {
10875 /* Indicate initialization to MSI mode */
10876 phba->intr_type = MSI;
10877 intr_mode = 1;
10878 }
10879 }
10880
 10881 /* Fall back to INTx if both MSI-X and MSI initialization failed */
10882 if (phba->intr_type == NONE) {
10883 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10884 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10885 if (!retval) {
10886 /* Indicate initialization to INTx mode */
10887 phba->intr_type = INTx;
10888 intr_mode = 0;
10889 }
10890 }
10891 return intr_mode;
10892}
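
/*
 * A minimal sketch of how a probe path consumes the routine above,
 * assuming the cfg_use_msi module parameter (2 = MSI-X, 1 = MSI,
 * 0 = INTx) supplies the starting mode and that the achieved mode is
 * kept in phba->intr_mode for restart paths.  The helper name is
 * illustrative; real callers also log the result via
 * lpfc_log_intr_mode().
 */
static int lpfc_sli_setup_intr_sketch(struct lpfc_hba *phba)
{
	uint32_t intr_mode;

	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
	if (intr_mode == LPFC_INTR_ERROR)
		return -ENODEV;	/* no usable interrupt mode found */

	/* Remember what was actually achieved */
	phba->intr_mode = intr_mode;
	return 0;
}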
10893
10894/**
10895 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
10896 * @phba: pointer to lpfc hba data structure.
10897 *
10898 * This routine is invoked to disable device interrupt and disassociate the
10899 * driver's interrupt handler(s) from interrupt vector(s) to device with
10900 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
10901 * release the interrupt vector(s) for the message signaled interrupt.
10902 **/
10903static void
10904lpfc_sli_disable_intr(struct lpfc_hba *phba)
10905{
45ffac19
CH
10906 int nr_irqs, i;
10907
da0436e9 10908 if (phba->intr_type == MSIX)
45ffac19
CH
10909 nr_irqs = LPFC_MSIX_VECTORS;
10910 else
10911 nr_irqs = 1;
10912
10913 for (i = 0; i < nr_irqs; i++)
10914 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10915 pci_free_irq_vectors(phba->pcidev);
da0436e9
JS
10916
10917 /* Reset interrupt management states */
10918 phba->intr_type = NONE;
10919 phba->sli.slistat.sli_intr = 0;
da0436e9
JS
10920}
10921
6a828b0f 10922/**
657add4e 10923 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
6a828b0f
JS
10924 * @phba: pointer to lpfc hba data structure.
10925 * @id: EQ vector index or Hardware Queue index
10926 * @match: LPFC_FIND_BY_EQ = match by EQ
10927 * LPFC_FIND_BY_HDWQ = match by Hardware Queue
657add4e 10928 * Return the CPU that matches the selection criteria
6a828b0f
JS
10929 */
10930static uint16_t
10931lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10932{
10933 struct lpfc_vector_map_info *cpup;
10934 int cpu;
10935
657add4e 10936 /* Loop through all CPUs */
222e9239
JS
10937 for_each_present_cpu(cpu) {
10938 cpup = &phba->sli4_hba.cpu_map[cpu];
657add4e
JS
10939
10940 /* If we are matching by EQ, there may be multiple CPUs using
 10941 * the same vector, so select the one with
10942 * LPFC_CPU_FIRST_IRQ set.
10943 */
6a828b0f 10944 if ((match == LPFC_FIND_BY_EQ) &&
657add4e 10945 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
6a828b0f
JS
10946 (cpup->eq == id))
10947 return cpu;
657add4e
JS
10948
10949 /* If matching by HDWQ, select the first CPU that matches */
6a828b0f
JS
10950 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10951 return cpu;
6a828b0f
JS
10952 }
10953 return 0;
10954}
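
/*
 * Usage sketch for the lookup above: queue allocation uses the HDWQ
 * match so a queue's memory lands on the NUMA node of the CPU that
 * services it, as seen earlier in this file:
 *
 *	cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
 *	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				      phba->sli4_hba.rq_esize,
 *				      phba->sli4_hba.rq_ecount, cpu);
 */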
10955
6a828b0f
JS
10956#ifdef CONFIG_X86
10957/**
10958 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
10959 * @phba: pointer to lpfc hba data structure.
10960 * @cpu: CPU map index
10961 * @phys_id: CPU package physical id
10962 * @core_id: CPU core id
10963 */
10964static int
10965lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10966 uint16_t phys_id, uint16_t core_id)
10967{
10968 struct lpfc_vector_map_info *cpup;
10969 int idx;
10970
222e9239
JS
10971 for_each_present_cpu(idx) {
10972 cpup = &phba->sli4_hba.cpu_map[idx];
6a828b0f
JS
10973 /* Does the cpup match the one we are looking for */
10974 if ((cpup->phys_id == phys_id) &&
10975 (cpup->core_id == core_id) &&
222e9239 10976 (cpu != idx))
6a828b0f 10977 return 1;
6a828b0f
JS
10978 }
10979 return 0;
10980}
10981#endif
10982
dcaa2136
JS
10983/*
10984 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
10985 * @phba: pointer to lpfc hba data structure.
10986 * @eqidx: index for eq and irq vector
10987 * @flag: flags to set for vector_map structure
10988 * @cpu: cpu used to index vector_map structure
10989 *
10990 * The routine assigns eq info into vector_map structure
10991 */
10992static inline void
10993lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
10994 unsigned int cpu)
10995{
10996 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
10997 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
10998
10999 cpup->eq = eqidx;
11000 cpup->flag |= flag;
11001
11002 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11003 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
11004 cpu, eqhdl->irq, cpup->eq, cpup->flag);
11005}
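
/*
 * A minimal usage sketch, assuming the IRQ setup path records the
 * first CPU the kernel affinitized to each vector (the same
 * LPFC_CPU_FIRST_IRQ convention lpfc_cpu_affinity_check() relies on
 * below):
 *
 *	lpfc_assign_eq_map_info(phba, eqidx, LPFC_CPU_FIRST_IRQ, cpu);
 */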
11006
11007/**
11008 * lpfc_cpu_map_array_init - Initialize cpu_map structure
11009 * @phba: pointer to lpfc hba data structure.
11010 *
11011 * The routine initializes the cpu_map array structure
11012 */
11013static void
11014lpfc_cpu_map_array_init(struct lpfc_hba *phba)
11015{
11016 struct lpfc_vector_map_info *cpup;
11017 struct lpfc_eq_intr_info *eqi;
11018 int cpu;
11019
11020 for_each_possible_cpu(cpu) {
11021 cpup = &phba->sli4_hba.cpu_map[cpu];
11022 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
11023 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
11024 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
11025 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
11026 cpup->flag = 0;
11027 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
11028 INIT_LIST_HEAD(&eqi->list);
11029 eqi->icnt = 0;
11030 }
11031}
11032
11033/**
11034 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
11035 * @phba: pointer to lpfc hba data structure.
11036 *
11037 * The routine initializes the hba_eq_hdl array structure
11038 */
11039static void
11040lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
11041{
11042 struct lpfc_hba_eq_hdl *eqhdl;
11043 int i;
11044
11045 for (i = 0; i < phba->cfg_irq_chann; i++) {
11046 eqhdl = lpfc_get_eq_hdl(i);
11047 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
11048 eqhdl->phba = phba;
11049 }
11050}
11051
7bb03bbf 11052/**
895427bd 11053 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
7bb03bbf 11054 * @phba: pointer to lpfc hba data structure.
895427bd
JS
11055 * @vectors: number of msix vectors allocated.
11056 *
11057 * The routine will figure out the CPU affinity assignment for every
6a828b0f 11058 * MSI-X vector allocated for the HBA.
895427bd
JS
11059 * In addition, the CPU to IO channel mapping will be calculated
11060 * and the phba->sli4_hba.cpu_map array will reflect this.
7bb03bbf 11061 */
895427bd
JS
11062static void
11063lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
7bb03bbf 11064{
3ad348d9 11065 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
6a828b0f
JS
11066 int max_phys_id, min_phys_id;
11067 int max_core_id, min_core_id;
7bb03bbf 11068 struct lpfc_vector_map_info *cpup;
d9954a2d 11069 struct lpfc_vector_map_info *new_cpup;
7bb03bbf
JS
11070#ifdef CONFIG_X86
11071 struct cpuinfo_x86 *cpuinfo;
11072#endif
840eda96
JS
11073#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11074 struct lpfc_hdwq_stat *c_stat;
11075#endif
7bb03bbf 11076
6a828b0f 11077 max_phys_id = 0;
d9954a2d 11078 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
6a828b0f 11079 max_core_id = 0;
d9954a2d 11080 min_core_id = LPFC_VECTOR_MAP_EMPTY;
11081
11082 /* Update CPU map with physical id and core id of each CPU */
11083 for_each_present_cpu(cpu) {
11084 cpup = &phba->sli4_hba.cpu_map[cpu];
11085#ifdef CONFIG_X86
11086 cpuinfo = &cpu_data(cpu);
11087 cpup->phys_id = cpuinfo->phys_proc_id;
11088 cpup->core_id = cpuinfo->cpu_core_id;
11089 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
11090 cpup->flag |= LPFC_CPU_MAP_HYPER;
11091#else
11092 /* No distinction between CPUs for other platforms */
11093 cpup->phys_id = 0;
6a828b0f 11094 cpup->core_id = cpu;
7bb03bbf 11095#endif
6a828b0f 11096
b3295c2a 11097 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11098 "3328 CPU %d physid %d coreid %d flag x%x\n",
11099 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
11100
11101 if (cpup->phys_id > max_phys_id)
11102 max_phys_id = cpup->phys_id;
11103 if (cpup->phys_id < min_phys_id)
11104 min_phys_id = cpup->phys_id;
11105
11106 if (cpup->core_id > max_core_id)
11107 max_core_id = cpup->core_id;
11108 if (cpup->core_id < min_core_id)
11109 min_core_id = cpup->core_id;
7bb03bbf 11110 }
7bb03bbf 11111
11112	/* After looking at each irq vector assigned to this pcidev, it's
11113	 * possible to see that not ALL CPUs have been accounted for.
11114	 * Next we will set any unassigned (unaffinitized) cpu map
11115	 * entries to an IRQ on the same phys_id.
11116	 */
11117 first_cpu = cpumask_first(cpu_present_mask);
11118 start_cpu = first_cpu;
11119
11120 for_each_present_cpu(cpu) {
11121 cpup = &phba->sli4_hba.cpu_map[cpu];
11122
11123 /* Is this CPU entry unassigned */
11124 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11125 /* Mark CPU as IRQ not assigned by the kernel */
11126 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11127
657add4e 11128			/* If so, find a new_cpup that's on the SAME
11129			 * phys_id as cpup. start_cpu will start where we
11130			 * left off so all unassigned entries don't get assigned
11131			 * the IRQ of the first entry.
11132			 */
11133 new_cpu = start_cpu;
11134 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11135 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11136 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
dcaa2136 11137 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
11138 (new_cpup->phys_id == cpup->phys_id))
11139 goto found_same;
11140 new_cpu = cpumask_next(
11141 new_cpu, cpu_present_mask);
11142 if (new_cpu == nr_cpumask_bits)
11143 new_cpu = first_cpu;
11144 }
11145 /* At this point, we leave the CPU as unassigned */
11146 continue;
11147found_same:
11148 /* We found a matching phys_id, so copy the IRQ info */
11149 cpup->eq = new_cpup->eq;
11150
11151			/* Bump start_cpu to the next slot to minimize the
11152 * chance of having multiple unassigned CPU entries
11153 * selecting the same IRQ.
11154 */
11155 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11156 if (start_cpu == nr_cpumask_bits)
11157 start_cpu = first_cpu;
11158
657add4e 11159 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
d9954a2d 11160 "3337 Set Affinity: CPU %d "
dcaa2136 11161 "eq %d from peer cpu %d same "
d9954a2d 11162 "phys_id (%d)\n",
11163 cpu, cpup->eq, new_cpu,
11164 cpup->phys_id);
11165 }
11166 }
11167
11168	/* Set any unassigned cpu map entries to an IRQ on any phys_id */
11169 start_cpu = first_cpu;
11170
11171 for_each_present_cpu(cpu) {
11172 cpup = &phba->sli4_hba.cpu_map[cpu];
11173
11174 /* Is this entry unassigned */
11175 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11176 /* Mark it as IRQ not assigned by the kernel */
11177 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11178
657add4e 11179			/* If so, find a new_cpup that's on ANY phys_id
11180			 * as the cpup. start_cpu will start where we
11181			 * left off so all unassigned entries don't get
11182			 * assigned the IRQ of the first entry.
11183			 */
11184 new_cpu = start_cpu;
11185 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11186 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11187 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
dcaa2136 11188 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
11189 goto found_any;
11190 new_cpu = cpumask_next(
11191 new_cpu, cpu_present_mask);
11192 if (new_cpu == nr_cpumask_bits)
11193 new_cpu = first_cpu;
11194 }
11195 /* We should never leave an entry unassigned */
11196			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11197					"3339 Set Affinity: CPU %d "
11198					"eq %d UNASSIGNED\n",
11199					cpu, cpup->eq);
11200 continue;
11201found_any:
11202 /* We found an available entry, copy the IRQ info */
11203 cpup->eq = new_cpup->eq;
11204
11205			/* Bump start_cpu to the next slot to minimize the
11206 * chance of having multiple unassigned CPU entries
11207 * selecting the same IRQ.
11208 */
11209 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11210 if (start_cpu == nr_cpumask_bits)
11211 start_cpu = first_cpu;
11212
657add4e 11213 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
d9954a2d 11214 "3338 Set Affinity: CPU %d "
11215 "eq %d from peer cpu %d (%d/%d)\n",
11216 cpu, cpup->eq, new_cpu,
11217 new_cpup->phys_id, new_cpup->core_id);
11218 }
11219 }
657add4e 11220
11221 /* Assign hdwq indices that are unique across all cpus in the map
11222 * that are also FIRST_CPUs.
11223 */
11224 idx = 0;
11225 for_each_present_cpu(cpu) {
11226 cpup = &phba->sli4_hba.cpu_map[cpu];
11227
11228 /* Only FIRST IRQs get a hdwq index assignment. */
11229 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11230 continue;
11231
11232 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
11233 cpup->hdwq = idx;
11234 idx++;
bc2736e9 11235 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3ad348d9 11236 "3333 Set Affinity: CPU %d (phys %d core %d): "
dcaa2136 11237 "hdwq %d eq %d flg x%x\n",
3ad348d9 11238 cpu, cpup->phys_id, cpup->core_id,
dcaa2136 11239 cpup->hdwq, cpup->eq, cpup->flag);
3ad348d9 11240 }
bc227dde 11241	/* Associate a hdwq with each cpu_map entry
11242	 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
11243	 * hardware queues than CPUs. For that case we will just round-robin
11244	 * the available hardware queues as they get assigned to CPUs.
11245	 * The next_idx is the idx from the FIRST_CPU loop above to account
11246	 * for irq_chann < hdwq. The idx is used for round-robin assignments
11247	 * and needs to start at 0.
657add4e 11248	 */
3ad348d9 11249 next_idx = idx;
657add4e 11250 start_cpu = 0;
3ad348d9 11251 idx = 0;
11252 for_each_present_cpu(cpu) {
11253 cpup = &phba->sli4_hba.cpu_map[cpu];
657add4e 11254
11255 /* FIRST cpus are already mapped. */
11256 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
11257 continue;
11258
11259 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
11260 * of the unassigned cpus to the next idx so that all
11261 * hdw queues are fully utilized.
11262 */
11263 if (next_idx < phba->cfg_hdw_queue) {
11264 cpup->hdwq = next_idx;
11265 next_idx++;
11266 continue;
11267 }
11268
11269 /* Not a First CPU and all hdw_queues are used. Reuse a
11270 * Hardware Queue for another CPU, so be smart about it
11271 * and pick one that has its IRQ/EQ mapped to the same phys_id
11272 * (CPU package) and core_id.
11273 */
11274 new_cpu = start_cpu;
11275 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11276 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11277 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11278 new_cpup->phys_id == cpup->phys_id &&
11279 new_cpup->core_id == cpup->core_id) {
11280 goto found_hdwq;
657add4e 11281 }
11282 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11283 if (new_cpu == nr_cpumask_bits)
11284 new_cpu = first_cpu;
11285 }
657add4e 11286
11287 /* If we can't match both phys_id and core_id,
11288 * settle for just a phys_id match.
11289 */
11290 new_cpu = start_cpu;
11291 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11292 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11293 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11294 new_cpup->phys_id == cpup->phys_id)
11295 goto found_hdwq;
11296
11297 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11298 if (new_cpu == nr_cpumask_bits)
11299 new_cpu = first_cpu;
657add4e 11300 }
11301
11302 /* Otherwise just round robin on cfg_hdw_queue */
11303 cpup->hdwq = idx % phba->cfg_hdw_queue;
11304 idx++;
11305 goto logit;
11306 found_hdwq:
11307 /* We found an available entry, copy the IRQ info */
11308 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11309 if (start_cpu == nr_cpumask_bits)
11310 start_cpu = first_cpu;
11311 cpup->hdwq = new_cpup->hdwq;
11312 logit:
bc2736e9 11313 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
657add4e 11314 "3335 Set Affinity: CPU %d (phys %d core %d): "
dcaa2136 11315 "hdwq %d eq %d flg x%x\n",
657add4e 11316 cpu, cpup->phys_id, cpup->core_id,
dcaa2136 11317 cpup->hdwq, cpup->eq, cpup->flag);
11318 }
11319
11320 /*
11321 * Initialize the cpu_map slots for not-present cpus in case
11322 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
11323 */
11324 idx = 0;
11325 for_each_possible_cpu(cpu) {
11326 cpup = &phba->sli4_hba.cpu_map[cpu];
11327#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11328 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
11329 c_stat->hdwq_no = cpup->hdwq;
11330#endif
11331 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
11332 continue;
11333
11334 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
11335#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11336 c_stat->hdwq_no = cpup->hdwq;
11337#endif
11338 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11339 "3340 Set Affinity: not present "
11340 "CPU %d hdwq %d\n",
11341 cpu, cpup->hdwq);
11342 }
11343
11344 /* The cpu_map array will be used later during initialization
11345 * when EQ / CQ / WQs are allocated and configured.
11346 */
b3295c2a 11347 return;
7bb03bbf 11348}
7bb03bbf 11349
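/*
 * Editorial sketch (not part of the driver): every assignment pass in
 * lpfc_cpu_affinity_check() above open-codes the same wrap-around walk
 * of cpu_present_mask. A hypothetical helper capturing that idiom,
 * assuming only the standard cpumask API, could look like this:
 */
static inline unsigned int __maybe_unused
lpfc_example_next_present_cpu_wrap(unsigned int cpu)
{
	unsigned int next = cpumask_next(cpu, cpu_present_mask);

	/* Walked past the last present cpu: wrap to the first one */
	if (next >= nr_cpumask_bits)
		next = cpumask_first(cpu_present_mask);
	return next;
}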
11350/**
11351 * lpfc_cpuhp_get_eq - Collect the eqs serviced only by the offlining cpu
11352 *
11353 * @phba: pointer to lpfc hba data structure.
11354 * @cpu: cpu going offline
fe614acd 11355 * @eqlist: eq list to append to
93a4d6f4 11356 */
a99c8074 11357static int
11358lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
11359 struct list_head *eqlist)
11360{
11361 const struct cpumask *maskp;
11362 struct lpfc_queue *eq;
a99c8074 11363 struct cpumask *tmp;
11364 u16 idx;
11365
11366 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
11367 if (!tmp)
11368 return -ENOMEM;
11369
11370 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11371 maskp = pci_irq_get_affinity(phba->pcidev, idx);
11372 if (!maskp)
11373 continue;
11374		/*
11375		 * if the irq is not affinitized to the cpu going
11376		 * offline then we don't need to poll the eq
11377		 * attached to it.
11378		 */
a99c8074 11379 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
11380 continue;
11381		/* get the cpus that are online and are
11382		 * affinitized to this irq vector. If the
11383		 * count is more than 1 then cpuhp is not
11384		 * going to shut down this vector. Since this
11385		 * cpu has not gone offline yet, we need >1.
11386		 */
11387 cpumask_and(tmp, maskp, cpu_online_mask);
11388 if (cpumask_weight(tmp) > 1)
11389 continue;
11390
11391		/* Now that we have an irq to shut down, get the eq
11392		 * mapped to this irq. Note: multiple hdwq's in
11393		 * the software can share an eq, but eventually
11394		 * only one eq will be mapped to this vector
11395		 */
11396 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
11397 list_add(&eq->_poll_list, eqlist);
93a4d6f4 11398 }
11399 kfree(tmp);
11400 return 0;
11401}
11402
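/*
 * Editorial sketch (hypothetical, mirrors the test above): decide
 * whether an offlining cpu is the last online cpu in one vector's
 * affinity mask. Like lpfc_cpuhp_get_eq(), the scratch mask is heap
 * allocated so large NR_CPUS configurations do not bloat the stack.
 */
static bool __maybe_unused
lpfc_example_vector_loses_last_cpu(const struct cpumask *vec_mask,
				   unsigned int dying_cpu)
{
	cpumask_var_t tmp;
	bool last;

	if (!cpumask_test_cpu(dying_cpu, vec_mask))
		return false;	/* vector is not tied to this cpu */
	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return false;	/* treat allocation failure as "not last" */
	cpumask_and(tmp, vec_mask, cpu_online_mask);
	last = cpumask_weight(tmp) <= 1;	/* only the dying cpu is left */
	free_cpumask_var(tmp);
	return last;
}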
11403static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11404{
11405 if (phba->sli_rev != LPFC_SLI_REV4)
11406 return;
11407
11408 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11409 &phba->cpuhp);
11410 /*
11411 * unregistering the instance doesn't stop the polling
11412 * timer. Wait for the poll timer to retire.
11413 */
11414 synchronize_rcu();
11415 del_timer_sync(&phba->cpuhp_poll_timer);
11416}
11417
11418static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
11419{
11420 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
11421 return;
11422
11423 __lpfc_cpuhp_remove(phba);
11424}
11425
11426static void lpfc_cpuhp_add(struct lpfc_hba *phba)
11427{
11428 if (phba->sli_rev != LPFC_SLI_REV4)
11429 return;
11430
11431 rcu_read_lock();
11432
f861f596 11433 if (!list_empty(&phba->poll_list))
11434 mod_timer(&phba->cpuhp_poll_timer,
11435 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
11436
11437 rcu_read_unlock();
11438
11439 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
11440 &phba->cpuhp);
11441}
11442
11443static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
11444{
11445 if (phba->pport->load_flag & FC_UNLOADING) {
11446 *retval = -EAGAIN;
11447 return true;
11448 }
11449
11450 if (phba->sli_rev != LPFC_SLI_REV4) {
11451 *retval = 0;
11452 return true;
11453 }
11454
11455 /* proceed with the hotplug */
11456 return false;
11457}
11458
11459/**
11460 * lpfc_irq_set_aff - set IRQ affinity
11461 * @eqhdl: EQ handle
11462 * @cpu: cpu to set affinity
11463 *
11464 **/
11465static inline void
11466lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
11467{
11468 cpumask_clear(&eqhdl->aff_mask);
11469 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
11470 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11471 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
11472}
11473
11474/**
11475 * lpfc_irq_clear_aff - clear IRQ affinity
11476 * @eqhdl: EQ handle
11477 *
11478 **/
11479static inline void
11480lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
11481{
11482 cpumask_clear(&eqhdl->aff_mask);
11483 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11484}
11485
11486/**
11487 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
11488 * @phba: pointer to HBA context object.
11489 * @cpu: cpu going offline/online
11490 * @offline: true, cpu is going offline. false, cpu is coming online.
11491 *
11492 * If cpu is going offline, we'll try our best effort to find the next
11493 * online cpu on the phba's original_mask and migrate all offlining IRQ
11494 * affinities.
dcaa2136 11495 *
3048e3e8 11496 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
dcaa2136 11497 *
3048e3e8 11498 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
11499 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
11500 *
11501 **/
11502static void
11503lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
11504{
11505 struct lpfc_vector_map_info *cpup;
11506 struct cpumask *aff_mask;
11507 unsigned int cpu_select, cpu_next, idx;
3048e3e8 11508 const struct cpumask *orig_mask;
dcaa2136 11509
3048e3e8 11510 if (phba->irq_chann_mode == NORMAL_MODE)
11511 return;
11512
3048e3e8 11513 orig_mask = &phba->sli4_hba.irq_aff_mask;
dcaa2136 11514
3048e3e8 11515 if (!cpumask_test_cpu(cpu, orig_mask))
11516 return;
11517
11518 cpup = &phba->sli4_hba.cpu_map[cpu];
11519
11520 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11521 return;
11522
11523 if (offline) {
11524 /* Find next online CPU on original mask */
11525 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
11526 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
11527
11528 /* Found a valid CPU */
11529 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
11530 /* Go through each eqhdl and ensure offlining
11531 * cpu aff_mask is migrated
11532 */
11533 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11534 aff_mask = lpfc_get_aff_mask(idx);
11535
11536 /* Migrate affinity */
11537 if (cpumask_test_cpu(cpu, aff_mask))
11538 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
11539 cpu_select);
11540 }
11541 } else {
11542 /* Rely on irqbalance if no online CPUs left on NUMA */
11543 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
11544 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
11545 }
11546 } else {
11547 /* Migrate affinity back to this CPU */
11548 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
11549 }
11550}
11551
11552static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
11553{
11554 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11555 struct lpfc_queue *eq, *next;
11556 LIST_HEAD(eqlist);
11557 int retval;
11558
11559 if (!phba) {
11560 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11561 return 0;
11562 }
11563
11564 if (__lpfc_cpuhp_checks(phba, &retval))
11565 return retval;
11566
11567 lpfc_irq_rebalance(phba, cpu, true);
11568
11569 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
11570 if (retval)
11571 return retval;
11572
11573 /* start polling on these eq's */
11574 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
11575 list_del_init(&eq->_poll_list);
11576 lpfc_sli4_start_polling(eq);
11577 }
11578
11579 return 0;
11580}
11581
11582static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
11583{
11584 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11585 struct lpfc_queue *eq, *next;
11586 unsigned int n;
11587 int retval;
11588
11589 if (!phba) {
11590 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11591 return 0;
11592 }
11593
11594 if (__lpfc_cpuhp_checks(phba, &retval))
11595 return retval;
11596
11597 lpfc_irq_rebalance(phba, cpu, false);
11598
11599 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
11600 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
11601 if (n == cpu)
11602 lpfc_sli4_stop_polling(eq);
11603 }
11604
11605 return 0;
11606}
11607
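/*
 * Editorial note: the lpfc_cpu_online()/lpfc_cpu_offline() callbacks
 * above are registered with the cpu hotplug core elsewhere in this
 * file through the multi-instance API. The state name string below is
 * an assumption for illustration, but the call shape is roughly:
 *
 *	rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *				     "lpfc/sli4:online",
 *				     lpfc_cpu_online, lpfc_cpu_offline);
 *	if (rc >= 0)
 *		lpfc_cpuhp_state = rc;
 *
 * Each HBA then attaches itself as an instance with
 * cpuhp_state_add_instance_nocalls(), as lpfc_cpuhp_add() shows.
 */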
11608/**
11609 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
11610 * @phba: pointer to lpfc hba data structure.
11611 *
11612 * This routine is invoked to enable the MSI-X interrupt vectors to device
11613 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
11614 * to cpus on the system.
11615 *
11616 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
11617 * the number of cpus on the same numa node as this adapter. The vectors are
11618 * allocated without requesting OS affinity mapping. A vector will be
11619 * allocated and assigned to each online and offline cpu. If the cpu is
11620 * online, then affinity will be set to that cpu. If the cpu is offline, then
11621 * affinity will be set to the nearest peer cpu within the numa node that is
11622 * online. If there are no online cpus within the numa node, affinity is not
11623 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
11624 * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
11625 * configured.
11626 *
11627 * If numa mode is not enabled and there is more than 1 vector allocated, then
11628 * the driver relies on the managed irq interface where the OS assigns vector to
11629 * cpu affinity. The driver will then use that affinity mapping to setup its
11630 * cpu mapping table.
11631 *
11632 * Return codes
af901ca1 11633 * 0 - successful
11634 * other values - error
11635 **/
11636static int
11637lpfc_sli4_enable_msix(struct lpfc_hba *phba)
11638{
75baf696 11639 int vectors, rc, index;
b83d005e 11640 char *name;
3048e3e8 11641 const struct cpumask *aff_mask = NULL;
dcaa2136 11642 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
17105d95 11643 struct lpfc_vector_map_info *cpup;
11644 struct lpfc_hba_eq_hdl *eqhdl;
11645 const struct cpumask *maskp;
dcaa2136 11646 unsigned int flags = PCI_IRQ_MSIX;
11647
11648 /* Set up MSI-X multi-message vectors */
6a828b0f 11649 vectors = phba->cfg_irq_chann;
45ffac19 11650
11651 if (phba->irq_chann_mode != NORMAL_MODE)
11652 aff_mask = &phba->sli4_hba.irq_aff_mask;
11653
11654 if (aff_mask) {
11655 cpu_cnt = cpumask_weight(aff_mask);
11656 vectors = min(phba->cfg_irq_chann, cpu_cnt);
11657
11658 /* cpu: iterates over aff_mask including offline or online
11659 * cpu_select: iterates over online aff_mask to set affinity
dcaa2136 11660 */
11661 cpu = cpumask_first(aff_mask);
11662 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11663 } else {
11664 flags |= PCI_IRQ_AFFINITY;
11665 }
11666
11667 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
4f871e1b 11668 if (rc < 0) {
11669 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11670 "0484 PCI enable MSI-X failed (%d)\n", rc);
029165ac 11671 goto vec_fail_out;
da0436e9 11672 }
4f871e1b 11673 vectors = rc;
75baf696 11674
7bb03bbf 11675 /* Assign MSI-X vectors to interrupt handlers */
67d12733 11676 for (index = 0; index < vectors; index++) {
11677 eqhdl = lpfc_get_eq_hdl(index);
11678 name = eqhdl->handler_name;
11679 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
11680 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
4305f183 11681 LPFC_DRIVER_HANDLER_NAME"%d", index);
da0436e9 11682
dcaa2136 11683 eqhdl->idx = index;
11684 rc = request_irq(pci_irq_vector(phba->pcidev, index),
11685 &lpfc_sli4_hba_intr_handler, 0,
dcaa2136 11686 name, eqhdl);
11687 if (rc) {
11688 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11689 "0486 MSI-X fast-path (%d) "
11690 "request_irq failed (%d)\n", index, rc);
11691 goto cfg_fail_out;
11692 }
11693
11694 eqhdl->irq = pci_irq_vector(phba->pcidev, index);
11695
3048e3e8 11696 if (aff_mask) {
11697 /* If found a neighboring online cpu, set affinity */
11698 if (cpu_select < nr_cpu_ids)
11699 lpfc_irq_set_aff(eqhdl, cpu_select);
11700
11701 /* Assign EQ to cpu_map */
11702 lpfc_assign_eq_map_info(phba, index,
11703 LPFC_CPU_FIRST_IRQ,
11704 cpu);
11705
11706 /* Iterate to next offline or online cpu in aff_mask */
11707 cpu = cpumask_next(cpu, aff_mask);
dcaa2136 11708
11709 /* Find next online cpu in aff_mask to set affinity */
11710 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11711 } else if (vectors == 1) {
11712 cpu = cpumask_first(cpu_present_mask);
11713 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
11714 cpu);
11715 } else {
11716 maskp = pci_irq_get_affinity(phba->pcidev, index);
11717
11718 /* Loop through all CPUs associated with vector index */
11719 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
11720 cpup = &phba->sli4_hba.cpu_map[cpu];
11721
11722			/* If this is the first CPU that's assigned to
11723			 * this vector, set LPFC_CPU_FIRST_IRQ.
11724			 *
11725			 * With certain platforms it's possible that irq
11726			 * vectors are affinitized to all the cpus.
11727			 * This can result in each cpu_map.eq being set
11728			 * to the last vector, overwriting all the
11729			 * previous cpu_map.eq entries. Ensure that
11730			 * each vector receives a place in cpu_map.
11731			 * A later call to lpfc_cpu_affinity_check will
11732			 * ensure we are nicely balanced out.
dcaa2136 11733 */
11734 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
11735 continue;
dcaa2136 11736 lpfc_assign_eq_map_info(phba, index,
17105d95 11737 LPFC_CPU_FIRST_IRQ,
dcaa2136 11738 cpu);
17105d95 11739 break;
11740 }
11741 }
11742 }
11743
6a828b0f 11744 if (vectors != phba->cfg_irq_chann) {
372c187b 11745 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11746 "3238 Reducing IO channels to match number of "
11747 "MSI-X vectors, requested %d got %d\n",
11748 phba->cfg_irq_chann, vectors);
11749 if (phba->cfg_irq_chann > vectors)
11750 phba->cfg_irq_chann = vectors;
82c3e9ba 11751 }
7bb03bbf 11752
11753 return rc;
11754
11755cfg_fail_out:
11756 /* free the irq already requested */
11757 for (--index; index >= 0; index--) {
11758 eqhdl = lpfc_get_eq_hdl(index);
11759 lpfc_irq_clear_aff(eqhdl);
11760 irq_set_affinity_hint(eqhdl->irq, NULL);
11761 free_irq(eqhdl->irq, eqhdl);
11762 }
da0436e9 11763
da0436e9 11764 /* Unconfigure MSI-X capability structure */
45ffac19 11765 pci_free_irq_vectors(phba->pcidev);
11766
11767vec_fail_out:
11768 return rc;
11769}
11770
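/*
 * Editorial sketch (generic PCI usage, not lpfc code): the minimal
 * MSI-X plumbing pattern behind lpfc_sli4_enable_msix() above, with
 * the naming and affinity logic stripped out. Error unwinding is
 * noted but deliberately not implemented here.
 */
static int __maybe_unused
lpfc_example_msix_minimal(struct pci_dev *pdev, irq_handler_t handler,
			  void *ctx, int want)
{
	int got, i, rc;

	/* Ask for up to 'want' MSI-X vectors; settle for at least one */
	got = pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_MSIX);
	if (got < 0)
		return got;

	/* Hook one handler per allocated vector */
	for (i = 0; i < got; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
				 "lpfc-example", ctx);
		if (rc)
			return rc;	/* a real caller must unwind here */
	}
	return got;
}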
11771/**
11772 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
11773 * @phba: pointer to lpfc hba data structure.
11774 *
11775 * This routine is invoked to enable the MSI interrupt mode to device with
11776 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
11777 * called to enable the MSI vector. The device driver is responsible for
11778 * calling request_irq() to register the MSI vector with an interrupt
11779 * handler, which is done in this function.
11780 *
11781 * Return codes
af901ca1 11782 * 0 - successful
11783 * other values - error
11784 **/
11785static int
11786lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11787{
11788 int rc, index;
11789 unsigned int cpu;
11790 struct lpfc_hba_eq_hdl *eqhdl;
da0436e9 11791
11792 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11793 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11794 if (rc > 0)
11795 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11796 "0487 PCI enable MSI mode success.\n");
11797 else {
11798 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11799 "0488 PCI enable MSI mode failed (%d)\n", rc);
07b1b914 11800 return rc ? rc : -1;
11801 }
11802
11803 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
ed243d37 11804 0, LPFC_DRIVER_NAME, phba);
da0436e9 11805 if (rc) {
07b1b914 11806 pci_free_irq_vectors(phba->pcidev);
11807 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11808 "0490 MSI request_irq failed (%d)\n", rc);
75baf696 11809 return rc;
11810 }
11811
11812 eqhdl = lpfc_get_eq_hdl(0);
11813 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11814
11815 cpu = cpumask_first(cpu_present_mask);
11816 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
11817
6a828b0f 11818 for (index = 0; index < phba->cfg_irq_chann; index++) {
11819 eqhdl = lpfc_get_eq_hdl(index);
11820 eqhdl->idx = index;
11821 }
11822
75baf696 11823 return 0;
11824}
11825
11826/**
11827 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
11828 * @phba: pointer to lpfc hba data structure.
fe614acd 11829 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
11830 *
11831 * This routine is invoked to enable device interrupt and associate driver's
11832 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
11833 * interface spec. Depending on the interrupt mode configured for the driver,
11834 * the driver will try to fall back from the configured interrupt mode to an
11835 * interrupt mode which is supported by the platform, kernel, and device in
11836 * the order of:
11837 * MSI-X -> MSI -> IRQ.
11838 *
11839 * Return codes
af901ca1 11840 * 0 - successful
11841 * other values - error
11842 **/
11843static uint32_t
11844lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11845{
11846 uint32_t intr_mode = LPFC_INTR_ERROR;
895427bd 11847 int retval, idx;
11848
11849 if (cfg_mode == 2) {
11850 /* Preparation before conf_msi mbox cmd */
11851 retval = 0;
11852 if (!retval) {
11853 /* Now, try to enable MSI-X interrupt mode */
11854 retval = lpfc_sli4_enable_msix(phba);
11855 if (!retval) {
11856 /* Indicate initialization to MSI-X mode */
11857 phba->intr_type = MSIX;
11858 intr_mode = 2;
11859 }
11860 }
11861 }
11862
11863 /* Fallback to MSI if MSI-X initialization failed */
11864 if (cfg_mode >= 1 && phba->intr_type == NONE) {
11865 retval = lpfc_sli4_enable_msi(phba);
11866 if (!retval) {
11867 /* Indicate initialization to MSI mode */
11868 phba->intr_type = MSI;
11869 intr_mode = 1;
11870 }
11871 }
11872
11873	/* Fall back to INTx if both MSI-X/MSI initialization failed */
11874 if (phba->intr_type == NONE) {
11875 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11876 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11877 if (!retval) {
895427bd 11878 struct lpfc_hba_eq_hdl *eqhdl;
dcaa2136 11879 unsigned int cpu;
895427bd 11880
11881 /* Indicate initialization to INTx mode */
11882 phba->intr_type = INTx;
11883 intr_mode = 0;
895427bd 11884
11885 eqhdl = lpfc_get_eq_hdl(0);
11886 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11887
11888 cpu = cpumask_first(cpu_present_mask);
11889 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
11890 cpu);
6a828b0f 11891 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
dcaa2136 11892 eqhdl = lpfc_get_eq_hdl(idx);
895427bd 11893 eqhdl->idx = idx;
1ba981fd 11894 }
11895 }
11896 }
11897 return intr_mode;
11898}
11899
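/*
 * Editorial note (inferred from the checks in lpfc_sli4_enable_intr()
 * above): the cfg_mode argument picks the top rung of the fallback
 * ladder, roughly:
 *
 *	cfg_mode 2:  try MSI-X, then MSI, then INTx
 *	cfg_mode 1:  try MSI, then INTx
 *	cfg_mode 0:  INTx only
 *
 * The returned intr_mode (2, 1 or 0) records which rung succeeded;
 * LPFC_INTR_ERROR means none did.
 */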
11900/**
11901 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
11902 * @phba: pointer to lpfc hba data structure.
11903 *
11904 * This routine is invoked to disable device interrupt and disassociate
11905 * the driver's interrupt handler(s) from interrupt vector(s) to device
11906 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
11907 * will release the interrupt vector(s) for the message signaled interrupt.
11908 **/
11909static void
11910lpfc_sli4_disable_intr(struct lpfc_hba *phba)
11911{
11912 /* Disable the currently initialized interrupt mode */
11913 if (phba->intr_type == MSIX) {
11914 int index;
dcaa2136 11915 struct lpfc_hba_eq_hdl *eqhdl;
11916
11917 /* Free up MSI-X multi-message vectors */
6a828b0f 11918 for (index = 0; index < phba->cfg_irq_chann; index++) {
11919 eqhdl = lpfc_get_eq_hdl(index);
11920 lpfc_irq_clear_aff(eqhdl);
11921 irq_set_affinity_hint(eqhdl->irq, NULL);
11922 free_irq(eqhdl->irq, eqhdl);
b3295c2a 11923 }
45ffac19 11924 } else {
da0436e9 11925 free_irq(phba->pcidev->irq, phba);
11926 }
11927
11928 pci_free_irq_vectors(phba->pcidev);
11929
11930 /* Reset interrupt management states */
11931 phba->intr_type = NONE;
11932 phba->sli.slistat.sli_intr = 0;
11933}
11934
11935/**
11936 * lpfc_unset_hba - Unset SLI3 hba device initialization
11937 * @phba: pointer to lpfc hba data structure.
11938 *
11939 * This routine is invoked to unset the HBA device initialization steps to
11940 * a device with SLI-3 interface spec.
11941 **/
11942static void
11943lpfc_unset_hba(struct lpfc_hba *phba)
11944{
11945 struct lpfc_vport *vport = phba->pport;
11946 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
11947
11948 spin_lock_irq(shost->host_lock);
11949 vport->load_flag |= FC_UNLOADING;
11950 spin_unlock_irq(shost->host_lock);
11951
11952 kfree(phba->vpi_bmask);
11953 kfree(phba->vpi_ids);
11954
11955 lpfc_stop_hba_timers(phba);
11956
11957 phba->pport->work_port_events = 0;
11958
11959 lpfc_sli_hba_down(phba);
11960
11961 lpfc_sli_brdrestart(phba);
11962
11963 lpfc_sli_disable_intr(phba);
11964
11965 return;
11966}
11967
11968/**
11969 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
11970 * @phba: Pointer to HBA context object.
11971 *
11972 * This function is called in the SLI4 code path to wait for completion
11973 * of the device's XRI exchange busy conditions. It will check the XRI
11974 * exchange busy on outstanding FCP and ELS I/Os every 10ms for up to 10
11975 * seconds; after that, it will check it every 30 seconds, log an error
11976 * message, and wait forever. Only when all XRI exchange busy conditions
11977 * have completed will the driver unload proceed with invoking the
11978 * function reset ioctl mailbox command to the CNA and the rest of the
11979 * driver unload resource release.
11980 **/
11981static void
11982lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
11983{
5e5b511d 11984 struct lpfc_sli4_hdw_queue *qp;
c00f62e6 11985 int idx, ccnt;
5af5eee7 11986 int wait_time = 0;
5e5b511d 11987 int io_xri_cmpl = 1;
86c67379 11988 int nvmet_xri_cmpl = 1;
11989 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
11990
11991 /* Driver just aborted IOs during the hba_unset process. Pause
11992 * here to give the HBA time to complete the IO and get entries
11993 * into the abts lists.
11994 */
11995 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
11996
11997 /* Wait for NVME pending IO to flush back to transport. */
11998 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11999 lpfc_nvme_wait_for_io_drain(phba);
12000
5e5b511d 12001 ccnt = 0;
12002 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
12003 qp = &phba->sli4_hba.hdwq[idx];
12004 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
12005 if (!io_xri_cmpl) /* if list is NOT empty */
12006 ccnt++;
12007 }
12008 if (ccnt)
12009 io_xri_cmpl = 0;
5e5b511d 12010
86c67379 12011 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12012 nvmet_xri_cmpl =
12013 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
12014 }
895427bd 12015
c00f62e6 12016 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
5af5eee7 12017 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
68c9b55d 12018 if (!nvmet_xri_cmpl)
372c187b 12019 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12020 "6424 NVMET XRI exchange busy "
12021 "wait time: %d seconds.\n",
12022 wait_time/1000);
5e5b511d 12023 if (!io_xri_cmpl)
372c187b 12024 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
c00f62e6 12025 "6100 IO XRI exchange busy "
12026 "wait time: %d seconds.\n",
12027 wait_time/1000);
12028 if (!els_xri_cmpl)
372c187b 12029 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12030 "2878 ELS XRI exchange busy "
12031 "wait time: %d seconds.\n",
12032 wait_time/1000);
12033 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
12034 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
12035 } else {
12036 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
12037 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
12038 }
12039
12040 ccnt = 0;
12041 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
12042 qp = &phba->sli4_hba.hdwq[idx];
12043 io_xri_cmpl = list_empty(
12044 &qp->lpfc_abts_io_buf_list);
12045 if (!io_xri_cmpl) /* if list is NOT empty */
12046 ccnt++;
12047 }
12048 if (ccnt)
12049 io_xri_cmpl = 0;
5e5b511d 12050
86c67379 12051 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12052 nvmet_xri_cmpl = list_empty(
12053 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
12054 }
12055 els_xri_cmpl =
12056 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
f358dd0c 12057
12058 }
12059}
12060
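/*
 * Editorial sketch (hypothetical condensation of the loop above): the
 * wait cadence polls fast until the timeout threshold is crossed, then
 * polls slowly forever. The LPFC_XRI_EXCH_BUSY_WAIT_* constants are
 * the driver's own; the callback is an assumption for illustration.
 */
static void __maybe_unused
lpfc_example_busy_wait_cadence(bool (*still_busy)(struct lpfc_hba *),
			       struct lpfc_hba *phba)
{
	int wait_time = 0;

	while (still_busy(phba)) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			/* slow poll once the threshold is exceeded */
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			/* fast poll during the initial window */
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
	}
}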
12061/**
12062 * lpfc_sli4_hba_unset - Unset the fcoe hba
12063 * @phba: Pointer to HBA context object.
12064 *
12065 * This function is called in the SLI4 code path to reset the HBA's FCoE
12066 * function. The caller is not required to hold any lock. This routine
12067 * issues PCI function reset mailbox command to reset the FCoE function.
12068 * At the end of the function, it calls lpfc_hba_down_post function to
12069 * free any pending commands.
12070 **/
12071static void
12072lpfc_sli4_hba_unset(struct lpfc_hba *phba)
12073{
12074 int wait_cnt = 0;
12075 LPFC_MBOXQ_t *mboxq;
912e3acd 12076 struct pci_dev *pdev = phba->pcidev;
12077
12078 lpfc_stop_hba_timers(phba);
12079 if (phba->pport)
12080 phba->sli4_hba.intr_enable = 0;
12081
12082 /*
12083 * Gracefully wait out the potential current outstanding asynchronous
12084 * mailbox command.
12085 */
12086
12087 /* First, block any pending async mailbox command from posted */
12088 spin_lock_irq(&phba->hbalock);
12089 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12090 spin_unlock_irq(&phba->hbalock);
12091	/* Now, try to wait it out if we can */
12092 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
12093 msleep(10);
12094 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
12095 break;
12096 }
12097 /* Forcefully release the outstanding mailbox command if timed out */
12098 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
12099 spin_lock_irq(&phba->hbalock);
12100 mboxq = phba->sli.mbox_active;
12101 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
12102 __lpfc_mbox_cmpl_put(phba, mboxq);
12103 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12104 phba->sli.mbox_active = NULL;
12105 spin_unlock_irq(&phba->hbalock);
12106 }
12107
12108 /* Abort all iocbs associated with the hba */
12109 lpfc_sli_hba_iocb_abort(phba);
12110
12111 /* Wait for completion of device XRI exchange busy */
12112 lpfc_sli4_xri_exchange_busy_wait(phba);
12113
93a4d6f4 12114 /* per-phba callback de-registration for hotplug event */
12115 if (phba->pport)
12116 lpfc_cpuhp_remove(phba);
93a4d6f4 12117
12118 /* Disable PCI subsystem interrupt */
12119 lpfc_sli4_disable_intr(phba);
12120
12121 /* Disable SR-IOV if enabled */
12122 if (phba->cfg_sriov_nr_virtfn)
12123 pci_disable_sriov(pdev);
12124
12125 /* Stop kthread signal shall trigger work_done one more time */
12126 kthread_stop(phba->worker_thread);
12127
d2cc9bcd 12128 /* Disable FW logging to host memory */
1165a5c2 12129 lpfc_ras_stop_fwlog(phba);
d2cc9bcd 12130
12131 /* Unset the queues shared with the hardware then release all
12132 * allocated resources.
12133 */
12134 lpfc_sli4_queue_unset(phba);
12135 lpfc_sli4_queue_destroy(phba);
12136
12137 /* Reset SLI4 HBA FCoE function */
12138 lpfc_pci_function_reset(phba);
12139
12140 /* Free RAS DMA memory */
12141 if (phba->ras_fwlog.ras_enabled)
12142 lpfc_sli4_ras_dma_free(phba);
12143
da0436e9 12144 /* Stop the SLI4 device port */
12145 if (phba->pport)
12146 phba->pport->work_port_events = 0;
12147}
12148
12149/**
12150 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
12151 * @phba: Pointer to HBA context object.
12152 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
12153 *
12154 * This function is called in the SLI4 code path to read the port's
12155 * sli4 capabilities.
12156 *
12157 * This function may be called from any context that can block-wait
12158 * for the completion. The expectation is that this routine is called
12159 * typically from probe_one or from the online routine.
12160 **/
12161int
12162lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
12163{
12164 int rc;
12165 struct lpfc_mqe *mqe = &mboxq->u.mqe;
12166 struct lpfc_pc_sli4_params *sli4_params;
a183a15f 12167 uint32_t mbox_tmo;
fedd3b7b 12168 int length;
bf316c78 12169 bool exp_wqcq_pages = true;
fedd3b7b
JS
12170 struct lpfc_sli4_parameters *mbx_sli4_parameters;
12171
12172 /*
12173 * By default, the driver assumes the SLI4 port requires RPI
12174 * header postings. The SLI4_PARAM response will correct this
12175 * assumption.
12176 */
12177 phba->sli4_hba.rpi_hdrs_in_use = 1;
12178
12179 /* Read the port's SLI4 Config Parameters */
12180 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
12181 sizeof(struct lpfc_sli4_cfg_mhdr));
12182 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
12183 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
12184 length, LPFC_SLI4_MBX_EMBED);
12185 if (!phba->sli4_hba.intr_enable)
12186 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
12187 else {
12188 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
12189 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
12190 }
12191 if (unlikely(rc))
12192 return rc;
12193 sli4_params = &phba->sli4_hba.pc_sli4_params;
12194 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
12195 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
12196 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
12197 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
12198 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
12199 mbx_sli4_parameters);
12200 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
12201 mbx_sli4_parameters);
12202 if (bf_get(cfg_phwq, mbx_sli4_parameters))
12203 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
12204 else
12205 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
12206 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
12207 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
12208 mbx_sli4_parameters);
1ba981fd 12209 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
12210 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
12211 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
12212 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
12213 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
12214 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
12215 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
0c651878 12216 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
66e9e6bf 12217 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
83c6cb1a 12218 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
12219 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
12220 mbx_sli4_parameters);
895427bd 12221 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
12222 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
12223 mbx_sli4_parameters);
12224 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
12225 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
c15e0704 12226
12227 /* Check for Extended Pre-Registered SGL support */
12228 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
12229
12230 /* Check for firmware nvme support */
12231 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
12232 bf_get(cfg_xib, mbx_sli4_parameters));
12233
12234 if (rc) {
12235 /* Save this to indicate the Firmware supports NVME */
12236 sli4_params->nvme = 1;
12237
12238 /* Firmware NVME support, check driver FC4 NVME support */
12239 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
12240 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12241 "6133 Disabling NVME support: "
12242 "FC4 type not supported: x%x\n",
12243 phba->cfg_enable_fc4_type);
12244 goto fcponly;
12245 }
12246 } else {
12247 /* No firmware NVME support, check driver FC4 NVME support */
12248 sli4_params->nvme = 0;
12249 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12250 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
12251 "6101 Disabling NVME support: Not "
12252 "supported by firmware (%d %d) x%x\n",
12253 bf_get(cfg_nvme, mbx_sli4_parameters),
12254 bf_get(cfg_xib, mbx_sli4_parameters),
12255 phba->cfg_enable_fc4_type);
12256fcponly:
12257 phba->nvmet_support = 0;
12258 phba->cfg_nvmet_mrq = 0;
6a224b47 12259 phba->cfg_nvme_seg_cnt = 0;
12260
12261 /* If no FC4 type support, move to just SCSI support */
12262 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
12263 return -ENODEV;
12264 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
12265 }
895427bd 12266 }
0558056c 12267
c26c265b 12268 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
a5f7337f 12269 * accommodate 512K and 1M IOs in a single nvme buf.
c26c265b 12270 */
a5f7337f 12271 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
c26c265b 12272 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
c26c265b 12273
12274 /* Enable embedded Payload BDE if support is indicated */
12275 if (bf_get(cfg_pbde, mbx_sli4_parameters))
12276 phba->cfg_enable_pbde = 1;
12277 else
414abe0a 12278 phba->cfg_enable_pbde = 0;
0bc2b7c5 12279
12280 /*
12281 * To support Suppress Response feature we must satisfy 3 conditions.
12282 * lpfc_suppress_rsp module parameter must be set (default).
12283 * In SLI4-Parameters Descriptor:
12284 * Extended Inline Buffers (XIB) must be supported.
12285 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
12286 * (double negative).
12287 */
12288 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
12289 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
f358dd0c 12290 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
12291 else
12292 phba->cfg_suppress_rsp = 0;
f358dd0c 12293
12294 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
12295 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
12296
12297 /* Make sure that sge_supp_len can be handled by the driver */
12298 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
12299 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
12300
b5c53958 12301 /*
12302 * Check whether the adapter supports an embedded copy of the
12303 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
12304 * to use this option, 128-byte WQEs must be used.
12305 */
12306 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
12307 phba->fcp_embed_io = 1;
12308 else
12309 phba->fcp_embed_io = 0;
7bdedb34 12310
0bc2b7c5 12311 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
414abe0a 12312 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
0bc2b7c5 12313 bf_get(cfg_xib, mbx_sli4_parameters),
414abe0a 12314 phba->cfg_enable_pbde,
ae463b60 12315 phba->fcp_embed_io, sli4_params->nvme,
4e565cf0 12316 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
0bc2b7c5 12317
12318 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
12319 LPFC_SLI_INTF_IF_TYPE_2) &&
12320 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
c221768b 12321 LPFC_SLI_INTF_FAMILY_LNCR_A0))
12322 exp_wqcq_pages = false;
12323
12324 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
12325 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
bf316c78 12326 exp_wqcq_pages &&
12327 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
12328 phba->enab_exp_wqcq_pages = 1;
12329 else
12330 phba->enab_exp_wqcq_pages = 0;
12331 /*
12332 * Check if the SLI port supports MDS Diagnostics
12333 */
12334 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
12335 phba->mds_diags_support = 1;
12336 else
12337 phba->mds_diags_support = 0;
d2cc9bcd 12338
12339 /*
12340 * Check if the SLI port supports NSLER
12341 */
12342 if (bf_get(cfg_nsler, mbx_sli4_parameters))
12343 phba->nsler = 1;
12344 else
12345 phba->nsler = 0;
12346
12347 /* Save PB info for use during HBA setup */
12348 sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
12349 sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters);
12350 sli4_params->mib_size = mbx_sli4_parameters->mib_size;
12351 sli4_params->mi_value = LPFC_DFLT_MIB_VAL;
12352
12353 /* Next we check for Vendor MIB support */
12354 if (sli4_params->mi_ver && phba->cfg_enable_mi)
12355 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
12356
12357 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12358 "6461 MIB attr %d enable %d FDMI %d buf %d:%d\n",
12359 sli4_params->mi_ver, phba->cfg_enable_mi,
12360 sli4_params->mi_value, sli4_params->mib_bde_cnt,
12361 sli4_params->mib_size);
12362 return 0;
12363}
12364
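/*
 * Editorial note: bf_get() used throughout the parameter parsing above
 * is lpfc's bitfield accessor. Assumed semantics (the per-field _WORD,
 * _SHIFT and _MASK macros are declared in lpfc_hw4.h), approximately:
 *
 *	#define bf_get(name, ptr) \
 *		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
 *
 * so a call such as bf_get(cfg_mi_ver, mbx_sli4_parameters) isolates
 * one named field without open-coded masking at each call site.
 */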
12365/**
12366 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
12367 * @pdev: pointer to PCI device
12368 * @pid: pointer to PCI device identifier
12369 *
12370 * This routine is to be called to attach a device with SLI-3 interface spec
12371 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
12372 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
12373 * information of the device and driver to see if the driver state that it can
12374 * support this kind of device. If the match is successful, the driver core
12375 * invokes this routine. If this routine determines it can claim the HBA, it
12376 * does all the initialization that it needs to do to handle the HBA properly.
12377 *
12378 * Return code
12379 * 0 - driver can claim the device
12380 * negative value - driver can not claim the device
12381 **/
6f039790 12382static int
12383lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
12384{
12385 struct lpfc_hba *phba;
12386 struct lpfc_vport *vport = NULL;
6669f9bb 12387 struct Scsi_Host *shost = NULL;
12388 int error;
12389 uint32_t cfg_mode, intr_mode;
12390
12391 /* Allocate memory for HBA structure */
12392 phba = lpfc_hba_alloc(pdev);
12393 if (!phba)
12394 return -ENOMEM;
12395
12396 /* Perform generic PCI device enabling operation */
12397 error = lpfc_enable_pci_dev(phba);
079b5c91 12398 if (error)
da0436e9 12399 goto out_free_phba;
12400
12401 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
12402 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
12403 if (error)
12404 goto out_disable_pci_dev;
12405
12406 /* Set up SLI-3 specific device PCI memory space */
12407 error = lpfc_sli_pci_mem_setup(phba);
12408 if (error) {
12409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12410 "1402 Failed to set up pci memory space.\n");
12411 goto out_disable_pci_dev;
12412 }
12413
12414 /* Set up SLI-3 specific device driver resources */
12415 error = lpfc_sli_driver_resource_setup(phba);
12416 if (error) {
12417 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12418 "1404 Failed to set up driver resource.\n");
12419 goto out_unset_pci_mem_s3;
12420 }
12421
12422 /* Initialize and populate the iocb list per host */
d1f525aa 12423
12424 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
12425 if (error) {
12426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12427 "1405 Failed to initialize iocb list.\n");
12428 goto out_unset_driver_resource_s3;
12429 }
12430
12431 /* Set up common device driver resources */
12432 error = lpfc_setup_driver_resource_phase2(phba);
12433 if (error) {
12434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12435 "1406 Failed to set up driver resource.\n");
12436 goto out_free_iocb_list;
12437 }
12438
12439 /* Get the default values for Model Name and Description */
12440 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
12441
12442 /* Create SCSI host to the physical port */
12443 error = lpfc_create_shost(phba);
12444 if (error) {
12445 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12446 "1407 Failed to create scsi host.\n");
12447 goto out_unset_driver_resource;
12448 }
12449
12450 /* Configure sysfs attributes */
12451 vport = phba->pport;
12452 error = lpfc_alloc_sysfs_attr(vport);
12453 if (error) {
12454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12455 "1476 Failed to allocate sysfs attr\n");
12456 goto out_destroy_shost;
12457 }
12458
6669f9bb 12459 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
12460	/* Now, try to enable interrupt and bring up the device */
12461 cfg_mode = phba->cfg_use_msi;
12462 while (true) {
12463 /* Put device to a known state before enabling interrupt */
12464 lpfc_stop_port(phba);
12465 /* Configure and enable interrupt */
12466 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
12467 if (intr_mode == LPFC_INTR_ERROR) {
372c187b 12468 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12469 "0431 Failed to enable interrupt.\n");
12470 error = -ENODEV;
12471 goto out_free_sysfs_attr;
12472 }
12473 /* SLI-3 HBA setup */
12474 if (lpfc_sli_hba_setup(phba)) {
372c187b 12475 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12476 "1477 Failed to set up hba\n");
12477 error = -ENODEV;
12478 goto out_remove_device;
12479 }
12480
12481 /* Wait 50ms for the interrupts of previous mailbox commands */
12482 msleep(50);
12483 /* Check active interrupts on message signaled interrupts */
12484 if (intr_mode == 0 ||
12485 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
12486 /* Log the current active interrupt mode */
12487 phba->intr_mode = intr_mode;
12488 lpfc_log_intr_mode(phba, intr_mode);
12489 break;
12490 } else {
12491 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12492 "0447 Configure interrupt mode (%d) "
12493 "failed active interrupt test.\n",
12494 intr_mode);
12495 /* Disable the current interrupt mode */
12496 lpfc_sli_disable_intr(phba);
12497 /* Try next level of interrupt mode */
12498 cfg_mode = --intr_mode;
12499 }
12500 }
12501
12502 /* Perform post initialization setup */
12503 lpfc_post_init_setup(phba);
12504
12505 /* Check if there are static vports to be created. */
12506 lpfc_create_static_vport(phba);
12507
12508 return 0;
12509
12510out_remove_device:
12511 lpfc_unset_hba(phba);
12512out_free_sysfs_attr:
12513 lpfc_free_sysfs_attr(vport);
12514out_destroy_shost:
12515 lpfc_destroy_shost(phba);
12516out_unset_driver_resource:
12517 lpfc_unset_driver_resource_phase2(phba);
12518out_free_iocb_list:
12519 lpfc_free_iocb_list(phba);
12520out_unset_driver_resource_s3:
12521 lpfc_sli_driver_resource_unset(phba);
12522out_unset_pci_mem_s3:
12523 lpfc_sli_pci_mem_unset(phba);
12524out_disable_pci_dev:
12525 lpfc_disable_pci_dev(phba);
12526 if (shost)
12527 scsi_host_put(shost);
12528out_free_phba:
12529 lpfc_hba_free(phba);
12530 return error;
12531}
12532
12533/**
12534 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
12535 * @pdev: pointer to PCI device
12536 *
12537 * This routine is to be called to detach a device with SLI-3 interface
12538 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
12539 * removed from PCI bus, it performs all the necessary cleanup for the HBA
12540 * device to be removed from the PCI subsystem properly.
12541 **/
6f039790 12542static void
12543lpfc_pci_remove_one_s3(struct pci_dev *pdev)
12544{
12545 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12546 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
12547 struct lpfc_vport **vports;
12548 struct lpfc_hba *phba = vport->phba;
12549 int i;
12550
12551 spin_lock_irq(&phba->hbalock);
12552 vport->load_flag |= FC_UNLOADING;
12553 spin_unlock_irq(&phba->hbalock);
12554
12555 lpfc_free_sysfs_attr(vport);
12556
12557 /* Release all the vports against this physical port */
12558 vports = lpfc_create_vport_work_array(phba);
12559 if (vports != NULL)
12560 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12561 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
12562 continue;
da0436e9 12563 fc_vport_terminate(vports[i]->fc_vport);
587a37f6 12564 }
12565 lpfc_destroy_vport_work_array(phba, vports);
12566
95f0ef8a 12567 /* Remove FC host with the physical port */
da0436e9 12568 fc_remove_host(shost);
e9b11083 12569 scsi_remove_host(shost);
d613b6a7 12570
95f0ef8a 12571 /* Clean up all nodes, mailboxes and IOs. */
12572 lpfc_cleanup(vport);
12573
12574 /*
12575	 * Bring down the SLI Layer. This step disables all interrupts,
12576 * clears the rings, discards all mailbox commands, and resets
12577 * the HBA.
12578 */
12579
48e34d0f 12580 /* HBA interrupt will be disabled after this call */
12581 lpfc_sli_hba_down(phba);
12582 /* Stop kthread signal shall trigger work_done one more time */
12583 kthread_stop(phba->worker_thread);
12584 /* Final cleanup of txcmplq and reset the HBA */
12585 lpfc_sli_brdrestart(phba);
12586
12587 kfree(phba->vpi_bmask);
12588 kfree(phba->vpi_ids);
12589
da0436e9 12590 lpfc_stop_hba_timers(phba);
523128e5 12591 spin_lock_irq(&phba->port_list_lock);
da0436e9 12592 list_del_init(&vport->listentry);
523128e5 12593 spin_unlock_irq(&phba->port_list_lock);
12594
12595 lpfc_debugfs_terminate(vport);
12596
12597 /* Disable SR-IOV if enabled */
12598 if (phba->cfg_sriov_nr_virtfn)
12599 pci_disable_sriov(pdev);
12600
12601 /* Disable interrupt */
12602 lpfc_sli_disable_intr(phba);
12603
12604 scsi_host_put(shost);
12605
12606 /*
12607 * Call scsi_free before mem_free since scsi bufs are released to their
12608 * corresponding pools here.
12609 */
12610 lpfc_scsi_free(phba);
12611 lpfc_free_iocb_list(phba);
12612
12613 lpfc_mem_free_all(phba);
12614
12615 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
12616 phba->hbqslimp.virt, phba->hbqslimp.phys);
12617
12618 /* Free resources associated with SLI2 interface */
12619 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
12620 phba->slim2p.virt, phba->slim2p.phys);
12621
12622 /* unmap adapter SLIM and Control Registers */
12623 iounmap(phba->ctrl_regs_memmap_p);
12624 iounmap(phba->slim_memmap_p);
12625
12626 lpfc_hba_free(phba);
12627
e0c0483c 12628 pci_release_mem_regions(pdev);
12629 pci_disable_device(pdev);
12630}
12631
12632/**
12633 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
ef6fa16b 12634 * @dev_d: pointer to device
12635 *
12636 * This routine is to be called from the kernel's PCI subsystem to support
12637 * system Power Management (PM) for a device with SLI-3 interface spec. When
12638 * PM invokes this method, it quiesces the device by stopping the driver's
12639 * worker thread for the device, turning off the device's interrupt and DMA,
12640 * and bringing the device offline. Note that as the driver implements the
12641 * minimum PM requirements to a power-aware driver's PM support for
12642 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
12643 * to the suspend() method call will be treated as SUSPEND and the driver will
12644 * fully reinitialize its device during resume() method call, the driver will
12645 * set device to PCI_D3hot state in PCI config space instead of setting it
12646 * according to the @msg provided by the PM.
12647 *
12648 * Return code
12649 * 0 - driver suspended the device
12650 * Error otherwise
12651 **/
12652static int __maybe_unused
12653lpfc_pci_suspend_one_s3(struct device *dev_d)
da0436e9 12654{
ef6fa16b 12655 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
12656 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12657
12658 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12659 "0473 PCI device Power Management suspend.\n");
12660
12661 /* Bring down the device */
618a5230 12662 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12663 lpfc_offline(phba);
12664 kthread_stop(phba->worker_thread);
12665
12666 /* Disable interrupt from device */
12667 lpfc_sli_disable_intr(phba);
12668
12669 return 0;
12670}
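
/*
 * Illustrative sketch only (names here are hypothetical, not part of
 * lpfc): lpfc_pci_suspend_one_s3() and lpfc_pci_resume_one_s3() are
 * plain dev_pm_ops callbacks, and a minimal wiring, mirroring what this
 * driver does near the bottom of this file, would be:
 *
 *   static SIMPLE_DEV_PM_OPS(example_pm_ops,
 *                            lpfc_pci_suspend_one_s3,
 *                            lpfc_pci_resume_one_s3);
 *
 *   static struct pci_driver example_driver = {
 *           .name      = "example",
 *           .driver.pm = &example_pm_ops,
 *   };
 *
 * In the real driver the PM entry points dispatch on phba->pci_dev_grp
 * before reaching the _s3/_s4 variants.
 */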
12671
12672/**
12673 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
ef6fa16b 12674 * @dev_d: pointer to device
12675 *
12676 * This routine is to be called from the kernel's PCI subsystem to support
12677 * system Power Management (PM) for a device with SLI-3 interface spec. When PM
12678 * invokes this method, it restores the device's PCI config space state and
12679 * fully reinitializes the device and brings it online. Note that as the
12680 * driver implements the minimum PM requirements to a power-aware driver's
12681 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
12682 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
12683 * driver will fully reinitialize its device during resume() method call,
12684 * the device will be set to PCI_D0 directly in PCI config space before
12685 * restoring the state.
12686 *
12687 * Return code
12688 * 0 - driver resumed the device
12689 * Error otherwise
12690 **/
12691static int __maybe_unused
12692lpfc_pci_resume_one_s3(struct device *dev_d)
da0436e9 12693{
ef6fa16b 12694 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
12695 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12696 uint32_t intr_mode;
12697 int error;
12698
12699 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12700 "0452 PCI device Power Management resume.\n");
12701
12702 /* Startup the kernel thread for this host adapter. */
12703 phba->worker_thread = kthread_run(lpfc_do_work, phba,
12704 "lpfc_worker_%d", phba->brd_no);
12705 if (IS_ERR(phba->worker_thread)) {
12706 error = PTR_ERR(phba->worker_thread);
12707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12708 "0434 PM resume failed to start worker "
12709 "thread: error=x%x.\n", error);
12710 return error;
12711 }
12712
12713 /* Configure and enable interrupt */
12714 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12715 if (intr_mode == LPFC_INTR_ERROR) {
372c187b 12716 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12717 "0430 PM resume Failed to enable interrupt\n");
12718 return -EIO;
12719 } else
12720 phba->intr_mode = intr_mode;
12721
12722 /* Restart HBA and bring it online */
12723 lpfc_sli_brdrestart(phba);
12724 lpfc_online(phba);
12725
12726 /* Log the current active interrupt mode */
12727 lpfc_log_intr_mode(phba, phba->intr_mode);
12728
12729 return 0;
12730}
12731
12732/**
12733 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
12734 * @phba: pointer to lpfc hba data structure.
12735 *
12736 * This routine is called to prepare the SLI3 device for PCI slot recover. It
e2af0d2e 12737 * aborts all the outstanding SCSI I/Os to the pci device.
12738 **/
12739static void
12740lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
12741{
372c187b 12742 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
891478a2 12743 "2723 PCI channel I/O abort preparing for recovery\n");
12744
12745 /*
12746 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
12747 * and let the SCSI mid-layer retry them to recover.
12748 */
db55fba8 12749 lpfc_sli_abort_fcp_rings(phba);
12750}
12751
12752/**
12753 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
12754 * @phba: pointer to lpfc hba data structure.
12755 *
12756 * This routine is called to prepare the SLI3 device for PCI slot reset. It
12757 * disables the device interrupt and pci device, and aborts the internal FCP
12758 * pending I/Os.
12759 **/
12760static void
12761lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
12762{
372c187b 12763 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
891478a2 12764 "2710 PCI channel disable preparing for reset\n");
e2af0d2e 12765
75baf696 12766 /* Block any management I/Os to the device */
618a5230 12767 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
75baf696 12768
12769 /* Block all SCSI devices' I/Os on the host */
12770 lpfc_scsi_dev_block(phba);
12771
ea714f3d 12772 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
c00f62e6 12773 lpfc_sli_flush_io_rings(phba);
ea714f3d 12774
12775 /* stop all timers */
12776 lpfc_stop_hba_timers(phba);
12777
12778 /* Disable interrupt and pci device */
12779 lpfc_sli_disable_intr(phba);
12780 pci_disable_device(phba->pcidev);
12781}
12782
12783/**
12784 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
12785 * @phba: pointer to lpfc hba data structure.
12786 *
12787 * This routine is called to prepare the SLI3 device for PCI slot permanently
12788 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
12789 * pending I/Os.
12790 **/
12791static void
75baf696 12792lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
0d878419 12793{
372c187b 12794 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
891478a2 12795 "2711 PCI channel permanent disable for failure\n");
12796 /* Block all SCSI devices' I/Os on the host */
12797 lpfc_scsi_dev_block(phba);
12798
12799 /* stop all timers */
12800 lpfc_stop_hba_timers(phba);
12801
0d878419 12802 /* Clean up all driver's outstanding SCSI I/Os */
c00f62e6 12803 lpfc_sli_flush_io_rings(phba);
12804}
12805
12806/**
12807 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
12808 * @pdev: pointer to PCI device.
12809 * @state: the current PCI connection state.
12810 *
12811 * This routine is called from the PCI subsystem for I/O error handling to
12812 * device with SLI-3 interface spec. This function is called by the PCI
12813 * subsystem after a PCI bus error affecting this device has been detected.
12814 * When this function is invoked, it will need to stop all the I/Os and
12815 * interrupt(s) to the device. Once that is done, it will return
12816 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
12817 * as desired.
12818 *
12819 * Return codes
0d878419 12820 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
12821 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
12822 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12823 **/
12824static pci_ers_result_t
12825lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
12826{
12827 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12828 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
da0436e9 12829
12830 switch (state) {
12831 case pci_channel_io_normal:
12832 /* Non-fatal error, prepare for recovery */
12833 lpfc_sli_prep_dev_for_recover(phba);
12834 return PCI_ERS_RESULT_CAN_RECOVER;
12835 case pci_channel_io_frozen:
12836 /* Fatal error, prepare for slot reset */
12837 lpfc_sli_prep_dev_for_reset(phba);
12838 return PCI_ERS_RESULT_NEED_RESET;
12839 case pci_channel_io_perm_failure:
12840 /* Permanent failure, prepare for device down */
75baf696 12841 lpfc_sli_prep_dev_for_perm_failure(phba);
da0436e9 12842 return PCI_ERS_RESULT_DISCONNECT;
12843 default:
12844 /* Unknown state, prepare and request slot reset */
372c187b 12845 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12846 "0472 Unknown PCI error state: x%x\n", state);
12847 lpfc_sli_prep_dev_for_reset(phba);
12848 return PCI_ERS_RESULT_NEED_RESET;
da0436e9 12849 }
12850}
12851
12852/**
12853 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
12854 * @pdev: pointer to PCI device.
12855 *
12856 * This routine is called from the PCI subsystem for error handling to
12857 * device with SLI-3 interface spec. This is called after PCI bus has been
12858 * reset to restart the PCI card from scratch, as if from a cold-boot.
12859 * During the PCI subsystem error recovery, after driver returns
12860 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
12861 * recovery and then call this routine before calling the .resume method
12862 * to recover the device. This function will initialize the HBA device,
12863 * enable the interrupt, but it will just put the HBA into an offline state
12864 * without passing any I/O traffic.
12865 *
12866 * Return codes
12867 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
12868 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12869 */
12870static pci_ers_result_t
12871lpfc_io_slot_reset_s3(struct pci_dev *pdev)
12872{
12873 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12874 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12875 struct lpfc_sli *psli = &phba->sli;
12876 uint32_t intr_mode;
12877
12878 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
12879 if (pci_enable_device_mem(pdev)) {
12880 printk(KERN_ERR "lpfc: Cannot re-enable "
12881 "PCI device after reset.\n");
12882 return PCI_ERS_RESULT_DISCONNECT;
12883 }
12884
12885 pci_restore_state(pdev);
12886
12887 /*
12888 * As the new kernel behavior of pci_restore_state() API call clears
12889 * device saved_state flag, need to save the restored state again.
12890 */
12891 pci_save_state(pdev);
12892
12893 if (pdev->is_busmaster)
12894 pci_set_master(pdev);
12895
12896 spin_lock_irq(&phba->hbalock);
12897 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
12898 spin_unlock_irq(&phba->hbalock);
12899
12900 /* Configure and enable interrupt */
12901 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
12902 if (intr_mode == LPFC_INTR_ERROR) {
372c187b 12903 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12904 "0427 Cannot re-enable interrupt after "
12905 "slot reset.\n");
12906 return PCI_ERS_RESULT_DISCONNECT;
12907 } else
12908 phba->intr_mode = intr_mode;
12909
75baf696 12910 /* Take device offline, it will perform cleanup */
618a5230 12911 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12912 lpfc_offline(phba);
12913 lpfc_sli_brdrestart(phba);
12914
12915 /* Log the current active interrupt mode */
12916 lpfc_log_intr_mode(phba, phba->intr_mode);
12917
12918 return PCI_ERS_RESULT_RECOVERED;
12919}
12920
12921/**
12922 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
12923 * @pdev: pointer to PCI device
12924 *
12925 * This routine is called from the PCI subsystem for error handling to device
12926 * with SLI-3 interface spec. It is called when kernel error recovery tells
12927 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
12928 * error recovery. After this call, traffic can start to flow from this device
12929 * again.
12930 */
12931static void
12932lpfc_io_resume_s3(struct pci_dev *pdev)
12933{
12934 struct Scsi_Host *shost = pci_get_drvdata(pdev);
12935 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3772a991 12936
e2af0d2e 12937 /* Bring device online, it will be no-op for non-fatal error resume */
12938 lpfc_online(phba);
12939}
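
/*
 * Recovery-flow sketch (an assumption drawn from the kernel's PCI
 * error-recovery model, not lpfc-specific code): for a frozen channel
 * the PCI core walks the three handlers above roughly as
 *
 *   error_detected(pdev, pci_channel_io_frozen)
 *           -> driver returns PCI_ERS_RESULT_NEED_RESET
 *   slot_reset(pdev)
 *           -> driver returns PCI_ERS_RESULT_RECOVERED
 *   resume(pdev)
 *           -> lpfc_online() lets traffic flow again
 */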
3772a991 12940
12941/**
12942 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
12943 * @phba: pointer to lpfc hba data structure.
12944 *
12945 * returns the number of ELS/CT IOCBs to reserve
12946 **/
12947int
12948lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
12949{
12950 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
12951
12952 if (phba->sli_rev == LPFC_SLI_REV4) {
12953 if (max_xri <= 100)
6a9c52cf 12954 return 10;
f1126688 12955 else if (max_xri <= 256)
6a9c52cf 12956 return 25;
f1126688 12957 else if (max_xri <= 512)
6a9c52cf 12958 return 50;
f1126688 12959 else if (max_xri <= 1024)
6a9c52cf 12960 return 100;
8a9d2e80 12961 else if (max_xri <= 1536)
6a9c52cf 12962 return 150;
12963 else if (max_xri <= 2048)
12964 return 200;
12965 else
12966 return 250;
12967 } else
12968 return 0;
12969}
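
/*
 * Worked example (illustrative): with max_xri == 512 the chain above
 * falls through the 100 and 256 tiers and matches the "<= 512" tier, so
 * 50 ELS/CT IOCBs are reserved; a non-SLI4 port always yields 0.
 */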
12970
12971/**
12972 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
12973 * @phba: pointer to lpfc hba data structure.
12974 *
f358dd0c 12975 * returns the number of ELS/CT + NVMET IOCBs to reserve
12976 **/
12977int
12978lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
12979{
12980 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
12981
12982 if (phba->nvmet_support)
12983 max_xri += LPFC_NVMET_BUF_POST;
12984 return max_xri;
12985}
12986
12987
0a5ce731 12988static int
12989lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
12990 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
12991 const struct firmware *fw)
12992{
12993 int rc;
12994
12995 /* Three cases: (1) FW was not supported on the detected adapter.
12996 * (2) FW update has been locked out administratively.
12997 * (3) Some other error during FW update.
12998 * In each case, an unmaskable message is written to the console
12999 * for admin diagnosis.
13000 */
13001 if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
a72d56b2 13002 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
5792a0e8 13003 magic_number != MAGIC_NUMBER_G6) ||
a72d56b2 13004 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
5792a0e8 13005 magic_number != MAGIC_NUMBER_G7)) {
372c187b 13006 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13007 "3030 This firmware version is not supported on"
13008 " this HBA model. Device:%x Magic:%x Type:%x "
13009 "ID:%x Size %d %zd\n",
13010 phba->pcidev->device, magic_number, ftype, fid,
13011 fsize, fw->size);
13012 rc = -EINVAL;
13013 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
372c187b 13014 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13015 "3021 Firmware downloads have been prohibited "
13016 "by a system configuration setting on "
13017 "Device:%x Magic:%x Type:%x ID:%x Size %d "
13018 "%zd\n",
13019 phba->pcidev->device, magic_number, ftype, fid,
13020 fsize, fw->size);
13021 rc = -EACCES;
13022 } else {
372c187b 13023 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13024 "3022 FW Download failed. Add Status x%x "
13025 "Device:%x Magic:%x Type:%x ID:%x Size %d "
13026 "%zd\n",
13027 offset, phba->pcidev->device, magic_number,
13028 ftype, fid, fsize, fw->size);
13029 rc = -EIO;
13030 }
13031 return rc;
13032}
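
/*
 * Worked example (illustrative): a Lancer G6 function offered an image
 * whose magic number is MAGIC_NUMBER_G7 takes the first branch above and
 * fails with -EINVAL; ADD_STATUS_FW_DOWNLOAD_HW_DISABLED maps to -EACCES;
 * any other add status is reported as a generic -EIO download failure.
 */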
13033
13034/**
13035 * lpfc_write_firmware - attempt to write a firmware image to the port
52d52440 13036 * @fw: pointer to firmware image returned from request_firmware.
0a5ce731 13037 * @context: pointer to the lpfc hba data structure (the request context).
52d52440 13038 *
52d52440 13039 **/
13040static void
13041lpfc_write_firmware(const struct firmware *fw, void *context)
52d52440 13042{
ce396282 13043 struct lpfc_hba *phba = (struct lpfc_hba *)context;
6b5151fd 13044 char fwrev[FW_REV_STR_SIZE];
ce396282 13045 struct lpfc_grp_hdr *image;
13046 struct list_head dma_buffer_list;
13047 int i, rc = 0;
13048 struct lpfc_dmabuf *dmabuf, *next;
13049 uint32_t offset = 0, temp_offset = 0;
6b6ef5db 13050 uint32_t magic_number, ftype, fid, fsize;
52d52440 13051
c71ab861 13052 /* The firmware image can be NULL in no-wait mode, sanity check */
13053 if (!fw) {
13054 rc = -ENXIO;
13055 goto out;
13056 }
13057 image = (struct lpfc_grp_hdr *)fw->data;
13058
13059 magic_number = be32_to_cpu(image->magic_number);
13060 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
1feb8204 13061 fid = bf_get_be32(lpfc_grp_hdr_id, image);
13062 fsize = be32_to_cpu(image->size);
13063
52d52440 13064 INIT_LIST_HEAD(&dma_buffer_list);
52d52440 13065 lpfc_decode_firmware_rev(phba, fwrev, 1);
88a2cfbb 13066 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
372c187b 13067 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
ce396282 13068 "3023 Updating Firmware, Current Version:%s "
52d52440 13069 "New Version:%s\n",
88a2cfbb 13070 fwrev, image->revision);
13071 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
13072 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
13073 GFP_KERNEL);
13074 if (!dmabuf) {
13075 rc = -ENOMEM;
ce396282 13076 goto release_out;
13077 }
13078 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
13079 SLI4_PAGE_SIZE,
13080 &dmabuf->phys,
13081 GFP_KERNEL);
13082 if (!dmabuf->virt) {
13083 kfree(dmabuf);
13084 rc = -ENOMEM;
ce396282 13085 goto release_out;
13086 }
13087 list_add_tail(&dmabuf->list, &dma_buffer_list);
13088 }
13089 while (offset < fw->size) {
13090 temp_offset = offset;
13091 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
079b5c91 13092 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
13093 memcpy(dmabuf->virt,
13094 fw->data + temp_offset,
13095 fw->size - temp_offset);
13096 temp_offset = fw->size;
13097 break;
13098 }
13099 memcpy(dmabuf->virt, fw->data + temp_offset,
13100 SLI4_PAGE_SIZE);
88a2cfbb 13101 temp_offset += SLI4_PAGE_SIZE;
13102 }
13103 rc = lpfc_wr_object(phba, &dma_buffer_list,
13104 (fw->size - offset), &offset);
1feb8204 13105 if (rc) {
13106 rc = lpfc_log_write_firmware_error(phba, offset,
13107 magic_number,
13108 ftype,
13109 fid,
13110 fsize,
13111 fw);
ce396282 13112 goto release_out;
1feb8204 13113 }
13114 }
13115 rc = offset;
1feb8204 13116 } else
372c187b 13117 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13118 "3029 Skipped Firmware update, Current "
13119 "Version:%s New Version:%s\n",
13120 fwrev, image->revision);
13121
13122release_out:
13123 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
13124 list_del(&dmabuf->list);
13125 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
13126 dmabuf->virt, dmabuf->phys);
13127 kfree(dmabuf);
13128 }
13129 release_firmware(fw);
13130out:
0a5ce731 13131 if (rc < 0)
372c187b 13132 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13133 "3062 Firmware update error, status %d.\n", rc);
13134 else
372c187b 13135 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0a5ce731 13136 "3024 Firmware update success: size %d.\n", rc);
13137}
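
/*
 * Minimal sketch of the chunking used above, assuming only that the
 * image is staged through SLI4_PAGE_SIZE DMA buffers (the helper name
 * is hypothetical, not part of the driver):
 *
 *   static u32 example_chunk_len(u32 fw_size, u32 off)
 *   {
 *           return min_t(u32, fw_size - off, SLI4_PAGE_SIZE);
 *   }
 *
 * The tail buffer carries the remainder; every other buffer holds one
 * full SLI4_PAGE_SIZE page before lpfc_wr_object() advances @offset.
 */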
13138
13139/**
13140 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
13141 * @phba: pointer to lpfc hba data structure.
fe614acd 13142 * @fw_upgrade: which firmware to update.
13143 *
13144 * This routine is called to perform Linux generic firmware upgrade on device
13145 * that supports such feature.
13146 **/
13147int
13148lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
13149{
13150 uint8_t file_name[ELX_MODEL_NAME_SIZE];
13151 int ret;
13152 const struct firmware *fw;
13153
13154 /* Only supported on SLI4 interface type 2 for now */
27d6ac0a 13155 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
13156 LPFC_SLI_INTF_IF_TYPE_2)
13157 return -EPERM;
13158
13159 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
13160
13161 if (fw_upgrade == INT_FW_UPGRADE) {
0733d839 13162 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
13163 file_name, &phba->pcidev->dev,
13164 GFP_KERNEL, (void *)phba,
13165 lpfc_write_firmware);
13166 } else if (fw_upgrade == RUN_FW_UPGRADE) {
13167 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
13168 if (!ret)
13169 lpfc_write_firmware(fw, (void *)phba);
13170 } else {
13171 ret = -EINVAL;
13172 }
13173
13174 return ret;
13175}
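
/*
 * Usage sketch (illustrative): a caller wanting a synchronous update
 * would use
 *
 *   rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
 *
 * which blocks in request_firmware(), while
 *
 *   lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
 *
 * returns immediately and completes asynchronously in
 * lpfc_write_firmware() once the "<ModelName>.grp" image is fetched.
 */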
13176
3772a991 13177/**
da0436e9 13178 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
13179 * @pdev: pointer to PCI device
13180 * @pid: pointer to PCI device identifier
13181 *
13182 * This routine is called from the kernel's PCI subsystem to attach a device
13183 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
3772a991 13184 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
13185 * information of the device and driver to see if the driver
13186 * can support this kind of device. If the match is successful, the driver
13187 * core invokes this routine. If this routine determines it can claim the HBA,
13188 * it does all the initialization that it needs to do to handle the HBA
13189 * properly.
13190 *
13191 * Return code
13192 * 0 - driver can claim the device
13193 * negative value - driver can not claim the device
13194 **/
6f039790 13195static int
da0436e9 13196lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
13197{
13198 struct lpfc_hba *phba;
13199 struct lpfc_vport *vport = NULL;
6669f9bb 13200 struct Scsi_Host *shost = NULL;
6c621a22 13201 int error;
13202 uint32_t cfg_mode, intr_mode;
13203
13204 /* Allocate memory for HBA structure */
13205 phba = lpfc_hba_alloc(pdev);
13206 if (!phba)
13207 return -ENOMEM;
13208
13209 /* Perform generic PCI device enabling operation */
13210 error = lpfc_enable_pci_dev(phba);
079b5c91 13211 if (error)
3772a991 13212 goto out_free_phba;
3772a991 13213
13214 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
13215 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
13216 if (error)
13217 goto out_disable_pci_dev;
13218
13219 /* Set up SLI-4 specific device PCI memory space */
13220 error = lpfc_sli4_pci_mem_setup(phba);
13221 if (error) {
13222 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 13223 "1410 Failed to set up pci memory space.\n");
3772a991
JS
13224 goto out_disable_pci_dev;
13225 }
13226
13227 /* Set up SLI-4 Specific device driver resources */
13228 error = lpfc_sli4_driver_resource_setup(phba);
13229 if (error) {
13230 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13231 "1412 Failed to set up driver resource.\n");
13232 goto out_unset_pci_mem_s4;
13233 }
13234
19ca7609 13235 INIT_LIST_HEAD(&phba->active_rrq_list);
7d791df7 13236 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
19ca7609 13237
13238 /* Set up common device driver resources */
13239 error = lpfc_setup_driver_resource_phase2(phba);
13240 if (error) {
13241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 13242 "1414 Failed to set up driver resource.\n");
6c621a22 13243 goto out_unset_driver_resource_s4;
13244 }
13245
13246 /* Get the default values for Model Name and Description */
13247 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13248
3772a991 13249 /* Now, trying to enable interrupt and bring up the device */
5b75da2f 13250 cfg_mode = phba->cfg_use_msi;
5b75da2f 13251
7b15db32 13252 /* Put device to a known state before enabling interrupt */
cdb42bec 13253 phba->pport = NULL;
7b15db32 13254 lpfc_stop_port(phba);
895427bd 13255
13256 /* Init cpu_map array */
13257 lpfc_cpu_map_array_init(phba);
13258
13259 /* Init hba_eq_hdl array */
13260 lpfc_hba_eq_hdl_array_init(phba);
13261
13262 /* Configure and enable interrupt */
13263 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
13264 if (intr_mode == LPFC_INTR_ERROR) {
372c187b 13265 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13266 "0426 Failed to enable interrupt.\n");
13267 error = -ENODEV;
cdb42bec 13268 goto out_unset_driver_resource;
13269 }
13270 /* Default to single EQ for non-MSI-X */
895427bd 13271 if (phba->intr_type != MSIX) {
6a828b0f 13272 phba->cfg_irq_chann = 1;
2d7dbc4c 13273 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13274 if (phba->nvmet_support)
13275 phba->cfg_nvmet_mrq = 1;
13276 }
cdb42bec 13277 }
6a828b0f 13278 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
13279
13280 /* Create SCSI host to the physical port */
13281 error = lpfc_create_shost(phba);
13282 if (error) {
13283 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13284 "1415 Failed to create scsi host.\n");
13285 goto out_disable_intr;
13286 }
13287 vport = phba->pport;
13288 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
13289
13290 /* Configure sysfs attributes */
13291 error = lpfc_alloc_sysfs_attr(vport);
13292 if (error) {
13293 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13294 "1416 Failed to allocate sysfs attr\n");
13295 goto out_destroy_shost;
13296 }
13297
13298 /* Set up SLI-4 HBA */
13299 if (lpfc_sli4_hba_setup(phba)) {
372c187b 13300 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13301 "1421 Failed to set up hba\n");
13302 error = -ENODEV;
cdb42bec 13303 goto out_free_sysfs_attr;
98c9ea5c 13304 }
858c9f6c 13305
13306 /* Log the current active interrupt mode */
13307 phba->intr_mode = intr_mode;
13308 lpfc_log_intr_mode(phba, intr_mode);
13309
13310 /* Perform post initialization setup */
13311 lpfc_post_init_setup(phba);
dea3101e 13312
13313 /* NVME support in the FW, established earlier in the driver load,
13314 * corrects the FC4 type, making a check for nvme_support unnecessary.
13315 */
13316 if (phba->nvmet_support == 0) {
13317 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13318 /* Create NVME binding with nvme_fc_transport. This
13319 * ensures the vport is initialized. If the localport
13320 * create fails, it should not unload the driver to
13321 * support field issues.
13322 */
13323 error = lpfc_nvme_create_localport(vport);
13324 if (error) {
372c187b 13325 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13326 "6004 NVME registration "
13327 "failed, error x%x\n",
13328 error);
13329 }
13330 }
13331 }
895427bd 13332
13333 /* check for firmware upgrade or downgrade */
13334 if (phba->cfg_request_firmware_upgrade)
db6f1c2f 13335 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
52d52440 13336
13337 /* Check if there are static vports to be created. */
13338 lpfc_create_static_vport(phba);
13339
13340 /* Enable RAS FW log support */
13341 lpfc_sli4_ras_setup(phba);
13342
93a4d6f4 13343 INIT_LIST_HEAD(&phba->poll_list);
f861f596 13344 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
13345 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
13346
dea3101e 13347 return 0;
13348
13349out_free_sysfs_attr:
13350 lpfc_free_sysfs_attr(vport);
13351out_destroy_shost:
13352 lpfc_destroy_shost(phba);
13353out_disable_intr:
13354 lpfc_sli4_disable_intr(phba);
13355out_unset_driver_resource:
13356 lpfc_unset_driver_resource_phase2(phba);
13357out_unset_driver_resource_s4:
13358 lpfc_sli4_driver_resource_unset(phba);
13359out_unset_pci_mem_s4:
13360 lpfc_sli4_pci_mem_unset(phba);
13361out_disable_pci_dev:
13362 lpfc_disable_pci_dev(phba);
13363 if (shost)
13364 scsi_host_put(shost);
2e0fef85 13365out_free_phba:
3772a991 13366 lpfc_hba_free(phba);
dea3101e 13367 return error;
13368}
13369
e59058c4 13370/**
da0436e9 13371 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
13372 * @pdev: pointer to PCI device
13373 *
13374 * This routine is called from the kernel's PCI subsystem to detach a device with
13375 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
13376 * removed from PCI bus, it performs all the necessary cleanup for the HBA
13377 * device to be removed from the PCI subsystem properly.
e59058c4 13378 **/
6f039790 13379static void
da0436e9 13380lpfc_pci_remove_one_s4(struct pci_dev *pdev)
dea3101e 13381{
da0436e9 13382 struct Scsi_Host *shost = pci_get_drvdata(pdev);
2e0fef85 13383 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
eada272d 13384 struct lpfc_vport **vports;
da0436e9 13385 struct lpfc_hba *phba = vport->phba;
eada272d 13386 int i;
8a4df120 13387
da0436e9 13388 /* Mark the device unloading flag */
549e55cd 13389 spin_lock_irq(&phba->hbalock);
51ef4c26 13390 vport->load_flag |= FC_UNLOADING;
549e55cd 13391 spin_unlock_irq(&phba->hbalock);
2e0fef85 13392
13393 lpfc_free_sysfs_attr(vport);
13394
13395 /* Release all the vports against this physical port */
13396 vports = lpfc_create_vport_work_array(phba);
13397 if (vports != NULL)
13398 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
13399 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
13400 continue;
eada272d 13401 fc_vport_terminate(vports[i]->fc_vport);
587a37f6 13402 }
13403 lpfc_destroy_vport_work_array(phba, vports);
13404
95f0ef8a 13405 /* Remove FC host with the physical port */
858c9f6c 13406 fc_remove_host(shost);
e9b11083 13407 scsi_remove_host(shost);
da0436e9 13408
13409 /* Perform ndlp cleanup on the physical port. The nvme and nvmet
13410 * localports are destroyed afterwards to clean up all transport memory.
895427bd 13411 */
87af33fe 13412 lpfc_cleanup(vport);
13413 lpfc_nvmet_destroy_targetport(phba);
13414 lpfc_nvme_destroy_localport(vport);
87af33fe 13415
13416 /* De-allocate multi-XRI pools */
13417 if (phba->cfg_xri_rebalancing)
13418 lpfc_destroy_multixri_pools(phba);
13419
13420 /*
13421 * Bring down the SLI Layer. This step disables all interrupts,
13422 * clears the rings, discards all mailbox commands, and resets
13423 * the HBA FCoE function.
13424 */
13425 lpfc_debugfs_terminate(vport);
a257bf90 13426
1901762f 13427 lpfc_stop_hba_timers(phba);
523128e5 13428 spin_lock_irq(&phba->port_list_lock);
858c9f6c 13429 list_del_init(&vport->listentry);
523128e5 13430 spin_unlock_irq(&phba->port_list_lock);
858c9f6c 13431
3677a3a7 13432 /* Perform scsi free before driver resource_unset since scsi
da0436e9 13433 * buffers are released to their corresponding pools here.
2e0fef85 13434 */
5e5b511d 13435 lpfc_io_free(phba);
01649561 13436 lpfc_free_iocb_list(phba);
5e5b511d 13437 lpfc_sli4_hba_unset(phba);
67d12733 13438
0cdb84ec 13439 lpfc_unset_driver_resource_phase2(phba);
da0436e9 13440 lpfc_sli4_driver_resource_unset(phba);
ed957684 13441
13442 /* Unmap adapter Control and Doorbell registers */
13443 lpfc_sli4_pci_mem_unset(phba);
2e0fef85 13444
13445 /* Release PCI resources and disable device's PCI function */
13446 scsi_host_put(shost);
13447 lpfc_disable_pci_dev(phba);
2e0fef85 13448
da0436e9 13449 /* Finally, free the driver's device data structure */
3772a991 13450 lpfc_hba_free(phba);
2e0fef85 13451
da0436e9 13452 return;
dea3101e 13453}
13454
3a55b532 13455/**
da0436e9 13456 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
ef6fa16b 13457 * @dev_d: pointer to device
3a55b532 13458 *
13459 * This routine is called from the kernel's PCI subsystem to support system
13460 * Power Management (PM) for a device with SLI-4 interface spec. When PM invokes
13461 * this method, it quiesces the device by stopping the driver's worker
13462 * thread for the device, turning off the device's interrupt and DMA, and
13463 * bringing the device offline. Note that as the driver implements the minimum PM
13464 * requirements to a power-aware driver's PM support for suspend/resume -- all
13465 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
13466 * method call will be treated as SUSPEND and the driver will fully
13467 * reinitialize its device during resume() method call, the driver will set
13468 * device to PCI_D3hot state in PCI config space instead of setting it
3772a991 13469 * according to the @msg provided by the PM.
13470 *
13471 * Return code
13472 * 0 - driver suspended the device
13473 * Error otherwise
3a55b532 13474 **/
13475static int __maybe_unused
13476lpfc_pci_suspend_one_s4(struct device *dev_d)
3a55b532 13477{
ef6fa16b 13478 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
13479 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13480
13481 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
75baf696 13482 "2843 PCI device Power Management suspend.\n");
13483
13484 /* Bring down the device */
618a5230 13485 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13486 lpfc_offline(phba);
13487 kthread_stop(phba->worker_thread);
13488
13489 /* Disable interrupt from device */
da0436e9 13490 lpfc_sli4_disable_intr(phba);
5350d872 13491 lpfc_sli4_queue_destroy(phba);
3a55b532 13492
13493 return 0;
13494}
13495
13496/**
da0436e9 13497 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
ef6fa16b 13498 * @dev_d: pointer to device
3a55b532 13499 *
13500 * This routine is called from the kernel's PCI subsystem to support system
13501 * Power Management (PM) for a device with SLI-4 interface spec. When PM invokes
13502 * this method, it restores the device's PCI config space state and fully
13503 * reinitializes the device and brings it online. Note that as the driver
13504 * implements the minimum PM requirements to a power-aware driver's PM for
13505 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
13506 * to the suspend() method call will be treated as SUSPEND and the driver
13507 * will fully reinitialize its device during resume() method call, the device
13508 * will be set to PCI_D0 directly in PCI config space before restoring the
13509 * state.
13510 *
13511 * Return code
13512 * 0 - driver resumed the device
13513 * Error otherwise
3a55b532 13514 **/
13515static int __maybe_unused
13516lpfc_pci_resume_one_s4(struct device *dev_d)
3a55b532 13517{
ef6fa16b 13518 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
3a55b532 13519 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
5b75da2f 13520 uint32_t intr_mode;
13521 int error;
13522
13523 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
da0436e9 13524 "0292 PCI device Power Management resume.\n");
3a55b532 13525
da0436e9 13526 /* Startup the kernel thread for this host adapter. */
13527 phba->worker_thread = kthread_run(lpfc_do_work, phba,
13528 "lpfc_worker_%d", phba->brd_no);
13529 if (IS_ERR(phba->worker_thread)) {
13530 error = PTR_ERR(phba->worker_thread);
13531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
da0436e9 13532 "0293 PM resume failed to start worker "
13533 "thread: error=x%x.\n", error);
13534 return error;
13535 }
13536
5b75da2f 13537 /* Configure and enable interrupt */
da0436e9 13538 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
5b75da2f 13539 if (intr_mode == LPFC_INTR_ERROR) {
372c187b 13540 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
da0436e9 13541 "0294 PM resume Failed to enable interrupt\n");
13542 return -EIO;
13543 } else
13544 phba->intr_mode = intr_mode;
13545
13546 /* Restart HBA and bring it online */
13547 lpfc_sli_brdrestart(phba);
13548 lpfc_online(phba);
13549
13550 /* Log the current active interrupt mode */
13551 lpfc_log_intr_mode(phba, phba->intr_mode);
13552
13553 return 0;
13554}
13555
13556/**
13557 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
13558 * @phba: pointer to lpfc hba data structure.
13559 *
13560 * This routine is called to prepare the SLI4 device for PCI slot recover. It
13561 * aborts all the outstanding SCSI I/Os to the pci device.
13562 **/
13563static void
13564lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
13565{
372c187b 13566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13567 "2828 PCI channel I/O abort preparing for recovery\n");
13568 /*
13569 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
13570 * and let the SCSI mid-layer retry them to recover.
13571 */
db55fba8 13572 lpfc_sli_abort_fcp_rings(phba);
13573}
13574
13575/**
13576 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
13577 * @phba: pointer to lpfc hba data structure.
13578 *
13579 * This routine is called to prepare the SLI4 device for PCI slot reset. It
13580 * disables the device interrupt and pci device, and aborts the internal FCP
13581 * pending I/Os.
13582 **/
13583static void
13584lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
13585{
372c187b 13586 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13587 "2826 PCI channel disable preparing for reset\n");
13588
13589 /* Block any management I/Os to the device */
618a5230 13590 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
13591
13592 /* Block all SCSI devices' I/Os on the host */
13593 lpfc_scsi_dev_block(phba);
13594
13595 /* Flush all driver's outstanding I/Os as we are to reset */
13596 lpfc_sli_flush_io_rings(phba);
c3725bdc 13597
13598 /* stop all timers */
13599 lpfc_stop_hba_timers(phba);
13600
13601 /* Disable interrupt and pci device */
13602 lpfc_sli4_disable_intr(phba);
5350d872 13603 lpfc_sli4_queue_destroy(phba);
75baf696 13604 pci_disable_device(phba->pcidev);
13605}
13606
13607/**
13608 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
13609 * @phba: pointer to lpfc hba data structure.
13610 *
13611 * This routine is called to prepare the SLI4 device for PCI slot permanently
13612 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
13613 * pending I/Os.
13614 **/
13615static void
13616lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
13617{
372c187b 13618 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13619 "2827 PCI channel permanent disable for failure\n");
13620
13621 /* Block all SCSI devices' I/Os on the host */
13622 lpfc_scsi_dev_block(phba);
13623
13624 /* stop all timers */
13625 lpfc_stop_hba_timers(phba);
13626
13627 /* Clean up all driver's outstanding I/Os */
13628 lpfc_sli_flush_io_rings(phba);
13629}
13630
8d63f375 13631/**
da0436e9 13632 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
13633 * @pdev: pointer to PCI device.
13634 * @state: the current PCI connection state.
8d63f375 13635 *
13636 * This routine is called from the PCI subsystem for error handling to device
13637 * with SLI-4 interface spec. This function is called by the PCI subsystem
13638 * after a PCI bus error affecting this device has been detected. When this
13639 * function is invoked, it will need to stop all the I/Os and interrupt(s)
13640 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
13641 * for the PCI subsystem to perform proper recovery as desired.
13642 *
13643 * Return codes
 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
13644 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13645 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
e59058c4 13646 **/
3772a991 13647static pci_ers_result_t
da0436e9 13648lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
8d63f375 13649{
13650 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13651 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13652
13653 switch (state) {
13654 case pci_channel_io_normal:
13655 /* Non-fatal error, prepare for recovery */
13656 lpfc_sli4_prep_dev_for_recover(phba);
13657 return PCI_ERS_RESULT_CAN_RECOVER;
13658 case pci_channel_io_frozen:
13659 /* Fatal error, prepare for slot reset */
13660 lpfc_sli4_prep_dev_for_reset(phba);
13661 return PCI_ERS_RESULT_NEED_RESET;
13662 case pci_channel_io_perm_failure:
13663 /* Permanent failure, prepare for device down */
13664 lpfc_sli4_prep_dev_for_perm_failure(phba);
13665 return PCI_ERS_RESULT_DISCONNECT;
13666 default:
13667 /* Unknown state, prepare and request slot reset */
372c187b 13668 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13669 "2825 Unknown PCI error state: x%x\n", state);
13670 lpfc_sli4_prep_dev_for_reset(phba);
13671 return PCI_ERS_RESULT_NEED_RESET;
13672 }
13673}
13674
13675/**
da0436e9 13676 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
13677 * @pdev: pointer to PCI device.
13678 *
13679 * This routine is called from the PCI subsystem for error handling to device
13680 * with SLI-4 interface spec. It is called after PCI bus has been reset to
13681 * restart the PCI card from scratch, as if from a cold-boot. During the
13682 * PCI subsystem error recovery, after the driver returns
3772a991 13683 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
13684 * recovery and then call this routine before calling the .resume method to
13685 * recover the device. This function will initialize the HBA device, enable
13686 * the interrupt, but it will just put the HBA into an offline state without
13687 * passing any I/O traffic.
8d63f375 13688 *
e59058c4 13689 * Return codes
13690 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
13691 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8d63f375 13692 */
3772a991 13693static pci_ers_result_t
da0436e9 13694lpfc_io_slot_reset_s4(struct pci_dev *pdev)
8d63f375 13695{
13696 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13697 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13698 struct lpfc_sli *psli = &phba->sli;
13699 uint32_t intr_mode;
13700
13701 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
13702 if (pci_enable_device_mem(pdev)) {
13703 printk(KERN_ERR "lpfc: Cannot re-enable "
13704 "PCI device after reset.\n");
13705 return PCI_ERS_RESULT_DISCONNECT;
13706 }
13707
13708 pci_restore_state(pdev);
13709
13710 /*
13711 * As the new kernel behavior of pci_restore_state() API call clears
13712 * device saved_state flag, need to save the restored state again.
13713 */
13714 pci_save_state(pdev);
13715
13716 if (pdev->is_busmaster)
13717 pci_set_master(pdev);
13718
13719 spin_lock_irq(&phba->hbalock);
13720 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
13721 spin_unlock_irq(&phba->hbalock);
13722
13723 /* Configure and enable interrupt */
13724 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
13725 if (intr_mode == LPFC_INTR_ERROR) {
372c187b 13726 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13727 "2824 Cannot re-enable interrupt after "
13728 "slot reset.\n");
13729 return PCI_ERS_RESULT_DISCONNECT;
13730 } else
13731 phba->intr_mode = intr_mode;
13732
13733 /* Log the current active interrupt mode */
13734 lpfc_log_intr_mode(phba, phba->intr_mode);
13735
13736 return PCI_ERS_RESULT_RECOVERED;
13737}
13738
13739/**
da0436e9 13740 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
e59058c4 13741 * @pdev: pointer to PCI device
8d63f375 13742 *
3772a991 13743 * This routine is called from the PCI subsystem for error handling to device
da0436e9 13744 * with SLI-4 interface spec. It is called when kernel error recovery tells
13745 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
13746 * error recovery. After this call, traffic can start to flow from this device
13747 * again.
da0436e9 13748 **/
3772a991 13749static void
da0436e9 13750lpfc_io_resume_s4(struct pci_dev *pdev)
8d63f375 13751{
13752 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13753 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13754
13755 /*
13756 * In case of slot reset, as function reset is performed through
13757 * mailbox command which needs DMA to be enabled, this operation
13758 * has to be moved to the io resume phase. Taking device offline
13759 * will perform the necessary cleanup.
13760 */
13761 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
13762 /* Perform device reset */
618a5230 13763 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
13764 lpfc_offline(phba);
13765 lpfc_sli_brdrestart(phba);
13766 /* Bring the device back online */
13767 lpfc_online(phba);
13768 }
13769}
13770
13771/**
13772 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
13773 * @pdev: pointer to PCI device
13774 * @pid: pointer to PCI device identifier
13775 *
13776 * This routine is to be registered to the kernel's PCI subsystem. When an
13777 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
13778 * at PCI device-specific information of the device and driver to see if the
13779 * driver can support this kind of device. If the match is
13780 * successful, the driver core invokes this routine. This routine dispatches
13781 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
13782 * do all the initialization that it needs to do to handle the HBA device
13783 * properly.
13784 *
13785 * Return code
13786 * 0 - driver can claim the device
13787 * negative value - driver can not claim the device
13788 **/
6f039790 13789static int
13790lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
13791{
13792 int rc;
8fa38513 13793 struct lpfc_sli_intf intf;
3772a991 13794
28baac74 13795 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
13796 return -ENODEV;
13797
8fa38513 13798 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
28baac74 13799 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
da0436e9 13800 rc = lpfc_pci_probe_one_s4(pdev, pid);
8fa38513 13801 else
3772a991 13802 rc = lpfc_pci_probe_one_s3(pdev, pid);
8fa38513 13803
13804 return rc;
13805}
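
/*
 * Dispatch sketch: the probe decision above reduces to one config-space
 * read. A hypothetical standalone form (not part of the driver),
 * assuming only the LPFC_SLI_INTF register layout used above:
 *
 *   static bool example_is_sli4(struct pci_dev *pdev)
 *   {
 *           struct lpfc_sli_intf intf;
 *
 *           if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
 *                   return false;
 *           return bf_get(lpfc_sli_intf_valid, &intf) ==
 *                                  LPFC_SLI_INTF_VALID &&
 *                  bf_get(lpfc_sli_intf_slirev, &intf) ==
 *                                  LPFC_SLI_INTF_REV_SLI4;
 *   }
 */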
13806
13807/**
13808 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
13809 * @pdev: pointer to PCI device
13810 *
13811 * This routine is to be registered to the kernel's PCI subsystem. When an
13812 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
13813 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
13814 * remove routine, which will perform all the necessary cleanup for the
13815 * device to be removed from the PCI subsystem properly.
13816 **/
6f039790 13817static void
13818lpfc_pci_remove_one(struct pci_dev *pdev)
13819{
13820 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13821 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13822
13823 switch (phba->pci_dev_grp) {
13824 case LPFC_PCI_DEV_LP:
13825 lpfc_pci_remove_one_s3(pdev);
13826 break;
13827 case LPFC_PCI_DEV_OC:
13828 lpfc_pci_remove_one_s4(pdev);
13829 break;
3772a991 13830 default:
372c187b 13831 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13832 "1424 Invalid PCI device group: 0x%x\n",
13833 phba->pci_dev_grp);
13834 break;
13835 }
13836 return;
13837}
13838
13839/**
13840 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
ef6fa16b 13841 * @dev: pointer to device
13842 *
13843 * This routine is to be registered to the kernel's PCI subsystem to support
13844 * system Power Management (PM). When PM invokes this method, it dispatches
13845 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
13846 * suspend the device.
13847 *
13848 * Return code
13849 * 0 - driver suspended the device
13850 * Error otherwise
13851 **/
13852static int __maybe_unused
13853lpfc_pci_suspend_one(struct device *dev)
3772a991 13854{
ef6fa16b 13855 struct Scsi_Host *shost = dev_get_drvdata(dev);
13856 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13857 int rc = -ENODEV;
13858
13859 switch (phba->pci_dev_grp) {
13860 case LPFC_PCI_DEV_LP:
ef6fa16b 13861 rc = lpfc_pci_suspend_one_s3(dev);
3772a991 13862 break;
da0436e9 13863 case LPFC_PCI_DEV_OC:
ef6fa16b 13864 rc = lpfc_pci_suspend_one_s4(dev);
da0436e9 13865 break;
3772a991 13866 default:
372c187b 13867 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13868 "1425 Invalid PCI device group: 0x%x\n",
13869 phba->pci_dev_grp);
13870 break;
13871 }
13872 return rc;
13873}
13874
13875/**
13876 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
ef6fa16b 13877 * @dev: pointer to device
13878 *
13879 * This routine is to be registered to the kernel's PCI subsystem to support
13880 * system Power Management (PM). When PM invokes this method, it dispatches
13881 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
13882 * resume the device.
13883 *
13884 * Return code
13886 * 0 - driver resumed the device
13886 * Error otherwise
13887 **/
13888static int __maybe_unused
13889lpfc_pci_resume_one(struct device *dev)
3772a991 13890{
ef6fa16b 13891 struct Scsi_Host *shost = dev_get_drvdata(dev);
13892 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13893 int rc = -ENODEV;
13894
13895 switch (phba->pci_dev_grp) {
13896 case LPFC_PCI_DEV_LP:
ef6fa16b 13897 rc = lpfc_pci_resume_one_s3(dev);
3772a991 13898 break;
da0436e9 13899 case LPFC_PCI_DEV_OC:
ef6fa16b 13900 rc = lpfc_pci_resume_one_s4(dev);
da0436e9 13901 break;
3772a991 13902 default:
372c187b 13903 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13904 "1426 Invalid PCI device group: 0x%x\n",
13905 phba->pci_dev_grp);
13906 break;
13907 }
13908 return rc;
13909}
13910
13911/**
13912 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
13913 * @pdev: pointer to PCI device.
13914 * @state: the current PCI connection state.
13915 *
13916 * This routine is registered to the PCI subsystem for error handling. This
13917 * function is called by the PCI subsystem after a PCI bus error affecting
13918 * this device has been detected. When this routine is invoked, it dispatches
13919 * the action to the proper SLI-3 or SLI-4 device error detected handling
13920 * routine, which will perform the proper error detected operation.
13921 *
13922 * Return codes
13923 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
13924 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13925 **/
13926static pci_ers_result_t
13927lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
13928{
13929 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13930 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13931 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13932
13933 switch (phba->pci_dev_grp) {
13934 case LPFC_PCI_DEV_LP:
13935 rc = lpfc_io_error_detected_s3(pdev, state);
13936 break;
13937 case LPFC_PCI_DEV_OC:
13938 rc = lpfc_io_error_detected_s4(pdev, state);
13939 break;
3772a991 13940 default:
372c187b 13941 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13942 "1427 Invalid PCI device group: 0x%x\n",
13943 phba->pci_dev_grp);
13944 break;
13945 }
13946 return rc;
13947}
13948
13949/**
13950 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
13951 * @pdev: pointer to PCI device.
13952 *
13953 * This routine is registered to the PCI subsystem for error handling. This
13954 * function is called after PCI bus has been reset to restart the PCI card
13955 * from scratch, as if from a cold-boot. When this routine is invoked, it
13956 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
13957 * routine, which will perform the proper device reset.
13958 *
13959 * Return codes
13960 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
13961 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
13962 **/
13963static pci_ers_result_t
13964lpfc_io_slot_reset(struct pci_dev *pdev)
13965{
13966 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13967 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
13968 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
13969
13970 switch (phba->pci_dev_grp) {
13971 case LPFC_PCI_DEV_LP:
13972 rc = lpfc_io_slot_reset_s3(pdev);
13973 break;
13974 case LPFC_PCI_DEV_OC:
13975 rc = lpfc_io_slot_reset_s4(pdev);
13976 break;
3772a991 13977 default:
372c187b 13978 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13979 "1428 Invalid PCI device group: 0x%x\n",
13980 phba->pci_dev_grp);
13981 break;
13982 }
13983 return rc;
13984}
13985
13986/**
13987 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
13988 * @pdev: pointer to PCI device
13989 *
13990 * This routine is registered to the PCI subsystem for error handling. It
13991 * is called when kernel error recovery tells the lpfc driver that it is
13992 * OK to resume normal PCI operation after PCI bus error recovery. When
13993 * this routine is invoked, it dispatches the action to the proper SLI-3
13994 * or SLI-4 device io_resume routine, which will resume the device operation.
13995 **/
13996static void
13997lpfc_io_resume(struct pci_dev *pdev)
13998{
13999 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14000 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14001
14002 switch (phba->pci_dev_grp) {
14003 case LPFC_PCI_DEV_LP:
14004 lpfc_io_resume_s3(pdev);
14005 break;
14006 case LPFC_PCI_DEV_OC:
14007 lpfc_io_resume_s4(pdev);
14008 break;
3772a991 14009 default:
372c187b 14010 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14011 "1429 Invalid PCI device group: 0x%x\n",
14012 phba->pci_dev_grp);
14013 break;
14014 }
14015 return;
14016}
14017
14018/**
14019 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
14020 * @phba: pointer to lpfc hba data structure.
14021 *
14022 * This routine checks to see if OAS is supported for this adapter. If
14023 * supported, the configured Flash Optimized Fabric flag is set. Otherwise,
14024 * the enable oas flag is cleared and the pool created for OAS device data
14025 * is destroyed.
14026 *
14027 **/
c7092975 14028static void
14029lpfc_sli4_oas_verify(struct lpfc_hba *phba)
14030{
14031
14032 if (!phba->cfg_EnableXLane)
14033 return;
14034
14035 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
14036 phba->cfg_fof = 1;
14037 } else {
f38fa0bb 14038 phba->cfg_fof = 0;
c3e5aac3 14039 mempool_destroy(phba->device_data_mem_pool);
14040 phba->device_data_mem_pool = NULL;
14041 }
14042
14043 return;
14044}
14045
14046/**
14047 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
14048 * @phba: pointer to lpfc hba data structure.
14049 *
14050 * This routine checks to see if RAS is supported by the adapter. Check the
14051 * function through which RAS support enablement is to be done.
14052 **/
14053void
14054lpfc_sli4_ras_init(struct lpfc_hba *phba)
14055{
14056 switch (phba->pcidev->device) {
14057 case PCI_DEVICE_ID_LANCER_G6_FC:
14058 case PCI_DEVICE_ID_LANCER_G7_FC:
14059 phba->ras_fwlog.ras_hwsupport = true;
14060 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
14061 phba->cfg_ras_fwlog_buffsize)
14062 phba->ras_fwlog.ras_enabled = true;
14063 else
14064 phba->ras_fwlog.ras_enabled = false;
14065 break;
14066 default:
14067 phba->ras_fwlog.ras_hwsupport = false;
14068 }
14069}
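/*
 * Example: if cfg_ras_fwlog_func is 0, only PCI function 0 of a Lancer
 * G6/G7 adapter sets ras_enabled (assuming a nonzero log buffer size);
 * the other functions report hardware support but leave ras_enabled
 * false.
 */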
14070
1ba981fd 14071
dea3101e 14072MODULE_DEVICE_TABLE(pci, lpfc_id_table);
14073
a55b2d21 14074static const struct pci_error_handlers lpfc_err_handler = {
14075 .error_detected = lpfc_io_error_detected,
14076 .slot_reset = lpfc_io_slot_reset,
14077 .resume = lpfc_io_resume,
14078};
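/*
 * The PCI error-recovery core drives these callbacks in a fixed order:
 * error_detected() is called first when an uncorrectable error is seen,
 * slot_reset() after the link/slot has been reset, and resume() once
 * normal I/O may restart. Returning PCI_ERS_RESULT_DISCONNECT from
 * slot_reset() above aborts the sequence and offlines the device.
 */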
14079
14080static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
14081 lpfc_pci_suspend_one,
14082 lpfc_pci_resume_one);
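/*
 * SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops that routes all
 * system-sleep transitions (suspend/freeze/poweroff and their resume
 * counterparts) to the two handlers named here; runtime-PM callbacks
 * are left unset.
 */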
14083
dea3101e 14084static struct pci_driver lpfc_driver = {
14085 .name = LPFC_DRIVER_NAME,
14086 .id_table = lpfc_id_table,
14087 .probe = lpfc_pci_probe_one,
6f039790 14088 .remove = lpfc_pci_remove_one,
85e8a239 14089 .shutdown = lpfc_pci_remove_one,
ef6fa16b 14090 .driver.pm = &lpfc_pci_pm_ops_one,
2e0fef85 14091 .err_handler = &lpfc_err_handler,
dea3101e 14092};
14093
3ef6d24c 14094static const struct file_operations lpfc_mgmt_fop = {
858feacd 14095 .owner = THIS_MODULE,
14096};
14097
14098static struct miscdevice lpfc_mgmt_dev = {
14099 .minor = MISC_DYNAMIC_MINOR,
14100 .name = "lpfcmgmt",
14101 .fops = &lpfc_mgmt_fop,
14102};
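/*
 * MISC_DYNAMIC_MINOR lets the misc core pick a free minor, and udev then
 * creates /dev/lpfcmgmt. Since the fops carry nothing beyond .owner, the
 * node exists purely so management tools can discover the driver; a
 * user-space consumer would do no more than (illustrative only):
 *
 *	int fd = open("/dev/lpfcmgmt", O_RDONLY);
 */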
14103
e59058c4 14104/**
3621a710 14105 * lpfc_init - lpfc module initialization routine
14106 *
14107 * This routine is to be invoked when the lpfc module is loaded into the
14108 * kernel. The special kernel macro module_init() is used to indicate the
 14109 * role of this routine to the kernel as the lpfc module entry point.
14110 *
14111 * Return codes
14112 * 0 - successful
14113 * -ENOMEM - FC attach transport failed
14114 * all others - failed
14115 */
dea3101e 14116static int __init
14117lpfc_init(void)
14118{
14119 int error = 0;
14120
14121 pr_info(LPFC_MODULE_DESC "\n");
14122 pr_info(LPFC_COPYRIGHT "\n");
dea3101e 14123
14124 error = misc_register(&lpfc_mgmt_dev);
14125 if (error)
 14126		printk(KERN_ERR "Could not register lpfcmgmt device, "
 14127			"misc_register returned status %d\n", error);
14128
1eaff536 14129 error = -ENOMEM;
14130 lpfc_transport_functions.vport_create = lpfc_vport_create;
14131 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
dea3101e 14132 lpfc_transport_template =
14133 fc_attach_transport(&lpfc_transport_functions);
7ee5d43e 14134 if (lpfc_transport_template == NULL)
1eaff536 14135 goto unregister;
14136 lpfc_vport_transport_template =
14137 fc_attach_transport(&lpfc_vport_transport_functions);
14138 if (lpfc_vport_transport_template == NULL) {
14139 fc_release_transport(lpfc_transport_template);
1eaff536 14140 goto unregister;
7ee5d43e 14141 }
840a4701 14142 lpfc_wqe_cmd_template();
bd3061ba 14143 lpfc_nvmet_cmd_template();
14144
14145 /* Initialize in case vector mapping is needed */
2ea259ee 14146 lpfc_present_cpu = num_present_cpus();
7bb03bbf 14147
14148 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
14149 "lpfc/sli4:online",
14150 lpfc_cpu_online, lpfc_cpu_offline);
14151 if (error < 0)
14152 goto cpuhp_failure;
14153 lpfc_cpuhp_state = error;
14154
dea3101e 14155 error = pci_register_driver(&lpfc_driver);
14156 if (error)
14157 goto unwind;
14158
14159 return error;
14160
14161unwind:
14162 cpuhp_remove_multi_state(lpfc_cpuhp_state);
14163cpuhp_failure:
14164 fc_release_transport(lpfc_transport_template);
14165 fc_release_transport(lpfc_vport_transport_template);
14166unregister:
14167 misc_deregister(&lpfc_mgmt_dev);
dea3101e 14168
14169 return error;
14170}
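/*
 * cpuhp_setup_state_multi() above only reserves the dynamic hotplug state
 * and registers the online/offline callbacks; nothing runs until a
 * per-HBA instance is attached. A sketch of the add/remove pattern (the
 * real code lives in lpfc_cpuhp_add()/__lpfc_cpuhp_remove()):
 *
 *	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
 *	...
 *	cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
 */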
14171
14172void lpfc_dmp_dbg(struct lpfc_hba *phba)
14173{
14174 unsigned int start_idx;
14175 unsigned int dbg_cnt;
14176 unsigned int temp_idx;
14177 int i;
14178 int j = 0;
14179 unsigned long rem_nsec, iflags;
14180 bool log_verbose = false;
14181 struct lpfc_vport *port_iterator;
372c187b 14182
14183 /* Don't dump messages if we explicitly set log_verbose for the
14184 * physical port or any vport.
14185 */
14186 if (phba->cfg_log_verbose)
14187 return;
14188
14189 spin_lock_irqsave(&phba->port_list_lock, iflags);
14190 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
14191 if (port_iterator->load_flag & FC_UNLOADING)
14192 continue;
14193 if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
14194 if (port_iterator->cfg_log_verbose)
14195 log_verbose = true;
14196
14197 scsi_host_put(lpfc_shost_from_vport(port_iterator));
14198
14199 if (log_verbose) {
14200 spin_unlock_irqrestore(&phba->port_list_lock,
14201 iflags);
14202 return;
14203 }
14204 }
14205 }
e8613084 14206 spin_unlock_irqrestore(&phba->port_list_lock, iflags);
0b3ad32e 14207
14208 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
14209 return;
14210
14211 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
14212 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
14213 if (!dbg_cnt)
14214 goto out;
14215 temp_idx = start_idx;
14216 if (dbg_cnt >= DBG_LOG_SZ) {
14217 dbg_cnt = DBG_LOG_SZ;
14218 temp_idx -= 1;
14219 } else {
14220 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
14221 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
14222 } else {
77dd7d7b 14223 if (start_idx < dbg_cnt)
372c187b 14224 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
77dd7d7b 14225 else
372c187b 14226 start_idx -= dbg_cnt;
14227 }
14228 }
14229 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
14230 start_idx, temp_idx, dbg_cnt);
14231
14232 for (i = 0; i < dbg_cnt; i++) {
14233 if ((start_idx + i) < DBG_LOG_SZ)
77dd7d7b 14234 temp_idx = (start_idx + i) % DBG_LOG_SZ;
14235 else
14236 temp_idx = j++;
14237 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
14238 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
14239 temp_idx,
14240 (unsigned long)phba->dbg_log[temp_idx].t_ns,
14241 rem_nsec / 1000,
14242 phba->dbg_log[temp_idx].log);
14243 }
0b3ad32e 14244out:
14245 atomic_set(&phba->dbg_log_cnt, 0);
14246 atomic_set(&phba->dbg_log_dmping, 0);
14247}
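/*
 * Worked example of the wrap-around math above, taking DBG_LOG_SZ as 256
 * for illustration: with 10 messages logged and the next free slot at
 * index 5, start_idx is 5 and dbg_cnt is 10, so start_idx < dbg_cnt and
 * the dump begins at 256 - (10 - 5) = 251. The print loop then emits
 * entries 251..255 followed by 0..4, i.e. oldest to newest.
 */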
14248
7fa03c77 14249__printf(2, 3)
14250void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
14251{
14252 unsigned int idx;
14253 va_list args;
14254 int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
14255 struct va_format vaf;
14256
14257
14258 va_start(args, fmt);
14259 if (unlikely(dbg_dmping)) {
14260 vaf.fmt = fmt;
14261 vaf.va = &args;
14262 dev_info(&phba->pcidev->dev, "%pV", &vaf);
14263 va_end(args);
14264 return;
14265 }
14266 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
14267 DBG_LOG_SZ;
14268
14269 atomic_inc(&phba->dbg_log_cnt);
14270
14271 vscnprintf(phba->dbg_log[idx].log,
14272 sizeof(phba->dbg_log[idx].log), fmt, args);
14273 va_end(args);
14274
14275 phba->dbg_log[idx].t_ns = local_clock();
14276}
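/*
 * The atomic_fetch_add() above hands every caller a unique ring slot, so
 * concurrent callers never format into the same entry. A minimal
 * user-space sketch of the same lock-free pattern using C11 atomics
 * (illustrative names, not driver code):
 *
 *	#include <stdatomic.h>
 *	#include <stdio.h>
 *
 *	static _Atomic unsigned int log_idx;
 *	static char log_ring[256][128];
 *
 *	static void ring_log(const char *msg)
 *	{
 *		unsigned int idx = atomic_fetch_add(&log_idx, 1) % 256;
 *
 *		snprintf(log_ring[idx], sizeof(log_ring[idx]), "%s", msg);
 *	}
 */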
14277
e59058c4 14278/**
3621a710 14279 * lpfc_exit - lpfc module removal routine
14280 *
14281 * This routine is invoked when the lpfc module is removed from the kernel.
14282 * The special kernel macro module_exit() is used to indicate the role of
 14283 * this routine to the kernel as the lpfc module exit point.
14284 */
dea3101e 14285static void __exit
14286lpfc_exit(void)
14287{
3ef6d24c 14288 misc_deregister(&lpfc_mgmt_dev);
dea3101e 14289 pci_unregister_driver(&lpfc_driver);
93a4d6f4 14290 cpuhp_remove_multi_state(lpfc_cpuhp_state);
dea3101e 14291 fc_release_transport(lpfc_transport_template);
458c083e 14292 fc_release_transport(lpfc_vport_transport_template);
7973967f 14293 idr_destroy(&lpfc_hba_index);
dea3101e 14294}
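/*
 * Teardown ordering matters above: pci_unregister_driver() removes every
 * HBA (and with it each registered cpuhp instance) before
 * cpuhp_remove_multi_state() retires the hotplug state, and the FC
 * transport templates are released only once no driver code can still
 * reference them.
 */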
14295
14296module_init(lpfc_init);
14297module_exit(lpfc_exit);
14298MODULE_LICENSE("GPL");
14299MODULE_DESCRIPTION(LPFC_MODULE_DESC);
d080abe0 14300MODULE_AUTHOR("Broadcom");
dea3101e 14301MODULE_VERSION("0:" LPFC_DRIVER_VERSION);