scsi: lpfc: Fix nvme sg_seg_cnt display if HBA does not support NVME
[linux-2.6-block.git] / drivers / scsi / lpfc / lpfc_init.c
dea3101e 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
c44ce173 3 * Fibre Channel Host Bus Adapters. *
0d041215 4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
3e21d1cb 5 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
50611577 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
c44ce173 7 * EMULEX and SLI are trademarks of Emulex. *
d080abe0 8 * www.broadcom.com *
c44ce173 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea3101e 10 * *
11 * This program is free software; you can redistribute it and/or *
c44ce173 12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
dea3101e 22 *******************************************************************/
23
dea3101e 24#include <linux/blkdev.h>
25#include <linux/delay.h>
26#include <linux/dma-mapping.h>
27#include <linux/idr.h>
28#include <linux/interrupt.h>
acf3368f 29#include <linux/module.h>
dea3101e 30#include <linux/kthread.h>
31#include <linux/pci.h>
32#include <linux/spinlock.h>
92d7f7b0 33#include <linux/ctype.h>
0d878419 34#include <linux/aer.h>
5a0e3ad6 35#include <linux/slab.h>
52d52440 36#include <linux/firmware.h>
3ef6d24c 37#include <linux/miscdevice.h>
7bb03bbf 38#include <linux/percpu.h>
895427bd 39#include <linux/msi.h>
6a828b0f 40#include <linux/irq.h>
286871a6 41#include <linux/bitops.h>
31f06d2e 42#include <linux/crash_dump.h>
dea3101e 43
91886523 44#include <scsi/scsi.h>
dea3101e 45#include <scsi/scsi_device.h>
46#include <scsi/scsi_host.h>
47#include <scsi/scsi_transport_fc.h>
86c67379 48#include <scsi/scsi_tcq.h>
49#include <scsi/fc/fc_fs.h>
50
51#include <linux/nvme-fc-driver.h>
dea3101e 52
da0436e9 53#include "lpfc_hw4.h"
dea3101e 54#include "lpfc_hw.h"
55#include "lpfc_sli.h"
da0436e9 56#include "lpfc_sli4.h"
ea2151b4 57#include "lpfc_nl.h"
dea3101e 58#include "lpfc_disc.h"
dea3101e 59#include "lpfc.h"
895427bd 60#include "lpfc_scsi.h"
61#include "lpfc_nvme.h"
86c67379 62#include "lpfc_nvmet.h"
dea3101e 63#include "lpfc_logmsg.h"
64#include "lpfc_crtn.h"
92d7f7b0 65#include "lpfc_vport.h"
dea3101e 66#include "lpfc_version.h"
12f44457 67#include "lpfc_ids.h"
dea3101e 68
81301a9b 69char *_dump_buf_data;
70unsigned long _dump_buf_data_order;
71char *_dump_buf_dif;
72unsigned long _dump_buf_dif_order;
73spinlock_t _dump_buf_lock;
74
7bb03bbf 75/* Used when mapping IRQ vectors in a driver centric manner */
d7b761b0 76static uint32_t lpfc_present_cpu;
7bb03bbf 77
dea3101e 78static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
79static int lpfc_post_rcv_buf(struct lpfc_hba *);
5350d872 80static int lpfc_sli4_queue_verify(struct lpfc_hba *);
da0436e9 81static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
82static int lpfc_setup_endian_order(struct lpfc_hba *);
da0436e9 83static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
8a9d2e80 84static void lpfc_free_els_sgl_list(struct lpfc_hba *);
f358dd0c 85static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
8a9d2e80 86static void lpfc_init_sgl_list(struct lpfc_hba *);
da0436e9 87static int lpfc_init_active_sgl_array(struct lpfc_hba *);
88static void lpfc_free_active_sgl(struct lpfc_hba *);
89static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
90static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
91static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
92static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
93static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
618a5230 94static void lpfc_sli4_disable_intr(struct lpfc_hba *);
95static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
1ba981fd 96static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
6a828b0f 97static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
aa6ff309 98static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
dea3101e 99
100static struct scsi_transport_template *lpfc_transport_template = NULL;
92d7f7b0 101static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
dea3101e 102static DEFINE_IDR(lpfc_hba_index);
f358dd0c 103#define LPFC_NVMET_BUF_POST 254
dea3101e 104
e59058c4 105/**
3621a710 106 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
e59058c4 107 * @phba: pointer to lpfc hba data structure.
108 *
109 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
110 * mailbox command. It retrieves the revision information from the HBA and
111 * collects the Vital Product Data (VPD) about the HBA for preparing the
112 * configuration of the HBA.
113 *
114 * Return codes:
115 * 0 - success.
116 * -ERESTART - requests the SLI layer to reset the HBA and try again.
117 * Any other value - indicates an error.
118 **/
dea3101e 119int
2e0fef85 120lpfc_config_port_prep(struct lpfc_hba *phba)
dea3101e 121{
122 lpfc_vpd_t *vp = &phba->vpd;
123 int i = 0, rc;
124 LPFC_MBOXQ_t *pmb;
125 MAILBOX_t *mb;
126 char *lpfc_vpd_data = NULL;
127 uint16_t offset = 0;
128 static char licensed[56] =
129 "key unlock for use with gnu public licensed code only\0";
65a29c16 130 static int init_key = 1;
dea3101e 131
132 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
133 if (!pmb) {
2e0fef85 134 phba->link_state = LPFC_HBA_ERROR;
dea3101e 135 return -ENOMEM;
136 }
137
04c68496 138 mb = &pmb->u.mb;
2e0fef85 139 phba->link_state = LPFC_INIT_MBX_CMDS;
dea3101e 140
141 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
65a29c16 142 if (init_key) {
143 uint32_t *ptext = (uint32_t *) licensed;
dea3101e 144
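 /* The READ_NVPARM mailbox expects the license key below as
  * big-endian 32-bit words, so it is byte-swapped once on
  * first use (guarded by init_key).
  */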
65a29c16 145 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
146 *ptext = cpu_to_be32(*ptext);
147 init_key = 0;
148 }
dea3101e 149
150 lpfc_read_nv(phba, pmb);
151 memset((char*)mb->un.varRDnvp.rsvd3, 0,
152 sizeof (mb->un.varRDnvp.rsvd3));
153 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
154 sizeof (licensed));
155
156 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
157
158 if (rc != MBX_SUCCESS) {
ed957684 159 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
e8b62011 160 "0324 Config Port initialization "
dea3101e 161 "error, mbxCmd x%x READ_NVPARM, "
162 "mbxStatus x%x\n",
dea3101e 163 mb->mbxCommand, mb->mbxStatus);
164 mempool_free(pmb, phba->mbox_mem_pool);
165 return -ERESTART;
166 }
167 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
2e0fef85 168 sizeof(phba->wwnn));
169 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
170 sizeof(phba->wwpn));
dea3101e 171 }
172
dfb75133 173 /*
174 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
175 * which was already set in lpfc_get_cfgparam()
176 */
177 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
92d7f7b0 178
dea3101e 179 /* Setup and issue mailbox READ REV command */
180 lpfc_read_rev(phba, pmb);
181 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
182 if (rc != MBX_SUCCESS) {
ed957684 183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 184 "0439 Adapter failed to init, mbxCmd x%x "
dea3101e 185 "READ_REV, mbxStatus x%x\n",
dea3101e 186 mb->mbxCommand, mb->mbxStatus);
187 mempool_free( pmb, phba->mbox_mem_pool);
188 return -ERESTART;
189 }
190
92d7f7b0 191
1de933f3 192 /*
193 * The value of rr must be 1 since the driver set the cv field to 1.
194 * This setting requires the FW to set all revision fields.
dea3101e 195 */
1de933f3 196 if (mb->un.varRdRev.rr == 0) {
dea3101e 197 vp->rev.rBit = 0;
1de933f3 198 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 199 "0440 Adapter failed to init, READ_REV has "
200 "missing revision information.\n");
dea3101e 201 mempool_free(pmb, phba->mbox_mem_pool);
202 return -ERESTART;
dea3101e 203 }
204
495a714c 205 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
206 mempool_free(pmb, phba->mbox_mem_pool);
ed957684 207 return -EINVAL;
495a714c 208 }
ed957684 209
dea3101e 210 /* Save information as VPD data */
1de933f3 211 vp->rev.rBit = 1;
92d7f7b0 212 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
1de933f3 213 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
214 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
215 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
216 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
dea3101e 217 vp->rev.biuRev = mb->un.varRdRev.biuRev;
218 vp->rev.smRev = mb->un.varRdRev.smRev;
219 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
220 vp->rev.endecRev = mb->un.varRdRev.endecRev;
221 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
222 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
223 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
224 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
225 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
226 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
227
92d7f7b0 228 /* If the sli feature level is less than 9, we must
229 * tear down all RPIs and VPIs on link down if NPIV
230 * is enabled.
231 */
232 if (vp->rev.feaLevelHigh < 9)
233 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
234
dea3101e 235 if (lpfc_is_LC_HBA(phba->pcidev->device))
236 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
237 sizeof (phba->RandomData));
238
dea3101e 239 /* Get adapter VPD information */
dea3101e 240 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
241 if (!lpfc_vpd_data)
d7c255b2 242 goto out_free_mbox;
dea3101e 243 do {
a0c87cbd 244 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
dea3101e 245 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
246
247 if (rc != MBX_SUCCESS) {
248 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011 249 "0441 VPD not present on adapter, "
dea3101e 250 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
dea3101e 251 mb->mbxCommand, mb->mbxStatus);
74b72a59 252 mb->un.varDmp.word_cnt = 0;
dea3101e 253 }
04c68496 254 /* dump mem may return a zero when finished or we got a
255 * mailbox error, either way we are done.
256 */
257 if (mb->un.varDmp.word_cnt == 0)
258 break;
74b72a59 259 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
260 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
d7c255b2 261 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
262 lpfc_vpd_data + offset,
92d7f7b0 263 mb->un.varDmp.word_cnt);
dea3101e 264 offset += mb->un.varDmp.word_cnt;
74b72a59 265 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
266 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
dea3101e 267
268 kfree(lpfc_vpd_data);
dea3101e 269out_free_mbox:
270 mempool_free(pmb, phba->mbox_mem_pool);
271 return 0;
272}
273
e59058c4 274/**
3621a710 275 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
e59058c4 276 * @phba: pointer to lpfc hba data structure.
277 * @pmboxq: pointer to the driver internal queue element for mailbox command.
278 *
279 * This is the completion handler for driver's configuring asynchronous event
280 * mailbox command to the device. If the mailbox command returns successfully,
281 * it will set internal async event support flag to 1; otherwise, it will
282 * set internal async event support flag to 0.
283 **/
57127f15 284static void
285lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
286{
04c68496 287 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
57127f15 288 phba->temp_sensor_support = 1;
289 else
290 phba->temp_sensor_support = 0;
291 mempool_free(pmboxq, phba->mbox_mem_pool);
292 return;
293}
294
97207482 295/**
3621a710 296 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
97207482 297 * @phba: pointer to lpfc hba data structure.
298 * @pmboxq: pointer to the driver internal queue element for mailbox command.
299 *
300 * This is the completion handler for dump mailbox command for getting
301 * wake up parameters. When this command completes, the response contains
302 * the Option ROM version of the HBA. This function translates the version
303 * number into a human readable string and stores it in OptionROMVersion.
304 **/
305static void
306lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
307{
308 struct prog_id *prg;
309 uint32_t prog_id_word;
310 char dist = ' ';
311 /* character array used for decoding dist type. */
312 char dist_char[] = "nabx";
313
04c68496 314 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
9f1e1b50 315 mempool_free(pmboxq, phba->mbox_mem_pool);
97207482 316 return;
9f1e1b50 317 }
97207482 318
319 prg = (struct prog_id *) &prog_id_word;
320
321 /* word 7 contain option rom version */
04c68496 322 prog_id_word = pmboxq->u.mb.un.varWords[7];
97207482 323
324 /* Decode the Option rom version word to a readable string */
325 if (prg->dist < 4)
326 dist = dist_char[prg->dist];
327
328 if ((prg->dist == 3) && (prg->num == 0))
a2fc4aef 329 snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
97207482 330 prg->ver, prg->rev, prg->lev);
331 else
a2fc4aef 332 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
97207482 333 prg->ver, prg->rev, prg->lev,
334 dist, prg->num);
9f1e1b50 335 mempool_free(pmboxq, phba->mbox_mem_pool);
97207482 336 return;
337}
338
0558056c 339/**
340 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
341 * cfg_soft_wwnn, cfg_soft_wwpn
342 * @vport: pointer to lpfc vport data structure.
343 *
344 *
345 * Return codes
346 * None.
347 **/
348void
349lpfc_update_vport_wwn(struct lpfc_vport *vport)
350{
aeb3c817 351 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
352 u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
353
0558056c 354 /* If the soft name exists then update it using the service params */
355 if (vport->phba->cfg_soft_wwnn)
356 u64_to_wwn(vport->phba->cfg_soft_wwnn,
357 vport->fc_sparam.nodeName.u.wwn);
358 if (vport->phba->cfg_soft_wwpn)
359 u64_to_wwn(vport->phba->cfg_soft_wwpn,
360 vport->fc_sparam.portName.u.wwn);
361
362 /*
363 * If the name is empty or there exists a soft name
364 * then copy the service params name, otherwise use the fc name
365 */
366 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
367 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
368 sizeof(struct lpfc_name));
369 else
370 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
371 sizeof(struct lpfc_name));
372
aeb3c817 373 /*
374 * If the port name has changed, then set the Param changes flag
375 * to unreg the login
376 */
377 if (vport->fc_portname.u.wwn[0] != 0 &&
378 memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
379 sizeof(struct lpfc_name)))
380 vport->vport_flag |= FAWWPN_PARAM_CHG;
381
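 /* Adopt the WWPN from the service parameters when no port name is
  * set yet, a soft WWPN is configured, or the fabric supplied a
  * fabric-assigned WWPN (vendor version key match below).
  */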
382 if (vport->fc_portname.u.wwn[0] == 0 ||
383 vport->phba->cfg_soft_wwpn ||
384 (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
385 vport->vport_flag & FAWWPN_SET) {
0558056c 386 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
387 sizeof(struct lpfc_name));
aeb3c817 388 vport->vport_flag &= ~FAWWPN_SET;
389 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
390 vport->vport_flag |= FAWWPN_SET;
391 }
0558056c 392 else
393 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
394 sizeof(struct lpfc_name));
395}
396
e59058c4 397/**
3621a710 398 * lpfc_config_port_post - Perform lpfc initialization after config port
e59058c4 399 * @phba: pointer to lpfc hba data structure.
400 *
401 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
402 * command call. It performs all internal resource and state setups on the
403 * port: post IOCB buffers, enable appropriate host interrupt attentions,
404 * ELS ring timers, etc.
405 *
406 * Return codes
407 * 0 - success.
408 * Any other value - error.
409 **/
dea3101e 410int
2e0fef85 411lpfc_config_port_post(struct lpfc_hba *phba)
dea3101e 412{
2e0fef85 413 struct lpfc_vport *vport = phba->pport;
a257bf90 414 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
dea3101e 415 LPFC_MBOXQ_t *pmb;
416 MAILBOX_t *mb;
417 struct lpfc_dmabuf *mp;
418 struct lpfc_sli *psli = &phba->sli;
419 uint32_t status, timeout;
2e0fef85 420 int i, j;
421 int rc;
dea3101e 422
7af67051 423 spin_lock_irq(&phba->hbalock);
424 /*
425 * If the Config port completed correctly the HBA is not
426 * over heated any more.
427 */
428 if (phba->over_temp_state == HBA_OVER_TEMP)
429 phba->over_temp_state = HBA_NORMAL_TEMP;
430 spin_unlock_irq(&phba->hbalock);
431
dea3101e 432 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
433 if (!pmb) {
2e0fef85 434 phba->link_state = LPFC_HBA_ERROR;
dea3101e 435 return -ENOMEM;
436 }
04c68496 437 mb = &pmb->u.mb;
dea3101e 438
dea3101e 439 /* Get login parameters for NID. */
9f1177a3 440 rc = lpfc_read_sparam(phba, pmb, 0);
441 if (rc) {
442 mempool_free(pmb, phba->mbox_mem_pool);
443 return -ENOMEM;
444 }
445
ed957684 446 pmb->vport = vport;
dea3101e 447 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
ed957684 448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 449 "0448 Adapter failed init, mbxCmd x%x "
dea3101e 450 "READ_SPARM mbxStatus x%x\n",
dea3101e 451 mb->mbxCommand, mb->mbxStatus);
2e0fef85 452 phba->link_state = LPFC_HBA_ERROR;
3e1f0718 453 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
9f1177a3 454 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 455 lpfc_mbuf_free(phba, mp->virt, mp->phys);
456 kfree(mp);
457 return -EIO;
458 }
459
3e1f0718 460 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
dea3101e 461
2e0fef85 462 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
dea3101e 463 lpfc_mbuf_free(phba, mp->virt, mp->phys);
464 kfree(mp);
3e1f0718 465 pmb->ctx_buf = NULL;
0558056c 466 lpfc_update_vport_wwn(vport);
a257bf90 467
468 /* Update the fc_host data structures with new wwn. */
469 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
470 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
21e9a0a5 471 fc_host_max_npiv_vports(shost) = phba->max_vpi;
a257bf90 472
dea3101e 473 /* If no serial number in VPD data, use low 6 bytes of WWNN */
474 /* This should be consolidated into parse_vpd ? - mr */
475 if (phba->SerialNumber[0] == 0) {
476 uint8_t *outptr;
477
2e0fef85 478 outptr = &vport->fc_nodename.u.s.IEEE[0];
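 /* Encode each WWNN nibble as an ASCII character: values 0-9 map to
  * '0'-'9' (0x30 base) and 10-15 map to 'a'-'f' (0x61 base).
  */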
dea3101e 479 for (i = 0; i < 12; i++) {
480 status = *outptr++;
481 j = ((status & 0xf0) >> 4);
482 if (j <= 9)
483 phba->SerialNumber[i] =
484 (char)((uint8_t) 0x30 + (uint8_t) j);
485 else
486 phba->SerialNumber[i] =
487 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
488 i++;
489 j = (status & 0xf);
490 if (j <= 9)
491 phba->SerialNumber[i] =
492 (char)((uint8_t) 0x30 + (uint8_t) j);
493 else
494 phba->SerialNumber[i] =
495 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
496 }
497 }
498
dea3101e 499 lpfc_read_config(phba, pmb);
ed957684 500 pmb->vport = vport;
dea3101e 501 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
ed957684 502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 503 "0453 Adapter failed to init, mbxCmd x%x "
dea3101e 504 "READ_CONFIG, mbxStatus x%x\n",
dea3101e 505 mb->mbxCommand, mb->mbxStatus);
2e0fef85 506 phba->link_state = LPFC_HBA_ERROR;
dea3101e 507 mempool_free( pmb, phba->mbox_mem_pool);
508 return -EIO;
509 }
510
a0c87cbd 511 /* Check if the port is disabled */
512 lpfc_sli_read_link_ste(phba);
513
dea3101e 514 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
572709e2 515 i = (mb->un.varRdConfig.max_xri + 1);
516 if (phba->cfg_hba_queue_depth > i) {
517 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
518 "3359 HBA queue depth changed from %d to %d\n",
519 phba->cfg_hba_queue_depth, i);
520 phba->cfg_hba_queue_depth = i;
521 }
522
523 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
524 i = (mb->un.varRdConfig.max_xri >> 3);
525 if (phba->pport->cfg_lun_queue_depth > i) {
526 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
527 "3360 LUN queue depth changed from %d to %d\n",
528 phba->pport->cfg_lun_queue_depth, i);
529 phba->pport->cfg_lun_queue_depth = i;
530 }
dea3101e 531
532 phba->lmt = mb->un.varRdConfig.lmt;
74b72a59 533
534 /* Get the default values for Model Name and Description */
535 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
536
2e0fef85 537 phba->link_state = LPFC_LINK_DOWN;
dea3101e 538
0b727fea 539 /* Only process IOCBs on ELS ring till hba_state is READY */
895427bd 540 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
541 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
542 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
543 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
dea3101e 544
545 /* Post receive buffers for desired rings */
ed957684 546 if (phba->sli_rev != 3)
547 lpfc_post_rcv_buf(phba);
dea3101e 548
9399627f 549 /*
550 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
551 */
552 if (phba->intr_type == MSIX) {
553 rc = lpfc_config_msi(phba, pmb);
554 if (rc) {
555 mempool_free(pmb, phba->mbox_mem_pool);
556 return -EIO;
557 }
558 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
559 if (rc != MBX_SUCCESS) {
560 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
561 "0352 Config MSI mailbox command "
562 "failed, mbxCmd x%x, mbxStatus x%x\n",
04c68496 563 pmb->u.mb.mbxCommand,
564 pmb->u.mb.mbxStatus);
9399627f 565 mempool_free(pmb, phba->mbox_mem_pool);
566 return -EIO;
567 }
568 }
569
04c68496 570 spin_lock_irq(&phba->hbalock);
9399627f 571 /* Initialize ERATT handling flag */
572 phba->hba_flag &= ~HBA_ERATT_HANDLED;
573
dea3101e 574 /* Enable appropriate host interrupts */
9940b97b 575 if (lpfc_readl(phba->HCregaddr, &status)) {
576 spin_unlock_irq(&phba->hbalock);
577 return -EIO;
578 }
dea3101e 579 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
580 if (psli->num_rings > 0)
581 status |= HC_R0INT_ENA;
582 if (psli->num_rings > 1)
583 status |= HC_R1INT_ENA;
584 if (psli->num_rings > 2)
585 status |= HC_R2INT_ENA;
586 if (psli->num_rings > 3)
587 status |= HC_R3INT_ENA;
588
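 /* With FCP ring polling enabled and ring interrupts disabled, mask
  * the ring-0 attention bit so completions are picked up by the poll
  * path rather than the interrupt handler.
  */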
875fbdfe 589 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
590 (phba->cfg_poll & DISABLE_FCP_RING_INT))
9399627f 591 status &= ~(HC_R0INT_ENA);
875fbdfe 592
dea3101e 593 writel(status, phba->HCregaddr);
594 readl(phba->HCregaddr); /* flush */
2e0fef85 595 spin_unlock_irq(&phba->hbalock);
dea3101e 596
9399627f 597 /* Set up ring-0 (ELS) timer */
598 timeout = phba->fc_ratov * 2;
256ec0d0 599 mod_timer(&vport->els_tmofunc,
600 jiffies + msecs_to_jiffies(1000 * timeout));
9399627f 601 /* Set up heart beat (HB) timer */
256ec0d0 602 mod_timer(&phba->hb_tmofunc,
603 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
858c9f6c 604 phba->hb_outstanding = 0;
605 phba->last_completion_time = jiffies;
9399627f 606 /* Set up error attention (ERATT) polling timer */
256ec0d0 607 mod_timer(&phba->eratt_poll,
65791f1f 608 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
dea3101e 609
a0c87cbd 610 if (phba->hba_flag & LINK_DISABLED) {
611 lpfc_printf_log(phba,
612 KERN_ERR, LOG_INIT,
613 "2598 Adapter Link is disabled.\n");
614 lpfc_down_link(phba, pmb);
615 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
616 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
617 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
618 lpfc_printf_log(phba,
619 KERN_ERR, LOG_INIT,
620 "2599 Adapter failed to issue DOWN_LINK"
621 " mbox command rc 0x%x\n", rc);
622
623 mempool_free(pmb, phba->mbox_mem_pool);
624 return -EIO;
625 }
e40a02c1 626 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
026abb87 627 mempool_free(pmb, phba->mbox_mem_pool);
628 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
629 if (rc)
630 return rc;
dea3101e 631 }
632 /* MBOX buffer will be freed in mbox compl */
57127f15 633 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9f1177a3 634 if (!pmb) {
635 phba->link_state = LPFC_HBA_ERROR;
636 return -ENOMEM;
637 }
638
57127f15 639 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
640 pmb->mbox_cmpl = lpfc_config_async_cmpl;
641 pmb->vport = phba->pport;
642 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
dea3101e 643
57127f15 644 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
645 lpfc_printf_log(phba,
646 KERN_ERR,
647 LOG_INIT,
648 "0456 Adapter failed to issue "
e4e74273 649 "ASYNCEVT_ENABLE mbox status x%x\n",
57127f15 650 rc);
651 mempool_free(pmb, phba->mbox_mem_pool);
652 }
97207482 653
654 /* Get Option rom version */
655 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9f1177a3 656 if (!pmb) {
657 phba->link_state = LPFC_HBA_ERROR;
658 return -ENOMEM;
659 }
660
97207482 661 lpfc_dump_wakeup_param(phba, pmb);
662 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
663 pmb->vport = phba->pport;
664 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
665
666 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
e4e74273 668 "to get Option ROM version status x%x\n", rc);
97207482 669 mempool_free(pmb, phba->mbox_mem_pool);
670 }
671
d7c255b2 672 return 0;
ce8b3ce5 673}
674
84d1b006 675/**
676 * lpfc_hba_init_link - Initialize the FC link
677 * @phba: pointer to lpfc hba data structure.
6e7288d9 678 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
84d1b006 679 *
680 * This routine will issue the INIT_LINK mailbox command call.
681 * It is available to other drivers through the lpfc_hba data
682 * structure for use as a delayed link up mechanism with the
683 * module parameter lpfc_suppress_link_up.
684 *
685 * Return code
686 * 0 - success
687 * Any other value - error
688 **/
e399b228 689static int
6e7288d9 690lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
1b51197d 691{
692 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
693}
694
695/**
696 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
697 * @phba: pointer to lpfc hba data structure.
698 * @fc_topology: desired fc topology.
699 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
700 *
701 * This routine will issue the INIT_LINK mailbox command call.
702 * It is available to other drivers through the lpfc_hba data
703 * structure for use as a delayed link up mechanism with the
704 * module parameter lpfc_suppress_link_up.
705 *
706 * Return code
707 * 0 - success
708 * Any other value - error
709 **/
710int
711lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
712 uint32_t flag)
84d1b006 713{
714 struct lpfc_vport *vport = phba->pport;
715 LPFC_MBOXQ_t *pmb;
716 MAILBOX_t *mb;
717 int rc;
718
719 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
720 if (!pmb) {
721 phba->link_state = LPFC_HBA_ERROR;
722 return -ENOMEM;
723 }
724 mb = &pmb->u.mb;
725 pmb->vport = vport;
726
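 /* Validate the requested link speed against the adapter's link
  * module type (lmt) capability mask; any unsupported speed falls
  * back to auto-negotiation.
  */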
026abb87 727 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
728 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
729 !(phba->lmt & LMT_1Gb)) ||
730 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
731 !(phba->lmt & LMT_2Gb)) ||
732 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
733 !(phba->lmt & LMT_4Gb)) ||
734 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
735 !(phba->lmt & LMT_8Gb)) ||
736 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
737 !(phba->lmt & LMT_10Gb)) ||
738 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
d38dd52c 739 !(phba->lmt & LMT_16Gb)) ||
740 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
fbd8a6ba 741 !(phba->lmt & LMT_32Gb)) ||
742 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
743 !(phba->lmt & LMT_64Gb))) {
026abb87 744 /* Reset link speed to auto */
745 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
746 "1302 Invalid speed for this board:%d "
747 "Reset link speed to auto.\n",
748 phba->cfg_link_speed);
749 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
750 }
1b51197d 751 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
84d1b006 752 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1b51197d 753 if (phba->sli_rev < LPFC_SLI_REV4)
754 lpfc_set_loopback_flag(phba);
6e7288d9 755 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
76a95d75 756 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
84d1b006 757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
758 "0498 Adapter failed to init, mbxCmd x%x "
759 "INIT_LINK, mbxStatus x%x\n",
760 mb->mbxCommand, mb->mbxStatus);
76a95d75 761 if (phba->sli_rev <= LPFC_SLI_REV3) {
762 /* Clear all interrupt enable conditions */
763 writel(0, phba->HCregaddr);
764 readl(phba->HCregaddr); /* flush */
765 /* Clear all pending interrupts */
766 writel(0xffffffff, phba->HAregaddr);
767 readl(phba->HAregaddr); /* flush */
768 }
84d1b006 769 phba->link_state = LPFC_HBA_ERROR;
6e7288d9 770 if (rc != MBX_BUSY || flag == MBX_POLL)
84d1b006 771 mempool_free(pmb, phba->mbox_mem_pool);
772 return -EIO;
773 }
e40a02c1 774 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
6e7288d9 775 if (flag == MBX_POLL)
776 mempool_free(pmb, phba->mbox_mem_pool);
84d1b006 777
778 return 0;
779}
780
781/**
782 * lpfc_hba_down_link - this routine downs the FC link
6e7288d9 783 * @phba: pointer to lpfc hba data structure.
784 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
84d1b006 785 *
786 * This routine will issue the DOWN_LINK mailbox command call.
787 * It is available to other drivers through the lpfc_hba data
788 * structure for use to stop the link.
789 *
790 * Return code
791 * 0 - success
792 * Any other value - error
793 **/
e399b228 794static int
6e7288d9 795lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
84d1b006
JS
796{
797 LPFC_MBOXQ_t *pmb;
798 int rc;
799
800 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
801 if (!pmb) {
802 phba->link_state = LPFC_HBA_ERROR;
803 return -ENOMEM;
804 }
805
806 lpfc_printf_log(phba,
807 KERN_ERR, LOG_INIT,
808 "0491 Adapter Link is disabled.\n");
809 lpfc_down_link(phba, pmb);
810 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6e7288d9 811 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
84d1b006 812 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
813 lpfc_printf_log(phba,
814 KERN_ERR, LOG_INIT,
815 "2522 Adapter failed to issue DOWN_LINK"
816 " mbox command rc 0x%x\n", rc);
817
818 mempool_free(pmb, phba->mbox_mem_pool);
819 return -EIO;
820 }
6e7288d9 821 if (flag == MBX_POLL)
822 mempool_free(pmb, phba->mbox_mem_pool);
823
84d1b006 824 return 0;
825}
826
e59058c4 827/**
3621a710 828 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
e59058c4 829 * @phba: pointer to lpfc HBA data structure.
830 *
831 * This routine will do LPFC uninitialization before the HBA is reset when
832 * bringing down the SLI Layer.
833 *
834 * Return codes
835 * 0 - success.
836 * Any other value - error.
837 **/
dea3101e 838int
2e0fef85 839lpfc_hba_down_prep(struct lpfc_hba *phba)
dea3101e 840{
1b32f6aa 841 struct lpfc_vport **vports;
842 int i;
3772a991 843
844 if (phba->sli_rev <= LPFC_SLI_REV3) {
845 /* Disable interrupts */
846 writel(0, phba->HCregaddr);
847 readl(phba->HCregaddr); /* flush */
848 }
dea3101e 849
1b32f6aa 850 if (phba->pport->load_flag & FC_UNLOADING)
851 lpfc_cleanup_discovery_resources(phba->pport);
852 else {
853 vports = lpfc_create_vport_work_array(phba);
854 if (vports != NULL)
3772a991 855 for (i = 0; i <= phba->max_vports &&
856 vports[i] != NULL; i++)
1b32f6aa 857 lpfc_cleanup_discovery_resources(vports[i]);
858 lpfc_destroy_vport_work_array(phba, vports);
7f5f3d0d 859 }
860 return 0;
dea3101e 861}
862
68e814f5 863/**
864 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
865 * rspiocb which got deferred
866 *
867 * @phba: pointer to lpfc HBA data structure.
868 *
869 * This routine will cleanup completed slow path events after HBA is reset
870 * when bringing down the SLI Layer.
871 *
872 *
873 * Return codes
874 * void.
875 **/
876static void
877lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
878{
879 struct lpfc_iocbq *rspiocbq;
880 struct hbq_dmabuf *dmabuf;
881 struct lpfc_cq_event *cq_event;
882
883 spin_lock_irq(&phba->hbalock);
884 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
885 spin_unlock_irq(&phba->hbalock);
886
887 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
888 /* Get the response iocb from the head of work queue */
889 spin_lock_irq(&phba->hbalock);
890 list_remove_head(&phba->sli4_hba.sp_queue_event,
891 cq_event, struct lpfc_cq_event, list);
892 spin_unlock_irq(&phba->hbalock);
893
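 /* Release each deferred event according to its CQE type: completed
  * WQEs give back their iocbq, received frames free the DMA buffer.
  */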
894 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
895 case CQE_CODE_COMPL_WQE:
896 rspiocbq = container_of(cq_event, struct lpfc_iocbq,
897 cq_event);
898 lpfc_sli_release_iocbq(phba, rspiocbq);
899 break;
900 case CQE_CODE_RECEIVE:
901 case CQE_CODE_RECEIVE_V1:
902 dmabuf = container_of(cq_event, struct hbq_dmabuf,
903 cq_event);
904 lpfc_in_buf_free(phba, &dmabuf->dbuf);
905 }
906 }
907}
908
e59058c4 909/**
bcece5f5 910 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
e59058c4 911 * @phba: pointer to lpfc HBA data structure.
912 *
bcece5f5 913 * This routine will cleanup posted ELS buffers after the HBA is reset
914 * when bringing down the SLI Layer.
915 *
e59058c4 916 *
917 * Return codes
bcece5f5 918 * void.
e59058c4 919 **/
bcece5f5 920static void
921lpfc_hba_free_post_buf(struct lpfc_hba *phba)
41415862 922{
923 struct lpfc_sli *psli = &phba->sli;
924 struct lpfc_sli_ring *pring;
925 struct lpfc_dmabuf *mp, *next_mp;
07eab624 926 LIST_HEAD(buflist);
927 int count;
41415862 928
92d7f7b0 929 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
930 lpfc_sli_hbqbuf_free_all(phba);
931 else {
932 /* Cleanup preposted buffers on the ELS ring */
895427bd 933 pring = &psli->sli3_ring[LPFC_ELS_RING];
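 /* Splice the posted buffers off the ring under the lock, then free
  * them outside it to keep the lock hold time short.
  */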
07eab624 934 spin_lock_irq(&phba->hbalock);
935 list_splice_init(&pring->postbufq, &buflist);
936 spin_unlock_irq(&phba->hbalock);
937
938 count = 0;
939 list_for_each_entry_safe(mp, next_mp, &buflist, list) {
92d7f7b0 940 list_del(&mp->list);
07eab624 941 count++;
92d7f7b0 942 lpfc_mbuf_free(phba, mp->virt, mp->phys);
943 kfree(mp);
944 }
07eab624 945
946 spin_lock_irq(&phba->hbalock);
947 pring->postbufq_cnt -= count;
bcece5f5 948 spin_unlock_irq(&phba->hbalock);
41415862 949 }
bcece5f5 950}
951
952/**
953 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
954 * @phba: pointer to lpfc HBA data structure.
955 *
956 * This routine will cleanup the txcmplq after the HBA is reset when bringing
957 * down the SLI Layer.
958 *
959 * Return codes
960 * void
961 **/
962static void
963lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
964{
965 struct lpfc_sli *psli = &phba->sli;
895427bd 966 struct lpfc_queue *qp = NULL;
bcece5f5 967 struct lpfc_sli_ring *pring;
968 LIST_HEAD(completions);
969 int i;
c1dd9111 970 struct lpfc_iocbq *piocb, *next_iocb;
bcece5f5 971
895427bd 972 if (phba->sli_rev != LPFC_SLI_REV4) {
973 for (i = 0; i < psli->num_rings; i++) {
974 pring = &psli->sli3_ring[i];
bcece5f5 975 spin_lock_irq(&phba->hbalock);
895427bd 976 /* At this point in time the HBA is either reset or DOA
977 * Nothing should be on txcmplq as it will
978 * NEVER complete.
979 */
980 list_splice_init(&pring->txcmplq, &completions);
981 pring->txcmplq_cnt = 0;
bcece5f5 982 spin_unlock_irq(&phba->hbalock);
09372820 983
895427bd 984 lpfc_sli_abort_iocb_ring(phba, pring);
985 }
a257bf90 986 /* Cancel all the IOCBs from the completions list */
895427bd 987 lpfc_sli_cancel_iocbs(phba, &completions,
988 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
989 return;
990 }
991 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
992 pring = qp->pring;
993 if (!pring)
994 continue;
995 spin_lock_irq(&pring->ring_lock);
c1dd9111 996 list_for_each_entry_safe(piocb, next_iocb,
997 &pring->txcmplq, list)
998 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
895427bd 999 list_splice_init(&pring->txcmplq, &completions);
1000 pring->txcmplq_cnt = 0;
1001 spin_unlock_irq(&pring->ring_lock);
41415862 1002 lpfc_sli_abort_iocb_ring(phba, pring);
1003 }
895427bd 1004 /* Cancel all the IOCBs from the completions list */
1005 lpfc_sli_cancel_iocbs(phba, &completions,
1006 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
bcece5f5 1007}
41415862 1008
bcece5f5 1009/**
1010 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
1012 * @phba: pointer to lpfc HBA data structure.
1013 *
1014 * This routine will do uninitialization after the HBA is reset when bringing
1015 * down the SLI Layer.
1016 *
1017 * Return codes
1018 * 0 - success.
1019 * Any other value - error.
1020 **/
1021static int
1022lpfc_hba_down_post_s3(struct lpfc_hba *phba)
1023{
1024 lpfc_hba_free_post_buf(phba);
1025 lpfc_hba_clean_txcmplq(phba);
41415862 1026 return 0;
1027}
5af5eee7 1028
da0436e9 1029/**
1030 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
1031 * @phba: pointer to lpfc HBA data structure.
1032 *
1033 * This routine will do uninitialization after the HBA is reset when bringing
1034 * down the SLI Layer.
1035 *
1036 * Return codes
af901ca1 1037 * 0 - success.
da0436e9 1038 * Any other value - error.
1039 **/
1040static int
1041lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1042{
c490850a 1043 struct lpfc_io_buf *psb, *psb_next;
86c67379 1044 struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
5e5b511d 1045 struct lpfc_sli4_hdw_queue *qp;
da0436e9 1046 LIST_HEAD(aborts);
895427bd 1047 LIST_HEAD(nvme_aborts);
86c67379 1048 LIST_HEAD(nvmet_aborts);
0f65ff68 1049 struct lpfc_sglq *sglq_entry = NULL;
5e5b511d 1050 int cnt, idx;
0f65ff68 1051
895427bd 1052
1053 lpfc_sli_hbqbuf_free_all(phba);
bcece5f5 1054 lpfc_hba_clean_txcmplq(phba);
1055
da0436e9 1056 /* At this point in time the HBA is either reset or DOA. Either
1057 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
895427bd 1058 * on the lpfc_els_sgl_list so that it can either be freed if the
da0436e9 1059 * driver is unloading or reposted if the driver is restarting
1060 * the port.
1061 */
895427bd 1062 spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */
da0436e9 1063 /* scsl_buf_list */
895427bd 1064 /* sgl_list_lock required because worker thread uses this
da0436e9 1065 * list.
1066 */
895427bd 1067 spin_lock(&phba->sli4_hba.sgl_list_lock);
0f65ff68 1068 list_for_each_entry(sglq_entry,
1069 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
1070 sglq_entry->state = SGL_FREED;
1071
da0436e9 1072 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
895427bd 1073 &phba->sli4_hba.lpfc_els_sgl_list);
1074
f358dd0c 1075
895427bd 1076 spin_unlock(&phba->sli4_hba.sgl_list_lock);
5e5b511d 1077
1078 /* abts_xxxx_buf_list_lock required because worker thread uses this
da0436e9 1079 * list.
1080 */
5e5b511d 1081 cnt = 0;
1082 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
1083 qp = &phba->sli4_hba.hdwq[idx];
da0436e9 1084
5e5b511d 1085 spin_lock(&qp->abts_scsi_buf_list_lock);
1086 list_splice_init(&qp->lpfc_abts_scsi_buf_list,
1087 &aborts);
68e814f5 1088
0794d601 1089 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
86c67379 1090 psb->pCmd = NULL;
1091 psb->status = IOSTAT_SUCCESS;
cf1a1d3e 1092 cnt++;
86c67379 1093 }
5e5b511d 1094 spin_lock(&qp->io_buf_list_put_lock);
1095 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
1096 qp->put_io_bufs += qp->abts_scsi_io_bufs;
1097 qp->abts_scsi_io_bufs = 0;
1098 spin_unlock(&qp->io_buf_list_put_lock);
1099 spin_unlock(&qp->abts_scsi_buf_list_lock);
86c67379 1100
5e5b511d 1101 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1102 spin_lock(&qp->abts_nvme_buf_list_lock);
1103 list_splice_init(&qp->lpfc_abts_nvme_buf_list,
1104 &nvme_aborts);
1105 list_for_each_entry_safe(psb, psb_next, &nvme_aborts,
1106 list) {
1107 psb->pCmd = NULL;
1108 psb->status = IOSTAT_SUCCESS;
1109 cnt++;
1110 }
1111 spin_lock(&qp->io_buf_list_put_lock);
1112 qp->put_io_bufs += qp->abts_nvme_io_bufs;
1113 qp->abts_nvme_io_bufs = 0;
1114 list_splice_init(&nvme_aborts,
1115 &qp->lpfc_io_buf_list_put);
1116 spin_unlock(&qp->io_buf_list_put_lock);
1117 spin_unlock(&qp->abts_nvme_buf_list_lock);
68e814f5 1118
86c67379 1119 }
5e5b511d 1120 }
731eedcb 1121 spin_unlock_irq(&phba->hbalock);
86c67379 1122
5e5b511d 1123 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
731eedcb 1124 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
5e5b511d 1125 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1126 &nvmet_aborts);
731eedcb 1127 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
86c67379 1128 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
1129 ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
6c621a22 1130 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
86c67379 1131 }
895427bd 1132 }
895427bd 1133
68e814f5 1134 lpfc_sli4_free_sp_events(phba);
5e5b511d 1135 return cnt;
da0436e9 1136}
1137
1138/**
1139 * lpfc_hba_down_post - Wrapper func for hba down post routine
1140 * @phba: pointer to lpfc HBA data structure.
1141 *
1142 * This routine wraps the actual SLI3 or SLI4 routine for performing
1143 * uninitialization after the HBA is reset when bringing down the SLI Layer.
1144 *
1145 * Return codes
af901ca1 1146 * 0 - success.
da0436e9 1147 * Any other value - error.
1148 **/
1149int
1150lpfc_hba_down_post(struct lpfc_hba *phba)
1151{
1152 return (*phba->lpfc_hba_down_post)(phba);
1153}
41415862 1154
e59058c4 1155/**
3621a710 1156 * lpfc_hb_timeout - The HBA-timer timeout handler
e59058c4 1157 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
1158 *
1159 * This is the HBA-timer timeout handler registered to the lpfc driver. When
1160 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
1161 * work-port-events bitmap and the worker thread is notified. This timeout
1162 * event will be used by the worker thread to invoke the actual timeout
1163 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
1164 * be performed in the timeout handler and the HBA timeout event bit shall
1165 * be cleared by the worker thread after it has taken the event bitmap out.
1166 **/
a6ababd2 1167static void
f22eb4d3 1168lpfc_hb_timeout(struct timer_list *t)
858c9f6c 1169{
1170 struct lpfc_hba *phba;
5e9d9b82 1171 uint32_t tmo_posted;
858c9f6c 1172 unsigned long iflag;
1173
f22eb4d3 1174 phba = from_timer(phba, t, hb_tmofunc);
9399627f 1175
1176 /* Check for heart beat timeout conditions */
858c9f6c 1177 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
5e9d9b82 1178 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1179 if (!tmo_posted)
858c9f6c 1180 phba->pport->work_port_events |= WORKER_HB_TMO;
1181 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1182
9399627f 1183 /* Tell the worker thread there is work to do */
5e9d9b82 1184 if (!tmo_posted)
1185 lpfc_worker_wake_up(phba);
858c9f6c 1186 return;
1187}
1188
19ca7609 1189/**
1190 * lpfc_rrq_timeout - The RRQ-timer timeout handler
1191 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
1192 *
1193 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
1194 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
1195 * work-port-events bitmap and the worker thread is notified. This timeout
1196 * event will be used by the worker thread to invoke the actual timeout
1197 * handler routine, lpfc_rrq_handler. Any periodical operations will
1198 * be performed in the timeout handler and the RRQ timeout event bit shall
1199 * be cleared by the worker thread after it has taken the event bitmap out.
1200 **/
1201static void
f22eb4d3 1202lpfc_rrq_timeout(struct timer_list *t)
19ca7609 1203{
1204 struct lpfc_hba *phba;
19ca7609 1205 unsigned long iflag;
1206
f22eb4d3 1207 phba = from_timer(phba, t, rrq_tmr);
19ca7609 1208 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
06918ac5 1209 if (!(phba->pport->load_flag & FC_UNLOADING))
1210 phba->hba_flag |= HBA_RRQ_ACTIVE;
1211 else
1212 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
19ca7609 1213 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
06918ac5 1214
1215 if (!(phba->pport->load_flag & FC_UNLOADING))
1216 lpfc_worker_wake_up(phba);
19ca7609 1217}
1218
e59058c4 1219/**
3621a710 1220 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
e59058c4 1221 * @phba: pointer to lpfc hba data structure.
1222 * @pmboxq: pointer to the driver internal queue element for mailbox command.
1223 *
1224 * This is the callback function to the lpfc heart-beat mailbox command.
1225 * If configured, the lpfc driver issues the heart-beat mailbox command to
1226 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
1227 * heart-beat mailbox command is issued, the driver shall set up heart-beat
1228 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
1229 * heart-beat outstanding state. Once the mailbox command comes back and
1230 * no error conditions are detected, the heart-beat mailbox command timer is
1231 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1232 * state is cleared for the next heart-beat. If the timer expired with the
1233 * heart-beat outstanding state set, the driver will put the HBA offline.
1234 **/
858c9f6c 1235static void
1236lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1237{
1238 unsigned long drvr_flag;
1239
1240 spin_lock_irqsave(&phba->hbalock, drvr_flag);
1241 phba->hb_outstanding = 0;
1242 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1243
9399627f 1244 /* Check and reset heart-beat timer is necessary */
858c9f6c 1245 mempool_free(pmboxq, phba->mbox_mem_pool);
1246 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1247 !(phba->link_state == LPFC_HBA_ERROR) &&
51ef4c26 1248 !(phba->pport->load_flag & FC_UNLOADING))
858c9f6c 1249 mod_timer(&phba->hb_tmofunc,
256ec0d0 1250 jiffies +
1251 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
858c9f6c 1252 return;
1253}
1254
32517fc0 1255static void
1256lpfc_hb_eq_delay_work(struct work_struct *work)
1257{
1258 struct lpfc_hba *phba = container_of(to_delayed_work(work),
1259 struct lpfc_hba, eq_delay_work);
1260 struct lpfc_eq_intr_info *eqi, *eqi_new;
1261 struct lpfc_queue *eq, *eq_next;
1262 unsigned char *eqcnt = NULL;
1263 uint32_t usdelay;
1264 int i;
8d34a59c 1265 bool update = false;
32517fc0 1266
1267 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1268 return;
1269
1270 if (phba->link_state == LPFC_HBA_ERROR ||
1271 phba->pport->fc_flag & FC_OFFLINE_MODE)
1272 goto requeue;
1273
1274 eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
1275 GFP_KERNEL);
1276 if (!eqcnt)
1277 goto requeue;
1278
8d34a59c 1279 if (phba->cfg_irq_chann > 1) {
1280 /* Loop thru all IRQ vectors */
1281 for (i = 0; i < phba->cfg_irq_chann; i++) {
1282 /* Get the EQ corresponding to the IRQ vector */
1283 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1284 if (!eq)
1285 continue;
1286 if (eq->q_mode) {
1287 update = true;
1288 break;
1289 }
1290 if (eqcnt[eq->last_cpu] < 2)
1291 eqcnt[eq->last_cpu]++;
1292 }
1293 } else
1294 update = true;
32517fc0 1295
1296 for_each_present_cpu(i) {
32517fc0 1297 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
8d34a59c 1298 if (!update && eqcnt[i] < 2) {
1299 eqi->icnt = 0;
1300 continue;
1301 }
32517fc0 1302
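 /* Scale the accumulated per-CPU EQ interrupt count into a coalescing
  * delay: one LPFC_EQ_DELAY_STEP per LPFC_IMAX_THRESHOLD interrupts,
  * capped at LPFC_MAX_AUTO_EQ_DELAY.
  */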
1303 usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
1304 LPFC_EQ_DELAY_STEP;
1305 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1306 usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1307
1308 eqi->icnt = 0;
1309
1310 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1311 if (eq->last_cpu != i) {
1312 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1313 eq->last_cpu);
1314 list_move_tail(&eq->cpu_list, &eqi_new->list);
1315 continue;
1316 }
1317 if (usdelay != eq->q_mode)
1318 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1319 usdelay);
1320 }
1321 }
1322
1323 kfree(eqcnt);
1324
1325requeue:
1326 queue_delayed_work(phba->wq, &phba->eq_delay_work,
1327 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1328}
1329
c490850a 1330/**
1331 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
1332 * @phba: pointer to lpfc hba data structure.
1333 *
1334 * For each heartbeat, this routine does some heuristic methods to adjust
1335 * XRI distribution. The goal is to fully utilize free XRIs.
1336 **/
1337static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1338{
1339 u32 i;
1340 u32 hwq_count;
1341
1342 hwq_count = phba->cfg_hdw_queue;
1343 for (i = 0; i < hwq_count; i++) {
1344 /* Adjust XRIs in private pool */
1345 lpfc_adjust_pvt_pool_count(phba, i);
1346
1347 /* Adjust high watermark */
1348 lpfc_adjust_high_watermark(phba, i);
1349
1350#ifdef LPFC_MXP_STAT
1351 /* Snapshot pbl, pvt and busy count */
1352 lpfc_snapshot_mxp(phba, i);
1353#endif
1354 }
1355}
1356
e59058c4 1357/**
3621a710 1358 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
e59058c4 1359 * @phba: pointer to lpfc hba data structure.
1360 *
1361 * This is the actual HBA-timer timeout handler to be invoked by the worker
1362 * thread whenever the HBA timer fired and HBA-timeout event posted. This
1363 * handler performs any periodic operations needed for the device. If such a
1364 * periodic event has already been attended to either in the interrupt handler
1365 * or by processing slow-ring or fast-ring events within the HBA-timer
1366 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1367 * the timer for the next timeout period. If lpfc heart-beat mailbox command
1368 * is configured and there is no heart-beat mailbox command outstanding, a
1369 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1370 * has been a heart-beat mailbox command outstanding, the HBA shall be taken
1371 * offline.
1372 **/
858c9f6c 1373void
1374lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1375{
45ed1190 1376 struct lpfc_vport **vports;
858c9f6c 1377 LPFC_MBOXQ_t *pmboxq;
0ff10d46 1378 struct lpfc_dmabuf *buf_ptr;
45ed1190 1379 int retval, i;
858c9f6c 1380 struct lpfc_sli *psli = &phba->sli;
0ff10d46 1381 LIST_HEAD(completions);
858c9f6c 1382
c490850a 1383 if (phba->cfg_xri_rebalancing) {
1384 /* Multi-XRI pools handler */
1385 lpfc_hb_mxp_handler(phba);
1386 }
858c9f6c 1387
45ed1190 1388 vports = lpfc_create_vport_work_array(phba);
1389 if (vports != NULL)
4258e98e 1390 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
45ed1190 1391 lpfc_rcv_seq_check_edtov(vports[i]);
4258e98e 1392 lpfc_fdmi_num_disc_check(vports[i]);
1393 }
45ed1190 1394 lpfc_destroy_vport_work_array(phba, vports);
1395
858c9f6c 1396 if ((phba->link_state == LPFC_HBA_ERROR) ||
51ef4c26 1397 (phba->pport->load_flag & FC_UNLOADING) ||
858c9f6c 1398 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1399 return;
1400
1401 spin_lock_irq(&phba->pport->work_port_lock);
858c9f6c 1402
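 /* If a completion was seen within the last heartbeat interval the
  * adapter is demonstrably alive; just re-arm the timer rather than
  * issuing another heartbeat mailbox command.
  */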
256ec0d0 1403 if (time_after(phba->last_completion_time +
1404 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1405 jiffies)) {
858c9f6c 1406 spin_unlock_irq(&phba->pport->work_port_lock);
1407 if (!phba->hb_outstanding)
1408 mod_timer(&phba->hb_tmofunc,
256ec0d0 1409 jiffies +
1410 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
858c9f6c 1411 else
1412 mod_timer(&phba->hb_tmofunc,
256ec0d0 1413 jiffies +
1414 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
858c9f6c 1415 return;
1416 }
1417 spin_unlock_irq(&phba->pport->work_port_lock);
1418
0ff10d46 1419 if (phba->elsbuf_cnt &&
1420 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1421 spin_lock_irq(&phba->hbalock);
1422 list_splice_init(&phba->elsbuf, &completions);
1423 phba->elsbuf_cnt = 0;
1424 phba->elsbuf_prev_cnt = 0;
1425 spin_unlock_irq(&phba->hbalock);
1426
1427 while (!list_empty(&completions)) {
1428 list_remove_head(&completions, buf_ptr,
1429 struct lpfc_dmabuf, list);
1430 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1431 kfree(buf_ptr);
1432 }
1433 }
1434 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1435
858c9f6c 1436 /* If there is no heart beat outstanding, issue a heartbeat command */
13815c83 1437 if (phba->cfg_enable_hba_heartbeat) {
1438 if (!phba->hb_outstanding) {
bc73905a 1439 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1440 (list_empty(&psli->mboxq))) {
1441 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1442 GFP_KERNEL);
1443 if (!pmboxq) {
1444 mod_timer(&phba->hb_tmofunc,
1445 jiffies +
256ec0d0 1446 msecs_to_jiffies(1000 *
1447 LPFC_HB_MBOX_INTERVAL));
bc73905a 1448 return;
1449 }
1450
1451 lpfc_heart_beat(phba, pmboxq);
1452 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1453 pmboxq->vport = phba->pport;
1454 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1455 MBX_NOWAIT);
1456
1457 if (retval != MBX_BUSY &&
1458 retval != MBX_SUCCESS) {
1459 mempool_free(pmboxq,
1460 phba->mbox_mem_pool);
1461 mod_timer(&phba->hb_tmofunc,
1462 jiffies +
256ec0d0 1463 msecs_to_jiffies(1000 *
1464 LPFC_HB_MBOX_INTERVAL));
bc73905a 1465 return;
1466 }
1467 phba->skipped_hb = 0;
1468 phba->hb_outstanding = 1;
1469 } else if (time_before_eq(phba->last_completion_time,
1470 phba->skipped_hb)) {
1471 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1472 "2857 Last completion time not "
1473 " updated in %d ms\n",
1474 jiffies_to_msecs(jiffies
1475 - phba->last_completion_time));
1476 } else
1477 phba->skipped_hb = jiffies;
1478
858c9f6c 1479 mod_timer(&phba->hb_tmofunc,
256ec0d0 1480 jiffies +
1481 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
858c9f6c 1482 return;
13815c83 1483 } else {
1484 /*
1485 * If heart beat timeout called with hb_outstanding set
dcf2a4e0 1486 * we need to give the hb mailbox cmd a chance to
1487 * complete or TMO.
13815c83 1488 */
dcf2a4e0 1489 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1490 "0459 Adapter heartbeat still out"
1491 "standing:last compl time was %d ms.\n",
1492 jiffies_to_msecs(jiffies
1493 - phba->last_completion_time));
1494 mod_timer(&phba->hb_tmofunc,
256ec0d0 1495 jiffies +
1496 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
858c9f6c 1497 }
4258e98e 1498 } else {
1499 mod_timer(&phba->hb_tmofunc,
1500 jiffies +
1501 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
858c9f6c 1502 }
1503}
1504
e59058c4 1505/**
3621a710 1506 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
e59058c4 1507 * @phba: pointer to lpfc hba data structure.
1508 *
1509 * This routine is called to bring the HBA offline when HBA hardware error
1510 * other than Port Error 6 has been detected.
1511 **/
09372820 1512static void
1513lpfc_offline_eratt(struct lpfc_hba *phba)
1514{
1515 struct lpfc_sli *psli = &phba->sli;
1516
1517 spin_lock_irq(&phba->hbalock);
f4b4c68f 1518 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
09372820 1519 spin_unlock_irq(&phba->hbalock);
618a5230 1520 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
09372820 1521
1522 lpfc_offline(phba);
1523 lpfc_reset_barrier(phba);
f4b4c68f 1524 spin_lock_irq(&phba->hbalock);
09372820 1525 lpfc_sli_brdreset(phba);
f4b4c68f 1526 spin_unlock_irq(&phba->hbalock);
09372820 1527 lpfc_hba_down_post(phba);
1528 lpfc_sli_brdready(phba, HS_MBRDY);
1529 lpfc_unblock_mgmt_io(phba);
1530 phba->link_state = LPFC_HBA_ERROR;
1531 return;
1532}
1533
da0436e9 1534/**
1535 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1536 * @phba: pointer to lpfc hba data structure.
1537 *
1538 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1539 * other than Port Error 6 has been detected.
1540 **/
a88dbb6a 1541void
da0436e9 1542lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1543{
946727dc 1544 spin_lock_irq(&phba->hbalock);
1545 phba->link_state = LPFC_HBA_ERROR;
1546 spin_unlock_irq(&phba->hbalock);
1547
618a5230 1548 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
84f2ddf8 1549 lpfc_sli_flush_fcp_rings(phba);
1550 lpfc_sli_flush_nvme_rings(phba);
da0436e9 1551 lpfc_offline(phba);
da0436e9 1552 lpfc_hba_down_post(phba);
da0436e9 1553 lpfc_unblock_mgmt_io(phba);
da0436e9 1554}
1555
a257bf90 1556/**
1557 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1558 * @phba: pointer to lpfc hba data structure.
1559 *
1560 * This routine is invoked to handle the deferred HBA hardware error
1561 * conditions. This type of error is indicated by HBA by setting ER1
1562 * and another ER bit in the host status register. The driver will
1563 * wait until the ER1 bit clears before handling the error condition.
1564 **/
1565static void
1566lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1567{
1568 uint32_t old_host_status = phba->work_hs;
a257bf90
JS
1569 struct lpfc_sli *psli = &phba->sli;
1570
f4b4c68f
JS
1571 /* If the pci channel is offline, ignore possible errors,
1572 * since we cannot communicate with the pci card anyway.
1573 */
1574 if (pci_channel_offline(phba->pcidev)) {
1575 spin_lock_irq(&phba->hbalock);
1576 phba->hba_flag &= ~DEFER_ERATT;
1577 spin_unlock_irq(&phba->hbalock);
1578 return;
1579 }
1580
a257bf90
JS
1581 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1582 "0479 Deferred Adapter Hardware Error "
1583 "Data: x%x x%x x%x\n",
1584 phba->work_hs,
1585 phba->work_status[0], phba->work_status[1]);
1586
1587 spin_lock_irq(&phba->hbalock);
f4b4c68f 1588 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
a257bf90
JS
1589 spin_unlock_irq(&phba->hbalock);
1590
1591
1592 /*
1593 * Firmware stops when it triggered erratt. That could cause the I/Os
1594 * to be dropped by the firmware. Error out the iocbs (I/Os) on the
1595 * txcmplq and let the SCSI layer retry them after re-establishing link.
1596 */
db55fba8 1597 lpfc_sli_abort_fcp_rings(phba);
a257bf90
JS
1598
1599 /*
1600 * There was a firmware error. Take the hba offline and then
1601 * attempt to restart it.
1602 */
618a5230 1603 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
a257bf90
JS
1604 lpfc_offline(phba);
1605
1606 /* Wait for the ER1 bit to clear.*/
1607 while (phba->work_hs & HS_FFER1) {
1608 msleep(100);
9940b97b
JS
1609 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1610 phba->work_hs = UNPLUG_ERR;
1611 break;
1612 }
a257bf90
JS
1613 /* If driver is unloading let the worker thread continue */
1614 if (phba->pport->load_flag & FC_UNLOADING) {
1615 phba->work_hs = 0;
1616 break;
1617 }
1618 }
1619
1620 /*
1621 * This is to protect against a race condition in which the
1622 * first write to the host attention register clears the
1623 * host status register.
1624 */
1625 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1626 phba->work_hs = old_host_status & ~HS_FFER1;
1627
3772a991 1628 spin_lock_irq(&phba->hbalock);
a257bf90 1629 phba->hba_flag &= ~DEFER_ERATT;
3772a991 1630 spin_unlock_irq(&phba->hbalock);
a257bf90
JS
1631 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1632 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1633}
1634
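/*
 * Editorial aside: the ER1 wait above is a bounded poll loop with two
 * escape hatches (register read failure and driver unload). A hedged,
 * userspace-runnable sketch of the same shape; all names below are
 * illustrative, not driver API.
 */
#include <stdbool.h>
#include <unistd.h>

static unsigned int poll_until_clear(int (*read_status)(unsigned int *out),
				     bool (*unloading)(void),
				     unsigned int busy_bit)
{
	unsigned int status;

	if (read_status(&status))
		return 0;			/* read failure: bail out */
	while (status & busy_bit) {
		usleep(100 * 1000);		/* 100 ms, as the driver sleeps */
		if (read_status(&status)) {
			status = 0;		/* treat as an unplug error */
			break;
		}
		if (unloading()) {
			status = 0;		/* let the worker continue */
			break;
		}
	}
	return status;
}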
3772a991
JS
1635static void
1636lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1637{
1638 struct lpfc_board_event_header board_event;
1639 struct Scsi_Host *shost;
1640
1641 board_event.event_type = FC_REG_BOARD_EVENT;
1642 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1643 shost = lpfc_shost_from_vport(phba->pport);
1644 fc_host_post_vendor_event(shost, fc_get_event_number(),
1645 sizeof(board_event),
1646 (char *) &board_event,
1647 LPFC_NL_VENDOR_ID);
1648}
1649
e59058c4 1650/**
3772a991 1651 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
e59058c4
JS
1652 * @phba: pointer to lpfc hba data structure.
1653 *
1654 * This routine is invoked to handle the following HBA hardware error
1655 * conditions:
1656 * 1 - HBA error attention interrupt
1657 * 2 - DMA ring index out of range
1658 * 3 - Mailbox command came back as unknown
1659 **/
3772a991
JS
1660static void
1661lpfc_handle_eratt_s3(struct lpfc_hba *phba)
dea3101e 1662{
2e0fef85 1663 struct lpfc_vport *vport = phba->pport;
2e0fef85 1664 struct lpfc_sli *psli = &phba->sli;
d2873e4c 1665 uint32_t event_data;
57127f15
JS
1666 unsigned long temperature;
1667 struct temp_event temp_event_data;
92d7f7b0 1668 struct Scsi_Host *shost;
2e0fef85 1669
8d63f375 1670 /* If the pci channel is offline, ignore possible errors,
3772a991
JS
1671 * since we cannot communicate with the pci card anyway.
1672 */
1673 if (pci_channel_offline(phba->pcidev)) {
1674 spin_lock_irq(&phba->hbalock);
1675 phba->hba_flag &= ~DEFER_ERATT;
1676 spin_unlock_irq(&phba->hbalock);
8d63f375 1677 return;
3772a991
JS
1678 }
1679
13815c83
JS
1680 /* If resets are disabled then leave the HBA alone and return */
1681 if (!phba->cfg_enable_hba_reset)
1682 return;
dea3101e 1683
ea2151b4 1684 /* Send an internal error event to mgmt application */
3772a991 1685 lpfc_board_errevt_to_mgmt(phba);
ea2151b4 1686
a257bf90
JS
1687 if (phba->hba_flag & DEFER_ERATT)
1688 lpfc_handle_deferred_eratt(phba);
1689
dcf2a4e0
JS
1690 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1691 if (phba->work_hs & HS_FFER6)
1692 /* Re-establishing Link */
1693 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1694 "1301 Re-establishing Link "
1695 "Data: x%x x%x x%x\n",
1696 phba->work_hs, phba->work_status[0],
1697 phba->work_status[1]);
1698 if (phba->work_hs & HS_FFER8)
1699 /* Device Zeroization */
1700 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1701 "2861 Host Authentication device "
1702 "zeroization Data:x%x x%x x%x\n",
1703 phba->work_hs, phba->work_status[0],
1704 phba->work_status[1]);
58da1ffb 1705
92d7f7b0 1706 spin_lock_irq(&phba->hbalock);
f4b4c68f 1707 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
92d7f7b0 1708 spin_unlock_irq(&phba->hbalock);
dea3101e 1709
1710 /*
1711 * Firmware stops when it triggered erratt with HS_FFER6.
1712 * That could cause the I/Os to be dropped by the firmware.
1713 * Error out the iocbs (I/Os) on the txcmplq and let the SCSI
1714 * layer retry them after re-establishing link.
1715 */
db55fba8 1716 lpfc_sli_abort_fcp_rings(phba);
dea3101e 1717
dea3101e 1718 /*
1719 * There was a firmware error. Take the hba offline and then
1720 * attempt to restart it.
1721 */
618a5230 1722 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
dea3101e 1723 lpfc_offline(phba);
41415862 1724 lpfc_sli_brdrestart(phba);
dea3101e 1725 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
46fa311e 1726 lpfc_unblock_mgmt_io(phba);
dea3101e 1727 return;
1728 }
46fa311e 1729 lpfc_unblock_mgmt_io(phba);
57127f15
JS
1730 } else if (phba->work_hs & HS_CRIT_TEMP) {
1731 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1732 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1733 temp_event_data.event_code = LPFC_CRIT_TEMP;
1734 temp_event_data.data = (uint32_t)temperature;
1735
1736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
d7c255b2 1737 "0406 Adapter maximum temperature exceeded "
57127f15
JS
1738 "(%ld), taking this port offline "
1739 "Data: x%x x%x x%x\n",
1740 temperature, phba->work_hs,
1741 phba->work_status[0], phba->work_status[1]);
1742
1743 shost = lpfc_shost_from_vport(phba->pport);
1744 fc_host_post_vendor_event(shost, fc_get_event_number(),
1745 sizeof(temp_event_data),
1746 (char *) &temp_event_data,
1747 SCSI_NL_VID_TYPE_PCI
1748 | PCI_VENDOR_ID_EMULEX);
1749
7af67051 1750 spin_lock_irq(&phba->hbalock);
7af67051
JS
1751 phba->over_temp_state = HBA_OVER_TEMP;
1752 spin_unlock_irq(&phba->hbalock);
09372820 1753 lpfc_offline_eratt(phba);
57127f15 1754
dea3101e 1755 } else {
1756 /* The if clause above forces this code path when the status
9399627f
JS
1757 * failure is a value other than FFER6. Do not call the offline
1758 * path twice. This is the adapter hardware error path.
dea3101e 1759 */
1760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e8b62011 1761 "0457 Adapter Hardware Error "
dea3101e 1762 "Data: x%x x%x x%x\n",
e8b62011 1763 phba->work_hs,
dea3101e 1764 phba->work_status[0], phba->work_status[1]);
1765
d2873e4c 1766 event_data = FC_REG_DUMP_EVENT;
92d7f7b0 1767 shost = lpfc_shost_from_vport(vport);
2e0fef85 1768 fc_host_post_vendor_event(shost, fc_get_event_number(),
d2873e4c
JS
1769 sizeof(event_data), (char *) &event_data,
1770 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1771
09372820 1772 lpfc_offline_eratt(phba);
dea3101e 1773 }
9399627f 1774 return;
dea3101e 1775}
1776
618a5230
JS
1777/**
1778 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1779 * @phba: pointer to lpfc hba data structure.
1780 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: flag to log the "Reset Needed: Attempting Port Recovery" message.
1781 *
1782 * This routine is invoked to perform an SLI4 port PCI function reset in
1783 * response to port status register polling attention. It waits for port
1784 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1785 * During this process, interrupt vectors are freed and later requested
1786 * for handling possible port resource change.
1787 **/
1788static int
e10b2022
JS
1789lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1790 bool en_rn_msg)
618a5230
JS
1791{
1792 int rc;
1793 uint32_t intr_mode;
1794
27d6ac0a 1795 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
65791f1f
JS
1796 LPFC_SLI_INTF_IF_TYPE_2) {
1797 /*
1798 * On error status condition, the driver needs to wait for port
1799 * ready before performing reset.
1800 */
1801 rc = lpfc_sli4_pdev_status_reg_wait(phba);
0e916ee7 1802 if (rc)
65791f1f
JS
1803 return rc;
1804 }
0e916ee7 1805
65791f1f
JS
1806 /* need reset: attempt for port recovery */
1807 if (en_rn_msg)
1808 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1809 "2887 Reset Needed: Attempting Port "
1810 "Recovery...\n");
1811 lpfc_offline_prep(phba, mbx_action);
84f2ddf8
JS
1812 lpfc_sli_flush_fcp_rings(phba);
1813 lpfc_sli_flush_nvme_rings(phba);
65791f1f
JS
1814 lpfc_offline(phba);
1815 /* release interrupt for possible resource change */
1816 lpfc_sli4_disable_intr(phba);
5a9eeff5
JS
1817 rc = lpfc_sli_brdrestart(phba);
1818 if (rc) {
1819 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1820 "6309 Failed to restart board\n");
1821 return rc;
1822 }
65791f1f
JS
1823 /* request and enable interrupt */
1824 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1825 if (intr_mode == LPFC_INTR_ERROR) {
1826 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1827 "3175 Failed to enable interrupt\n");
1828 return -EIO;
618a5230 1829 }
65791f1f
JS
1830 phba->intr_mode = intr_mode;
1831 rc = lpfc_online(phba);
1832 if (rc == 0)
1833 lpfc_unblock_mgmt_io(phba);
1834
618a5230
JS
1835 return rc;
1836}
1837
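/*
 * Editorial aside: the function reset above is a strict sequence --
 * quiesce, release interrupts, restart the board, reacquire interrupts,
 * bring the port online -- where a failed step aborts the recovery.
 * A minimal sketch of that control flow (illustrative names only):
 */
static int recover_port_demo(int (*restart_board)(void),
			     int (*enable_intr)(void),
			     int (*bring_online)(void))
{
	int rc;

	/* offline prep, ring flushes, and interrupt release happen first */
	rc = restart_board();
	if (rc)
		return rc;		/* "Failed to restart board" */
	rc = enable_intr();
	if (rc)
		return rc;		/* "Failed to enable interrupt" */
	return bring_online();		/* unblock mgmt I/O only on success */
}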
da0436e9
JS
1838/**
1839 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1840 * @phba: pointer to lpfc hba data structure.
1841 *
1842 * This routine is invoked to handle the SLI4 HBA hardware error attention
1843 * conditions.
1844 **/
1845static void
1846lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1847{
1848 struct lpfc_vport *vport = phba->pport;
1849 uint32_t event_data;
1850 struct Scsi_Host *shost;
2fcee4bf 1851 uint32_t if_type;
2e90f4b5
JS
1852 struct lpfc_register portstat_reg = {0};
1853 uint32_t reg_err1, reg_err2;
1854 uint32_t uerrlo_reg, uemasklo_reg;
65791f1f 1855 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
e10b2022 1856 bool en_rn_msg = true;
946727dc 1857 struct temp_event temp_event_data;
65791f1f
JS
1858 struct lpfc_register portsmphr_reg;
1859 int rc, i;
da0436e9
JS
1860
1861 /* If the pci channel is offline, ignore possible errors, since
1862 * we cannot communicate with the pci card anyway.
1863 */
32a93100
JS
1864 if (pci_channel_offline(phba->pcidev)) {
1865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1866 "3166 pci channel is offline\n");
1867 lpfc_sli4_offline_eratt(phba);
da0436e9 1868 return;
32a93100 1869 }
da0436e9 1870
65791f1f 1871 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
2fcee4bf
JS
1872 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1873 switch (if_type) {
1874 case LPFC_SLI_INTF_IF_TYPE_0:
2e90f4b5
JS
1875 pci_rd_rc1 = lpfc_readl(
1876 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1877 &uerrlo_reg);
1878 pci_rd_rc2 = lpfc_readl(
1879 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1880 &uemasklo_reg);
1881 /* consider PCI bus read error as pci_channel_offline */
1882 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1883 return;
65791f1f
JS
1884 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1885 lpfc_sli4_offline_eratt(phba);
1886 return;
1887 }
1888 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1889 "7623 Checking UE recoverable");
1890
1891 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1892 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1893 &portsmphr_reg.word0))
1894 continue;
1895
1896 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1897 &portsmphr_reg);
1898 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1899 LPFC_PORT_SEM_UE_RECOVERABLE)
1900 break;
1901 /* Sleep for 1 sec before checking the semaphore */
1902 msleep(1000);
1903 }
1904
1905 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1906 "4827 smphr_port_status x%x : Waited %dSec",
1907 smphr_port_status, i);
1908
1909 /* Recoverable UE, reset the HBA device */
1910 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1911 LPFC_PORT_SEM_UE_RECOVERABLE) {
1912 for (i = 0; i < 20; i++) {
1913 msleep(1000);
1914 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1915 &portsmphr_reg.word0) &&
1916 (LPFC_POST_STAGE_PORT_READY ==
1917 bf_get(lpfc_port_smphr_port_status,
1918 &portsmphr_reg))) {
1919 rc = lpfc_sli4_port_sta_fn_reset(phba,
1920 LPFC_MBX_NO_WAIT, en_rn_msg);
1921 if (rc == 0)
1922 return;
1923 lpfc_printf_log(phba,
1924 KERN_ERR, LOG_INIT,
1925 "4215 Failed to recover UE");
1926 break;
1927 }
1928 }
1929 }
1930 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1931 "7624 Firmware not ready: Failing UE recovery,"
1932 " waited %dSec", i);
8c24a4f6 1933 phba->link_state = LPFC_HBA_ERROR;
2fcee4bf 1934 break;
946727dc 1935
2fcee4bf 1936 case LPFC_SLI_INTF_IF_TYPE_2:
27d6ac0a 1937 case LPFC_SLI_INTF_IF_TYPE_6:
2e90f4b5
JS
1938 pci_rd_rc1 = lpfc_readl(
1939 phba->sli4_hba.u.if_type2.STATUSregaddr,
1940 &portstat_reg.word0);
1941 /* consider PCI bus read error as pci_channel_offline */
6b5151fd
JS
1942 if (pci_rd_rc1 == -EIO) {
1943 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1944 "3151 PCI bus read access failure: x%x\n",
1945 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
32a93100 1946 lpfc_sli4_offline_eratt(phba);
2e90f4b5 1947 return;
6b5151fd 1948 }
2e90f4b5
JS
1949 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1950 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2fcee4bf 1951 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2fcee4bf
JS
1952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1953 "2889 Port Overtemperature event, "
946727dc
JS
1954 "taking port offline Data: x%x x%x\n",
1955 reg_err1, reg_err2);
1956
310429ef 1957 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
946727dc
JS
1958 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1959 temp_event_data.event_code = LPFC_CRIT_TEMP;
1960 temp_event_data.data = 0xFFFFFFFF;
1961
1962 shost = lpfc_shost_from_vport(phba->pport);
1963 fc_host_post_vendor_event(shost, fc_get_event_number(),
1964 sizeof(temp_event_data),
1965 (char *)&temp_event_data,
1966 SCSI_NL_VID_TYPE_PCI
1967 | PCI_VENDOR_ID_EMULEX);
1968
2fcee4bf
JS
1969 spin_lock_irq(&phba->hbalock);
1970 phba->over_temp_state = HBA_OVER_TEMP;
1971 spin_unlock_irq(&phba->hbalock);
1972 lpfc_sli4_offline_eratt(phba);
946727dc 1973 return;
2fcee4bf 1974 }
2e90f4b5 1975 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
e10b2022 1976 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2e90f4b5 1977 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
e10b2022
JS
1978 "3143 Port Down: Firmware Update "
1979 "Detected\n");
1980 en_rn_msg = false;
1981 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2e90f4b5
JS
1982 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1984 "3144 Port Down: Debug Dump\n");
1985 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1986 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1987 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1988 "3145 Port Down: Provisioning\n");
618a5230 1989
946727dc
JS
1990 /* If resets are disabled then leave the HBA alone and return */
1991 if (!phba->cfg_enable_hba_reset)
1992 return;
1993
618a5230 1994 /* Check port status register for function reset */
e10b2022
JS
1995 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1996 en_rn_msg);
618a5230
JS
1997 if (rc == 0) {
1998 /* don't report event on forced debug dump */
1999 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2000 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2001 return;
2002 else
2003 break;
2fcee4bf 2004 }
618a5230 2005 /* fall through for not able to recover */
6b5151fd 2006 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8c24a4f6
JS
2007 "3152 Unrecoverable error\n");
2008 phba->link_state = LPFC_HBA_ERROR;
2fcee4bf
JS
2009 break;
2010 case LPFC_SLI_INTF_IF_TYPE_1:
2011 default:
2012 break;
2013 }
2e90f4b5
JS
2014 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2015 "3123 Report dump event to upper layer\n");
2016 /* Send an internal error event to mgmt application */
2017 lpfc_board_errevt_to_mgmt(phba);
2018
2019 event_data = FC_REG_DUMP_EVENT;
2020 shost = lpfc_shost_from_vport(vport);
2021 fc_host_post_vendor_event(shost, fc_get_event_number(),
2022 sizeof(event_data), (char *) &event_data,
2023 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
da0436e9
JS
2024}
2025
2026/**
2027 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2028 * @phba: pointer to lpfc HBA data structure.
2029 *
2030 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2031 * routine via the API jump table function pointer in the lpfc_hba struct.
2036 **/
2037void
2038lpfc_handle_eratt(struct lpfc_hba *phba)
2039{
2040 (*phba->lpfc_handle_eratt)(phba);
2041}
2042
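/*
 * Editorial aside: lpfc_handle_eratt() is the SLI-revision dispatch
 * point; lpfc_handle_eratt_s3/_s4 are bound to the per-HBA function
 * pointer elsewhere in the driver. A standalone sketch of the
 * jump-table pattern (types and names here are illustrative):
 */
struct hba_demo {
	void (*handle_eratt)(struct hba_demo *hba);
};

static void handle_eratt_s3_demo(struct hba_demo *hba) { /* SLI3 path */ }
static void handle_eratt_s4_demo(struct hba_demo *hba) { /* SLI4 path */ }

static void bind_demo(struct hba_demo *hba, int sli_rev4)
{
	/* done once at setup, standing in for the driver's API table */
	hba->handle_eratt = sli_rev4 ? handle_eratt_s4_demo
				     : handle_eratt_s3_demo;
}

static void handle_eratt_demo(struct hba_demo *hba)
{
	/* one call site; the bound revision decides the behavior */
	(*hba->handle_eratt)(hba);
}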
e59058c4 2043/**
3621a710 2044 * lpfc_handle_latt - The HBA link event handler
e59058c4
JS
2045 * @phba: pointer to lpfc hba data structure.
2046 *
2047 * This routine is invoked from the worker thread to handle an HBA host
895427bd 2048 * attention link event. SLI3 only.
e59058c4 2049 **/
dea3101e 2050void
2e0fef85 2051lpfc_handle_latt(struct lpfc_hba *phba)
dea3101e 2052{
2e0fef85
JS
2053 struct lpfc_vport *vport = phba->pport;
2054 struct lpfc_sli *psli = &phba->sli;
dea3101e 2055 LPFC_MBOXQ_t *pmb;
2056 volatile uint32_t control;
2057 struct lpfc_dmabuf *mp;
09372820 2058 int rc = 0;
dea3101e 2059
2060 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
09372820
JS
2061 if (!pmb) {
2062 rc = 1;
dea3101e 2063 goto lpfc_handle_latt_err_exit;
09372820 2064 }
dea3101e 2065
2066 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
09372820
JS
2067 if (!mp) {
2068 rc = 2;
dea3101e 2069 goto lpfc_handle_latt_free_pmb;
09372820 2070 }
dea3101e 2071
2072 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
09372820
JS
2073 if (!mp->virt) {
2074 rc = 3;
dea3101e 2075 goto lpfc_handle_latt_free_mp;
09372820 2076 }
dea3101e 2077
6281bfe0 2078 /* Cleanup any outstanding ELS commands */
549e55cd 2079 lpfc_els_flush_all_cmd(phba);
dea3101e 2080
2081 psli->slistat.link_event++;
76a95d75
JS
2082 lpfc_read_topology(phba, pmb, mp);
2083 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2e0fef85 2084 pmb->vport = vport;
0d2b6b83 2085 /* Block ELS IOCBs until we have processed this mbox command */
895427bd 2086 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
0b727fea 2087 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
09372820
JS
2088 if (rc == MBX_NOT_FINISHED) {
2089 rc = 4;
14691150 2090 goto lpfc_handle_latt_free_mbuf;
09372820 2091 }
dea3101e 2092
2093 /* Clear Link Attention in HA REG */
2e0fef85 2094 spin_lock_irq(&phba->hbalock);
dea3101e 2095 writel(HA_LATT, phba->HAregaddr);
2096 readl(phba->HAregaddr); /* flush */
2e0fef85 2097 spin_unlock_irq(&phba->hbalock);
dea3101e 2098
2099 return;
2100
14691150 2101lpfc_handle_latt_free_mbuf:
895427bd 2102 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
14691150 2103 lpfc_mbuf_free(phba, mp->virt, mp->phys);
dea3101e 2104lpfc_handle_latt_free_mp:
2105 kfree(mp);
2106lpfc_handle_latt_free_pmb:
1dcb58e5 2107 mempool_free(pmb, phba->mbox_mem_pool);
dea3101e 2108lpfc_handle_latt_err_exit:
2109 /* Enable Link attention interrupts */
2e0fef85 2110 spin_lock_irq(&phba->hbalock);
dea3101e 2111 psli->sli_flag |= LPFC_PROCESS_LA;
2112 control = readl(phba->HCregaddr);
2113 control |= HC_LAINT_ENA;
2114 writel(control, phba->HCregaddr);
2115 readl(phba->HCregaddr); /* flush */
2116
2117 /* Clear Link Attention in HA REG */
2118 writel(HA_LATT, phba->HAregaddr);
2119 readl(phba->HAregaddr); /* flush */
2e0fef85 2120 spin_unlock_irq(&phba->hbalock);
dea3101e 2121 lpfc_linkdown(phba);
2e0fef85 2122 phba->link_state = LPFC_HBA_ERROR;
dea3101e 2123
09372820
JS
2124 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2125 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
dea3101e 2126
2127 return;
2128}
2129
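/*
 * Editorial aside: lpfc_handle_latt() uses the staged-goto unwind
 * common in kernel code: each failure jumps to the label that frees
 * only what was already acquired, and rc records which stage failed
 * (here reported in the "0300 LATT" message). Minimal standalone
 * shape, with illustrative names:
 */
#include <stdlib.h>

static int staged_alloc_demo(void **out_a, void **out_b)
{
	void *a, *b;

	a = malloc(16);
	if (!a)
		return 1;		/* stage 1 failed, nothing to undo */
	b = malloc(16);
	if (!b)
		goto free_a;		/* undo stage 1 only */
	*out_a = a;
	*out_b = b;
	return 0;
free_a:
	free(a);
	return 2;			/* stage 2 failed */
}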
e59058c4 2130/**
3621a710 2131 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
e59058c4
JS
2132 * @phba: pointer to lpfc hba data structure.
2133 * @vpd: pointer to the vital product data.
2134 * @len: length of the vital product data in bytes.
2135 *
2136 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2137 * an array of characters. In this routine, the ModelName, ProgramType, and
2138 * ModelDesc, etc. fields of the phba data structure will be populated.
2139 *
2140 * Return codes
2141 * 0 - pointer to the VPD passed in is NULL
2142 * 1 - success
2143 **/
3772a991 2144int
2e0fef85 2145lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
dea3101e 2146{
2147 uint8_t lenlo, lenhi;
07da60c1 2148 int Length;
dea3101e 2149 int i, j;
2150 int finished = 0;
2151 int index = 0;
2152
2153 if (!vpd)
2154 return 0;
2155
2156 /* Vital Product */
ed957684 2157 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011 2158 "0455 Vital Product Data: x%x x%x x%x x%x\n",
dea3101e 2159 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2160 (uint32_t) vpd[3]);
74b72a59 2161 while (!finished && (index < (len - 4))) {
dea3101e 2162 switch (vpd[index]) {
2163 case 0x82:
74b72a59 2164 case 0x91:
dea3101e 2165 index += 1;
2166 lenlo = vpd[index];
2167 index += 1;
2168 lenhi = vpd[index];
2169 index += 1;
2170 i = ((((unsigned short)lenhi) << 8) + lenlo);
2171 index += i;
2172 break;
2173 case 0x90:
2174 index += 1;
2175 lenlo = vpd[index];
2176 index += 1;
2177 lenhi = vpd[index];
2178 index += 1;
2179 Length = ((((unsigned short)lenhi) << 8) + lenlo);
74b72a59
JW
2180 if (Length > len - index)
2181 Length = len - index;
dea3101e 2182 while (Length > 0) {
2183 /* Look for Serial Number */
2184 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2185 index += 2;
2186 i = vpd[index];
2187 index += 1;
2188 j = 0;
2189 Length -= (3+i);
2190 while (i--) {
2191 phba->SerialNumber[j++] = vpd[index++];
2192 if (j == 31)
2193 break;
2194 }
2195 phba->SerialNumber[j] = 0;
2196 continue;
2197 }
2198 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2199 phba->vpd_flag |= VPD_MODEL_DESC;
2200 index += 2;
2201 i = vpd[index];
2202 index += 1;
2203 j = 0;
2204 Length -= (3+i);
2205 while (i--) {
2206 phba->ModelDesc[j++] = vpd[index++];
2207 if (j == 255)
2208 break;
2209 }
2210 phba->ModelDesc[j] = 0;
2211 continue;
2212 }
2213 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2214 phba->vpd_flag |= VPD_MODEL_NAME;
2215 index += 2;
2216 i = vpd[index];
2217 index += 1;
2218 j = 0;
2219 Length -= (3+i);
2220 while (i--) {
2221 phba->ModelName[j++] = vpd[index++];
2222 if (j == 79)
2223 break;
2224 }
2225 phba->ModelName[j] = 0;
2226 continue;
2227 }
2228 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2229 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2230 index += 2;
2231 i = vpd[index];
2232 index += 1;
2233 j = 0;
2234 Length -= (3+i);
2235 while (i--) {
2236 phba->ProgramType[j++] = vpd[index++];
2237 if (j == 255)
2238 break;
2239 }
2240 phba->ProgramType[j] = 0;
2241 continue;
2242 }
2243 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2244 phba->vpd_flag |= VPD_PORT;
2245 index += 2;
2246 i = vpd[index];
2247 index += 1;
2248 j = 0;
2249 Length -= (3+i);
2250 while (i--) {
cd1c8301
JS
2251 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2252 (phba->sli4_hba.pport_name_sta ==
2253 LPFC_SLI4_PPNAME_GET)) {
2254 j++;
2255 index++;
2256 } else
2257 phba->Port[j++] = vpd[index++];
2258 if (j == 19)
2259 break;
dea3101e 2260 }
cd1c8301
JS
2261 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2262 (phba->sli4_hba.pport_name_sta ==
2263 LPFC_SLI4_PPNAME_NON))
2264 phba->Port[j] = 0;
dea3101e 2265 continue;
2266 }
2267 else {
2268 index += 2;
2269 i = vpd[index];
2270 index += 1;
2271 index += i;
2272 Length -= (3 + i);
2273 }
2274 }
2275 finished = 0;
2276 break;
2277 case 0x78:
2278 finished = 1;
2279 break;
2280 default:
2281 index++;
2282 break;
2283 }
74b72a59 2284 }
dea3101e 2285
2286 return 1;
2287}
2288
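/*
 * Editorial aside: the VPD walk above is a small TLV parser -- a
 * one-byte tag (0x82/0x90/0x91/0x78), a little-endian two-byte length,
 * then the payload; the driver descends into the 0x90 area to pick out
 * the SN/'V'-keyed subfields. A hedged, self-contained sketch of just
 * the outer walk (illustrative, not the driver's exact code):
 */
#include <stddef.h>
#include <stdint.h>

static int vpd_walk_demo(const uint8_t *vpd, size_t len)
{
	size_t index = 0;

	while (index + 3 < len) {
		uint8_t tag = vpd[index];
		size_t alen = (size_t)vpd[index + 1] |
			      ((size_t)vpd[index + 2] << 8); /* lenlo, lenhi */

		if (tag == 0x78)		/* end tag */
			return 1;
		if (tag == 0x82 || tag == 0x90 || tag == 0x91)
			index += 3 + alen;	/* skip (or parse) the area */
		else
			index++;		/* resync byte-by-byte */
	}
	return 0;
}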
e59058c4 2289/**
3621a710 2290 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
e59058c4
JS
2291 * @phba: pointer to lpfc hba data structure.
2292 * @mdp: pointer to the data structure to hold the derived model name.
2293 * @descp: pointer to the data structure to hold the derived description.
2294 *
2295 * This routine retrieves HBA's description based on its registered PCI device
2296 * ID. The @descp passed into this function points to an array of 256 chars. It
2297 * shall be returned with the model name, maximum speed, and the host bus type.
2298 * The @mdp passed into this function points to an array of 80 chars. When the
2299 * function returns, the @mdp will be filled with the model name.
2300 **/
dea3101e 2301static void
2e0fef85 2302lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
dea3101e 2303{
2304 lpfc_vpd_t *vp;
fefcb2b6 2305 uint16_t dev_id = phba->pcidev->device;
74b72a59 2306 int max_speed;
84774a4d 2307 int GE = 0;
da0436e9 2308 int oneConnect = 0; /* default is not a oneConnect */
74b72a59 2309 struct {
a747c9ce
JS
2310 char *name;
2311 char *bus;
2312 char *function;
2313 } m = {"<Unknown>", "", ""};
74b72a59
JW
2314
2315 if (mdp && mdp[0] != '\0'
2316 && descp && descp[0] != '\0')
2317 return;
2318
fbd8a6ba
JS
2319 if (phba->lmt & LMT_64Gb)
2320 max_speed = 64;
2321 else if (phba->lmt & LMT_32Gb)
d38dd52c
JS
2322 max_speed = 32;
2323 else if (phba->lmt & LMT_16Gb)
c0c11512
JS
2324 max_speed = 16;
2325 else if (phba->lmt & LMT_10Gb)
74b72a59
JW
2326 max_speed = 10;
2327 else if (phba->lmt & LMT_8Gb)
2328 max_speed = 8;
2329 else if (phba->lmt & LMT_4Gb)
2330 max_speed = 4;
2331 else if (phba->lmt & LMT_2Gb)
2332 max_speed = 2;
4169d868 2333 else if (phba->lmt & LMT_1Gb)
74b72a59 2334 max_speed = 1;
4169d868
JS
2335 else
2336 max_speed = 0;
dea3101e 2337
2338 vp = &phba->vpd;
dea3101e 2339
e4adb204 2340 switch (dev_id) {
06325e74 2341 case PCI_DEVICE_ID_FIREFLY:
12222f4f
JS
2342 m = (typeof(m)){"LP6000", "PCI",
2343 "Obsolete, Unsupported Fibre Channel Adapter"};
06325e74 2344 break;
dea3101e 2345 case PCI_DEVICE_ID_SUPERFLY:
2346 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
12222f4f 2347 m = (typeof(m)){"LP7000", "PCI", ""};
dea3101e 2348 else
12222f4f
JS
2349 m = (typeof(m)){"LP7000E", "PCI", ""};
2350 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
dea3101e 2351 break;
2352 case PCI_DEVICE_ID_DRAGONFLY:
a747c9ce 2353 m = (typeof(m)){"LP8000", "PCI",
12222f4f 2354 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2355 break;
2356 case PCI_DEVICE_ID_CENTAUR:
2357 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
12222f4f 2358 m = (typeof(m)){"LP9002", "PCI", ""};
dea3101e 2359 else
12222f4f
JS
2360 m = (typeof(m)){"LP9000", "PCI", ""};
2361 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
dea3101e 2362 break;
2363 case PCI_DEVICE_ID_RFLY:
a747c9ce 2364 m = (typeof(m)){"LP952", "PCI",
12222f4f 2365 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2366 break;
2367 case PCI_DEVICE_ID_PEGASUS:
a747c9ce 2368 m = (typeof(m)){"LP9802", "PCI-X",
12222f4f 2369 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2370 break;
2371 case PCI_DEVICE_ID_THOR:
a747c9ce 2372 m = (typeof(m)){"LP10000", "PCI-X",
12222f4f 2373 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2374 break;
2375 case PCI_DEVICE_ID_VIPER:
a747c9ce 2376 m = (typeof(m)){"LPX1000", "PCI-X",
12222f4f 2377 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2378 break;
2379 case PCI_DEVICE_ID_PFLY:
a747c9ce 2380 m = (typeof(m)){"LP982", "PCI-X",
12222f4f 2381 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2382 break;
2383 case PCI_DEVICE_ID_TFLY:
a747c9ce 2384 m = (typeof(m)){"LP1050", "PCI-X",
12222f4f 2385 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2386 break;
2387 case PCI_DEVICE_ID_HELIOS:
a747c9ce 2388 m = (typeof(m)){"LP11000", "PCI-X2",
12222f4f 2389 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2390 break;
e4adb204 2391 case PCI_DEVICE_ID_HELIOS_SCSP:
a747c9ce 2392 m = (typeof(m)){"LP11000-SP", "PCI-X2",
12222f4f 2393 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2394 break;
2395 case PCI_DEVICE_ID_HELIOS_DCSP:
a747c9ce 2396 m = (typeof(m)){"LP11002-SP", "PCI-X2",
12222f4f 2397 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2398 break;
2399 case PCI_DEVICE_ID_NEPTUNE:
12222f4f
JS
2400 m = (typeof(m)){"LPe1000", "PCIe",
2401 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2402 break;
2403 case PCI_DEVICE_ID_NEPTUNE_SCSP:
12222f4f
JS
2404 m = (typeof(m)){"LPe1000-SP", "PCIe",
2405 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204
JSEC
2406 break;
2407 case PCI_DEVICE_ID_NEPTUNE_DCSP:
12222f4f
JS
2408 m = (typeof(m)){"LPe1002-SP", "PCIe",
2409 "Obsolete, Unsupported Fibre Channel Adapter"};
e4adb204 2410 break;
dea3101e 2411 case PCI_DEVICE_ID_BMID:
a747c9ce 2412 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
dea3101e 2413 break;
2414 case PCI_DEVICE_ID_BSMB:
12222f4f
JS
2415 m = (typeof(m)){"LP111", "PCI-X2",
2416 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2417 break;
2418 case PCI_DEVICE_ID_ZEPHYR:
a747c9ce 2419 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
dea3101e 2420 break;
e4adb204 2421 case PCI_DEVICE_ID_ZEPHYR_SCSP:
a747c9ce 2422 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
e4adb204
JSEC
2423 break;
2424 case PCI_DEVICE_ID_ZEPHYR_DCSP:
a747c9ce 2425 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
a257bf90 2426 GE = 1;
e4adb204 2427 break;
dea3101e 2428 case PCI_DEVICE_ID_ZMID:
a747c9ce 2429 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
dea3101e 2430 break;
2431 case PCI_DEVICE_ID_ZSMB:
a747c9ce 2432 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
dea3101e 2433 break;
2434 case PCI_DEVICE_ID_LP101:
12222f4f
JS
2435 m = (typeof(m)){"LP101", "PCI-X",
2436 "Obsolete, Unsupported Fibre Channel Adapter"};
dea3101e 2437 break;
2438 case PCI_DEVICE_ID_LP10000S:
12222f4f
JS
2439 m = (typeof(m)){"LP10000-S", "PCI",
2440 "Obsolete, Unsupported Fibre Channel Adapter"};
06325e74 2441 break;
e4adb204 2442 case PCI_DEVICE_ID_LP11000S:
12222f4f
JS
2443 m = (typeof(m)){"LP11000-S", "PCI-X2",
2444 "Obsolete, Unsupported Fibre Channel Adapter"};
18a3b596 2445 break;
e4adb204 2446 case PCI_DEVICE_ID_LPE11000S:
12222f4f
JS
2447 m = (typeof(m)){"LPe11000-S", "PCIe",
2448 "Obsolete, Unsupported Fibre Channel Adapter"};
5cc36b3c 2449 break;
b87eab38 2450 case PCI_DEVICE_ID_SAT:
a747c9ce 2451 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2452 break;
2453 case PCI_DEVICE_ID_SAT_MID:
a747c9ce 2454 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2455 break;
2456 case PCI_DEVICE_ID_SAT_SMB:
a747c9ce 2457 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2458 break;
2459 case PCI_DEVICE_ID_SAT_DCSP:
a747c9ce 2460 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2461 break;
2462 case PCI_DEVICE_ID_SAT_SCSP:
a747c9ce 2463 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
b87eab38
JS
2464 break;
2465 case PCI_DEVICE_ID_SAT_S:
a747c9ce 2466 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
b87eab38 2467 break;
84774a4d 2468 case PCI_DEVICE_ID_HORNET:
12222f4f
JS
2469 m = (typeof(m)){"LP21000", "PCIe",
2470 "Obsolete, Unsupported FCoE Adapter"};
84774a4d
JS
2471 GE = 1;
2472 break;
2473 case PCI_DEVICE_ID_PROTEUS_VF:
a747c9ce 2474 m = (typeof(m)){"LPev12000", "PCIe IOV",
12222f4f 2475 "Obsolete, Unsupported Fibre Channel Adapter"};
84774a4d
JS
2476 break;
2477 case PCI_DEVICE_ID_PROTEUS_PF:
a747c9ce 2478 m = (typeof(m)){"LPev12000", "PCIe IOV",
12222f4f 2479 "Obsolete, Unsupported Fibre Channel Adapter"};
84774a4d
JS
2480 break;
2481 case PCI_DEVICE_ID_PROTEUS_S:
a747c9ce 2482 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
12222f4f 2483 "Obsolete, Unsupported Fibre Channel Adapter"};
84774a4d 2484 break;
da0436e9
JS
2485 case PCI_DEVICE_ID_TIGERSHARK:
2486 oneConnect = 1;
a747c9ce 2487 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
da0436e9 2488 break;
a747c9ce 2489 case PCI_DEVICE_ID_TOMCAT:
6669f9bb 2490 oneConnect = 1;
a747c9ce
JS
2491 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2492 break;
2493 case PCI_DEVICE_ID_FALCON:
2494 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2495 "EmulexSecure Fibre"};
6669f9bb 2496 break;
98fc5dd9
JS
2497 case PCI_DEVICE_ID_BALIUS:
2498 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
12222f4f 2499 "Obsolete, Unsupported Fibre Channel Adapter"};
98fc5dd9 2500 break;
085c647c 2501 case PCI_DEVICE_ID_LANCER_FC:
c0c11512 2502 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
085c647c 2503 break;
12222f4f
JS
2504 case PCI_DEVICE_ID_LANCER_FC_VF:
2505 m = (typeof(m)){"LPe16000", "PCIe",
2506 "Obsolete, Unsupported Fibre Channel Adapter"};
2507 break;
085c647c
JS
2508 case PCI_DEVICE_ID_LANCER_FCOE:
2509 oneConnect = 1;
079b5c91 2510 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
085c647c 2511 break;
12222f4f
JS
2512 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2513 oneConnect = 1;
2514 m = (typeof(m)){"OCe15100", "PCIe",
2515 "Obsolete, Unsupported FCoE"};
2516 break;
d38dd52c
JS
2517 case PCI_DEVICE_ID_LANCER_G6_FC:
2518 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2519 break;
c238b9b6
JS
2520 case PCI_DEVICE_ID_LANCER_G7_FC:
2521 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2522 break;
f8cafd38
JS
2523 case PCI_DEVICE_ID_SKYHAWK:
2524 case PCI_DEVICE_ID_SKYHAWK_VF:
2525 oneConnect = 1;
2526 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2527 break;
5cc36b3c 2528 default:
a747c9ce 2529 m = (typeof(m)){"Unknown", "", ""};
e4adb204 2530 break;
dea3101e 2531 }
74b72a59
JW
2532
2533 if (mdp && mdp[0] == '\0')
2534 snprintf(mdp, 79,"%s", m.name);
c0c11512
JS
2535 /*
2536 * OneConnect HBAs require special processing; they are all initiators
da0436e9
JS
2537 * and we put the port number on the end.
2538 */
2539 if (descp && descp[0] == '\0') {
2540 if (oneConnect)
2541 snprintf(descp, 255,
4169d868 2542 "Emulex OneConnect %s, %s Initiator %s",
a747c9ce 2543 m.name, m.function,
da0436e9 2544 phba->Port);
4169d868
JS
2545 else if (max_speed == 0)
2546 snprintf(descp, 255,
290237d2 2547 "Emulex %s %s %s",
4169d868 2548 m.name, m.bus, m.function);
da0436e9
JS
2549 else
2550 snprintf(descp, 255,
2551 "Emulex %s %d%s %s %s",
a747c9ce
JS
2552 m.name, max_speed, (GE) ? "GE" : "Gb",
2553 m.bus, m.function);
da0436e9 2554 }
dea3101e 2555}
2556
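/*
 * Editorial aside: the big switch above reassigns the whole aggregate
 * at once with a compound literal, m = (typeof(m)){...}, rather than
 * setting name/bus/function individually. A standalone demo of that
 * idiom (the struct and values here are illustrative):
 */
struct model_demo {
	const char *name;
	const char *bus;
	const char *function;
};

static void pick_model_demo(struct model_demo *m, int known)
{
	/* one-shot aggregate assignment replaces all three members */
	if (known)
		*m = (struct model_demo){ "LPe32000", "PCIe",
					  "Fibre Channel Adapter" };
	else
		*m = (struct model_demo){ "<Unknown>", "", "" };
}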
e59058c4 2557/**
3621a710 2558 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
e59058c4
JS
2559 * @phba: pointer to lpfc hba data structure.
2560 * @pring: pointer to an IOCB ring.
2561 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2562 *
2563 * This routine posts a given number of IOCBs with the associated DMA buffer
2564 * descriptors specified by the cnt argument to the given IOCB ring.
2565 *
2566 * Return codes
2567 * The number of IOCBs NOT able to be posted to the IOCB ring.
2568 **/
dea3101e 2569int
495a714c 2570lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
dea3101e 2571{
2572 IOCB_t *icmd;
0bd4ca25 2573 struct lpfc_iocbq *iocb;
dea3101e 2574 struct lpfc_dmabuf *mp1, *mp2;
2575
2576 cnt += pring->missbufcnt;
2577
2578 /* While there are buffers to post */
2579 while (cnt > 0) {
2580 /* Allocate buffer for command iocb */
0bd4ca25 2581 iocb = lpfc_sli_get_iocbq(phba);
dea3101e 2582 if (iocb == NULL) {
2583 pring->missbufcnt = cnt;
2584 return cnt;
2585 }
dea3101e 2586 icmd = &iocb->iocb;
2587
2588 /* 2 buffers can be posted per command */
2589 /* Allocate buffer to post */
2590 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2591 if (mp1)
98c9ea5c
JS
2592 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2593 if (!mp1 || !mp1->virt) {
c9475cb0 2594 kfree(mp1);
604a3e30 2595 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2596 pring->missbufcnt = cnt;
2597 return cnt;
2598 }
2599
2600 INIT_LIST_HEAD(&mp1->list);
2601 /* Allocate buffer to post */
2602 if (cnt > 1) {
2603 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2604 if (mp2)
2605 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2606 &mp2->phys);
98c9ea5c 2607 if (!mp2 || !mp2->virt) {
c9475cb0 2608 kfree(mp2);
dea3101e 2609 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2610 kfree(mp1);
604a3e30 2611 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2612 pring->missbufcnt = cnt;
2613 return cnt;
2614 }
2615
2616 INIT_LIST_HEAD(&mp2->list);
2617 } else {
2618 mp2 = NULL;
2619 }
2620
2621 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2622 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2623 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2624 icmd->ulpBdeCount = 1;
2625 cnt--;
2626 if (mp2) {
2627 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2628 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2629 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2630 cnt--;
2631 icmd->ulpBdeCount = 2;
2632 }
2633
2634 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2635 icmd->ulpLe = 1;
2636
3772a991
JS
2637 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2638 IOCB_ERROR) {
dea3101e 2639 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2640 kfree(mp1);
2641 cnt++;
2642 if (mp2) {
2643 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2644 kfree(mp2);
2645 cnt++;
2646 }
604a3e30 2647 lpfc_sli_release_iocbq(phba, iocb);
dea3101e 2648 pring->missbufcnt = cnt;
dea3101e 2649 return cnt;
2650 }
dea3101e 2651 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
92d7f7b0 2652 if (mp2)
dea3101e 2653 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
dea3101e 2654 }
2655 pring->missbufcnt = 0;
2656 return 0;
2657}
2658
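/*
 * Editorial aside: lpfc_post_buffer() reports progress by returning the
 * number of buffers it could NOT post, which it also parks in
 * pring->missbufcnt so the next call can top the ring back up. A sketch
 * of that retry contract (illustrative names only):
 */
static int post_buffers_demo(int wanted, int carried_over,
			     int (*post_one)(void))
{
	int remaining = wanted + carried_over;	/* cnt += missbufcnt */

	while (remaining > 0) {
		if (post_one() != 0)
			return remaining;	/* caller retries these later */
		remaining--;
	}
	return 0;
}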
e59058c4 2659/**
3621a710 2660 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
e59058c4
JS
2661 * @phba: pointer to lpfc hba data structure.
2662 *
2663 * This routine posts initial receive IOCB buffers to the ELS ring. The
2664 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
895427bd 2665 * set to 64 IOCBs. SLI3 only.
e59058c4
JS
2666 *
2667 * Return codes
2668 * 0 - success (currently always success)
2669 **/
dea3101e 2670static int
2e0fef85 2671lpfc_post_rcv_buf(struct lpfc_hba *phba)
dea3101e 2672{
2673 struct lpfc_sli *psli = &phba->sli;
2674
2675 /* Ring 0, ELS / CT buffers */
895427bd 2676 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
dea3101e 2677 /* Ring 2 - FCP no buffers needed */
2678
2679 return 0;
2680}
2681
2682#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2683
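/*
 * Editorial aside: S(N,V) is a 32-bit rotate-left (SHA-1's "S^n"
 * function). Because the macro expands V twice, callers must avoid
 * side effects in its arguments. An equivalent inline-function form
 * (a sketch, not driver API):
 */
#include <stdint.h>

static inline uint32_t rotl32_demo(uint32_t v, unsigned int n)
{
	/* valid for n in 1..31, which covers the uses below (1, 5, 30) */
	return (v << n) | (v >> (32 - n));
}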
e59058c4 2684/**
3621a710 2685 * lpfc_sha_init - Set up initial array of hash table entries
e59058c4
JS
2686 * @HashResultPointer: pointer to an array as hash table.
2687 *
2688 * This routine sets up the initial values to the array of hash table entries
2689 * for the LC HBAs.
2690 **/
dea3101e 2691static void
2692lpfc_sha_init(uint32_t * HashResultPointer)
2693{
2694 HashResultPointer[0] = 0x67452301;
2695 HashResultPointer[1] = 0xEFCDAB89;
2696 HashResultPointer[2] = 0x98BADCFE;
2697 HashResultPointer[3] = 0x10325476;
2698 HashResultPointer[4] = 0xC3D2E1F0;
2699}
2700
e59058c4 2701/**
3621a710 2702 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
e59058c4
JS
2703 * @HashResultPointer: pointer to an initial/result hash table.
2704 * @HashWorkingPointer: pointer to an working hash table.
2705 *
2706 * This routine iterates an initial hash table pointed to by @HashResultPointer
2707 * with the values from the working hash table pointed to by @HashWorkingPointer.
2708 * The results are put back into the initial hash table, returned through
2709 * the @HashResultPointer as the result hash table.
2710 **/
dea3101e 2711static void
2712lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2713{
2714 int t;
2715 uint32_t TEMP;
2716 uint32_t A, B, C, D, E;
2717 t = 16;
2718 do {
2719 HashWorkingPointer[t] =
2720 S(1,
2721 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2722 8] ^
2723 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2724 } while (++t <= 79);
2725 t = 0;
2726 A = HashResultPointer[0];
2727 B = HashResultPointer[1];
2728 C = HashResultPointer[2];
2729 D = HashResultPointer[3];
2730 E = HashResultPointer[4];
2731
2732 do {
2733 if (t < 20) {
2734 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2735 } else if (t < 40) {
2736 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2737 } else if (t < 60) {
2738 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2739 } else {
2740 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2741 }
2742 TEMP += S(5, A) + E + HashWorkingPointer[t];
2743 E = D;
2744 D = C;
2745 C = S(30, B);
2746 B = A;
2747 A = TEMP;
2748 } while (++t <= 79);
2749
2750 HashResultPointer[0] += A;
2751 HashResultPointer[1] += B;
2752 HashResultPointer[2] += C;
2753 HashResultPointer[3] += D;
2754 HashResultPointer[4] += E;
2755
2756}
2757
e59058c4 2758/**
3621a710 2759 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
e59058c4
JS
2760 * @RandomChallenge: pointer to the entry of host challenge random number array.
2761 * @HashWorking: pointer to the entry of the working hash array.
2762 *
2763 * This routine calculates the working hash array referred to by @HashWorking
2764 * from the challenge random numbers associated with the host, referred to by
2765 * @RandomChallenge. The result is put into the entry of the working hash
2766 * array and returned by reference through @HashWorking.
2767 **/
dea3101e 2768static void
2769lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2770{
2771 *HashWorking = (*RandomChallenge ^ *HashWorking);
2772}
2773
e59058c4 2774/**
3621a710 2775 * lpfc_hba_init - Perform special handling for LC HBA initialization
e59058c4
JS
2776 * @phba: pointer to lpfc hba data structure.
2777 * @hbainit: pointer to an array of unsigned 32-bit integers.
2778 *
2779 * This routine performs the special handling for LC HBA initialization.
2780 **/
dea3101e 2781void
2782lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2783{
2784 int t;
2785 uint32_t *HashWorking;
2e0fef85 2786 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
dea3101e 2787
bbfbbbc1 2788 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
dea3101e 2789 if (!HashWorking)
2790 return;
2791
dea3101e 2792 HashWorking[0] = HashWorking[78] = *pwwnn++;
2793 HashWorking[1] = HashWorking[79] = *pwwnn;
2794
2795 for (t = 0; t < 7; t++)
2796 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2797
2798 lpfc_sha_init(hbainit);
2799 lpfc_sha_iterate(hbainit, HashWorking);
2800 kfree(HashWorking);
2801}
2802
e59058c4 2803/**
3621a710 2804 * lpfc_cleanup - Performs vport cleanups before deleting a vport
e59058c4
JS
2805 * @vport: pointer to a virtual N_Port data structure.
2806 *
2807 * This routine performs the necessary cleanups before deleting the @vport.
2808 * It invokes the discovery state machine to perform necessary state
2809 * transitions and to release the ndlps associated with the @vport. Note,
2810 * the physical port is treated as @vport 0.
2811 **/
87af33fe 2812void
2e0fef85 2813lpfc_cleanup(struct lpfc_vport *vport)
dea3101e 2814{
87af33fe 2815 struct lpfc_hba *phba = vport->phba;
dea3101e 2816 struct lpfc_nodelist *ndlp, *next_ndlp;
a8adb832 2817 int i = 0;
dea3101e 2818
87af33fe
JS
2819 if (phba->link_state > LPFC_LINK_DOWN)
2820 lpfc_port_link_failure(vport);
2821
2822 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
2823 if (!NLP_CHK_NODE_ACT(ndlp)) {
2824 ndlp = lpfc_enable_node(vport, ndlp,
2825 NLP_STE_UNUSED_NODE);
2826 if (!ndlp)
2827 continue;
2828 spin_lock_irq(&phba->ndlp_lock);
2829 NLP_SET_FREE_REQ(ndlp);
2830 spin_unlock_irq(&phba->ndlp_lock);
2831 /* Trigger the release of the ndlp memory */
2832 lpfc_nlp_put(ndlp);
2833 continue;
2834 }
2835 spin_lock_irq(&phba->ndlp_lock);
2836 if (NLP_CHK_FREE_REQ(ndlp)) {
2837 /* The ndlp should not be in memory free mode already */
2838 spin_unlock_irq(&phba->ndlp_lock);
2839 continue;
2840 } else
2841 /* Indicate request for freeing ndlp memory */
2842 NLP_SET_FREE_REQ(ndlp);
2843 spin_unlock_irq(&phba->ndlp_lock);
2844
58da1ffb
JS
2845 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2846 ndlp->nlp_DID == Fabric_DID) {
2847 /* Just free up ndlp with Fabric_DID for vports */
2848 lpfc_nlp_put(ndlp);
2849 continue;
2850 }
2851
eff4a01b
JS
2852 /* take care of nodes in unused state before the state
2853 * machine takes action.
2854 */
2855 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2856 lpfc_nlp_put(ndlp);
2857 continue;
2858 }
2859
87af33fe
JS
2860 if (ndlp->nlp_type & NLP_FABRIC)
2861 lpfc_disc_state_machine(vport, ndlp, NULL,
2862 NLP_EVT_DEVICE_RECOVERY);
e47c9093 2863
87af33fe
JS
2864 lpfc_disc_state_machine(vport, ndlp, NULL,
2865 NLP_EVT_DEVICE_RM);
2866 }
2867
a8adb832
JS
2868 /* At this point, ALL ndlp's should be gone
2869 * because of the previous NLP_EVT_DEVICE_RM.
2870 * Let's wait for this to happen, if needed.
2871 */
87af33fe 2872 while (!list_empty(&vport->fc_nodes)) {
a8adb832 2873 if (i++ > 3000) {
87af33fe 2874 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
a8adb832 2875 "0233 Nodelist not empty\n");
e47c9093
JS
2876 list_for_each_entry_safe(ndlp, next_ndlp,
2877 &vport->fc_nodes, nlp_listp) {
2878 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2879 LOG_NODE,
d7c255b2 2880 "0282 did:x%x ndlp:x%p "
e47c9093
JS
2881 "usgmap:x%x refcnt:%d\n",
2882 ndlp->nlp_DID, (void *)ndlp,
2883 ndlp->nlp_usg_map,
2c935bc5 2884 kref_read(&ndlp->kref));
e47c9093 2885 }
a8adb832 2886 break;
87af33fe 2887 }
a8adb832
JS
2888
2889 /* Wait for any activity on ndlps to settle */
2890 msleep(10);
87af33fe 2891 }
1151e3ec 2892 lpfc_cleanup_vports_rrqs(vport, NULL);
dea3101e 2893}
2894
e59058c4 2895/**
3621a710 2896 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
e59058c4
JS
2897 * @vport: pointer to a virtual N_Port data structure.
2898 *
2899 * This routine stops all the timers associated with a @vport. This function
2900 * is invoked before disabling or deleting a @vport. Note that the physical
2901 * port is treated as @vport 0.
2902 **/
92d7f7b0
JS
2903void
2904lpfc_stop_vport_timers(struct lpfc_vport *vport)
dea3101e 2905{
92d7f7b0 2906 del_timer_sync(&vport->els_tmofunc);
92494144 2907 del_timer_sync(&vport->delayed_disc_tmo);
92d7f7b0
JS
2908 lpfc_can_disctmo(vport);
2909 return;
dea3101e 2910}
2911
ecfd03c6
JS
2912/**
2913 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2914 * @phba: pointer to lpfc hba data structure.
2915 *
2916 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2917 * caller of this routine should already hold the host lock.
2918 **/
2919void
2920__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2921{
5ac6b303
JS
2922 /* Clear pending FCF rediscovery wait flag */
2923 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2924
ecfd03c6
JS
2925 /* Now, try to stop the timer */
2926 del_timer(&phba->fcf.redisc_wait);
2927}
2928
2929/**
2930 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2931 * @phba: pointer to lpfc hba data structure.
2932 *
2933 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2934 * checks whether the FCF rediscovery wait timer is pending with the host
2935 * lock held before proceeding with disabling the timer and clearing the
2936 * wait timer pending flag.
2937 **/
2938void
2939lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2940{
2941 spin_lock_irq(&phba->hbalock);
2942 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2943 /* FCF rediscovery timer already fired or stopped */
2944 spin_unlock_irq(&phba->hbalock);
2945 return;
2946 }
2947 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
5ac6b303
JS
2948 /* Clear failover in progress flags */
2949 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
ecfd03c6
JS
2950 spin_unlock_irq(&phba->hbalock);
2951}
2952
e59058c4 2953/**
3772a991 2954 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
e59058c4
JS
2955 * @phba: pointer to lpfc hba data structure.
2956 *
2957 * This routine stops all the timers associated with an HBA. This function is
2958 * invoked before either putting a HBA offline or unloading the driver.
2959 **/
3772a991
JS
2960void
2961lpfc_stop_hba_timers(struct lpfc_hba *phba)
dea3101e 2962{
cdb42bec
JS
2963 if (phba->pport)
2964 lpfc_stop_vport_timers(phba->pport);
32517fc0 2965 cancel_delayed_work_sync(&phba->eq_delay_work);
2e0fef85 2966 del_timer_sync(&phba->sli.mbox_tmo);
92d7f7b0 2967 del_timer_sync(&phba->fabric_block_timer);
9399627f 2968 del_timer_sync(&phba->eratt_poll);
3772a991 2969 del_timer_sync(&phba->hb_tmofunc);
1151e3ec
JS
2970 if (phba->sli_rev == LPFC_SLI_REV4) {
2971 del_timer_sync(&phba->rrq_tmr);
2972 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2973 }
3772a991
JS
2974 phba->hb_outstanding = 0;
2975
2976 switch (phba->pci_dev_grp) {
2977 case LPFC_PCI_DEV_LP:
2978 /* Stop any LightPulse device specific driver timers */
2979 del_timer_sync(&phba->fcp_poll_timer);
2980 break;
2981 case LPFC_PCI_DEV_OC:
cc0e5f1c 2982 /* Stop any OneConnect device specific driver timers */
ecfd03c6 2983 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3772a991
JS
2984 break;
2985 default:
2986 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2987 "0297 Invalid device group (x%x)\n",
2988 phba->pci_dev_grp);
2989 break;
2990 }
2e0fef85 2991 return;
dea3101e 2992}
2993
e59058c4 2994/**
3621a710 2995 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
e59058c4
JS
2996 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag to wait (LPFC_MBX_WAIT) or not wait (LPFC_MBX_NO_WAIT)
 *              for an active mailbox command to complete.
2997 *
2998 * This routine marks an HBA's management interface as blocked. Once the HBA's
2999 * management interface is marked as blocked, all the user space access to
3000 * the HBA, whether they are from sysfs interface or libdfc interface will
3001 * all be blocked. The HBA is set to block the management interface when the
3002 * driver prepares the HBA interface for online or offline.
3003 **/
a6ababd2 3004static void
618a5230 3005lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
a6ababd2
AB
3006{
3007 unsigned long iflag;
6e7288d9
JS
3008 uint8_t actcmd = MBX_HEARTBEAT;
3009 unsigned long timeout;
3010
a6ababd2
AB
3011 spin_lock_irqsave(&phba->hbalock, iflag);
3012 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
618a5230
JS
3013 spin_unlock_irqrestore(&phba->hbalock, iflag);
3014 if (mbx_action == LPFC_MBX_NO_WAIT)
3015 return;
3016 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3017 spin_lock_irqsave(&phba->hbalock, iflag);
a183a15f 3018 if (phba->sli.mbox_active) {
6e7288d9 3019 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
a183a15f
JS
3020 /* Determine how long we might wait for the active mailbox
3021 * command to be gracefully completed by firmware.
3022 */
3023 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3024 phba->sli.mbox_active) * 1000) + jiffies;
3025 }
a6ababd2 3026 spin_unlock_irqrestore(&phba->hbalock, iflag);
a183a15f 3027
6e7288d9
JS
3028 /* Wait for the outstanding mailbox command to complete */
3029 while (phba->sli.mbox_active) {
3030 /* Check active mailbox complete status every 2ms */
3031 msleep(2);
3032 if (time_after(jiffies, timeout)) {
3033 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3034 "2813 Mgmt IO is Blocked %x "
3035 "- mbox cmd %x still active\n",
3036 phba->sli.sli_flag, actcmd);
3037 break;
3038 }
3039 }
a6ababd2
AB
3040}
3041
6b5151fd
JS
3042/**
3043 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3044 * @phba: pointer to lpfc hba data structure.
3045 *
3046 * Allocate RPIs for all active remote nodes. This is needed whenever
3047 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3048 * is to fix up the temporary RPI assignments.
3049 **/
3050void
3051lpfc_sli4_node_prep(struct lpfc_hba *phba)
3052{
3053 struct lpfc_nodelist *ndlp, *next_ndlp;
3054 struct lpfc_vport **vports;
9d3d340d
JS
3055 int i, rpi;
3056 unsigned long flags;
6b5151fd
JS
3057
3058 if (phba->sli_rev != LPFC_SLI_REV4)
3059 return;
3060
3061 vports = lpfc_create_vport_work_array(phba);
9d3d340d
JS
3062 if (vports == NULL)
3063 return;
6b5151fd 3064
9d3d340d
JS
3065 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3066 if (vports[i]->load_flag & FC_UNLOADING)
3067 continue;
3068
3069 list_for_each_entry_safe(ndlp, next_ndlp,
3070 &vports[i]->fc_nodes,
3071 nlp_listp) {
3072 if (!NLP_CHK_NODE_ACT(ndlp))
3073 continue;
3074 rpi = lpfc_sli4_alloc_rpi(phba);
3075 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3076 spin_lock_irqsave(&phba->ndlp_lock, flags);
3077 NLP_CLR_NODE_ACT(ndlp);
3078 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3079 continue;
6b5151fd 3080 }
9d3d340d
JS
3081 ndlp->nlp_rpi = rpi;
3082 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3083 "0009 rpi:%x DID:%x "
3084 "flg:%x map:%x %p\n", ndlp->nlp_rpi,
3085 ndlp->nlp_DID, ndlp->nlp_flag,
3086 ndlp->nlp_usg_map, ndlp);
6b5151fd
JS
3087 }
3088 }
3089 lpfc_destroy_vport_work_array(phba, vports);
3090}
3091
c490850a
JS
3092/**
3093 * lpfc_create_expedite_pool - create expedite pool
3094 * @phba: pointer to lpfc hba data structure.
3095 *
3096 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3097 * to the expedite pool and marks them as expedite.
3098 **/
3999df75 3099static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
c490850a
JS
3100{
3101 struct lpfc_sli4_hdw_queue *qp;
3102 struct lpfc_io_buf *lpfc_ncmd;
3103 struct lpfc_io_buf *lpfc_ncmd_next;
3104 struct lpfc_epd_pool *epd_pool;
3105 unsigned long iflag;
3106
3107 epd_pool = &phba->epd_pool;
3108 qp = &phba->sli4_hba.hdwq[0];
3109
3110 spin_lock_init(&epd_pool->lock);
3111 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3112 spin_lock(&epd_pool->lock);
3113 INIT_LIST_HEAD(&epd_pool->list);
3114 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3115 &qp->lpfc_io_buf_list_put, list) {
3116 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3117 lpfc_ncmd->expedite = true;
3118 qp->put_io_bufs--;
3119 epd_pool->count++;
3120 if (epd_pool->count >= XRI_BATCH)
3121 break;
3122 }
3123 spin_unlock(&epd_pool->lock);
3124 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3125}
3126
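/*
 * Editorial aside: the pool create/destroy routines here batch-move
 * buffers between lists with list_move_tail() under two nested locks
 * (the per-HWQ put-list lock first, the pool lock second). A hedged,
 * self-contained sketch of the batch move on a minimal intrusive list
 * (the dlist_* names are illustrative stand-ins for <linux/list.h>):
 */
struct dlist { struct dlist *prev, *next; };

static void dlist_move_tail(struct dlist *e, struct dlist *head)
{
	/* unlink from the source list */
	e->prev->next = e->next;
	e->next->prev = e->prev;
	/* insert just before the sentinel, i.e. at the tail */
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

/* Move up to 'batch' entries from src to dst, as the XRI_BATCH move does. */
static unsigned int dlist_move_batch(struct dlist *src, struct dlist *dst,
				     unsigned int batch)
{
	unsigned int moved = 0;

	while (src->next != src && moved < batch) {
		dlist_move_tail(src->next, dst);
		moved++;
	}
	return moved;
}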
3127/**
3128 * lpfc_destroy_expedite_pool - destroy expedite pool
3129 * @phba: pointer to lpfc hba data structure.
3130 *
3131 * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put
3132 * of HWQ 0. Clear the mark.
3133 **/
3999df75 3134static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
c490850a
JS
3135{
3136 struct lpfc_sli4_hdw_queue *qp;
3137 struct lpfc_io_buf *lpfc_ncmd;
3138 struct lpfc_io_buf *lpfc_ncmd_next;
3139 struct lpfc_epd_pool *epd_pool;
3140 unsigned long iflag;
3141
3142 epd_pool = &phba->epd_pool;
3143 qp = &phba->sli4_hba.hdwq[0];
3144
3145 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3146 spin_lock(&epd_pool->lock);
3147 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3148 &epd_pool->list, list) {
3149 list_move_tail(&lpfc_ncmd->list,
3150 &qp->lpfc_io_buf_list_put);
3151 lpfc_ncmd->expedite = false;
3152 qp->put_io_bufs++;
3153 epd_pool->count--;
3154 }
3155 spin_unlock(&epd_pool->lock);
3156 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3157}
3158
3159/**
3160 * lpfc_create_multixri_pools - create multi-XRI pools
3161 * @phba: pointer to lpfc hba data structure.
3162 *
3163 * This routine initializes the public and private pools per HWQ, then
3164 * moves XRIs from lpfc_io_buf_list_put to the public pool. The high and
3165 * low watermarks are also initialized.
3166 **/
3167void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3168{
3169 u32 i, j;
3170 u32 hwq_count;
3171 u32 count_per_hwq;
3172 struct lpfc_io_buf *lpfc_ncmd;
3173 struct lpfc_io_buf *lpfc_ncmd_next;
3174 unsigned long iflag;
3175 struct lpfc_sli4_hdw_queue *qp;
3176 struct lpfc_multixri_pool *multixri_pool;
3177 struct lpfc_pbl_pool *pbl_pool;
3178 struct lpfc_pvt_pool *pvt_pool;
3179
3180 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3181 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3182 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3183 phba->sli4_hba.io_xri_cnt);
3184
3185 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3186 lpfc_create_expedite_pool(phba);
3187
3188 hwq_count = phba->cfg_hdw_queue;
3189 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3190
3191 for (i = 0; i < hwq_count; i++) {
3192 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3193
3194 if (!multixri_pool) {
3195 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3196 "1238 Failed to allocate memory for "
3197 "multixri_pool\n");
3198
3199 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3200 lpfc_destroy_expedite_pool(phba);
3201
3202 j = 0;
3203 while (j < i) {
3204 qp = &phba->sli4_hba.hdwq[j];
3205 kfree(qp->p_multixri_pool);
3206 j++;
3207 }
3208 phba->cfg_xri_rebalancing = 0;
3209 return;
3210 }
3211
3212 qp = &phba->sli4_hba.hdwq[i];
3213 qp->p_multixri_pool = multixri_pool;
3214
3215 multixri_pool->xri_limit = count_per_hwq;
3216 multixri_pool->rrb_next_hwqid = i;
3217
3218 /* Deal with public free xri pool */
3219 pbl_pool = &multixri_pool->pbl_pool;
3220 spin_lock_init(&pbl_pool->lock);
3221 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3222 spin_lock(&pbl_pool->lock);
3223 INIT_LIST_HEAD(&pbl_pool->list);
3224 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3225 &qp->lpfc_io_buf_list_put, list) {
3226 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3227 qp->put_io_bufs--;
3228 pbl_pool->count++;
3229 }
3230 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3231 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3232 pbl_pool->count, i);
3233 spin_unlock(&pbl_pool->lock);
3234 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3235
3236 /* Deal with private free xri pool */
3237 pvt_pool = &multixri_pool->pvt_pool;
3238 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3239 pvt_pool->low_watermark = XRI_BATCH;
3240 spin_lock_init(&pvt_pool->lock);
3241 spin_lock_irqsave(&pvt_pool->lock, iflag);
3242 INIT_LIST_HEAD(&pvt_pool->list);
3243 pvt_pool->count = 0;
3244 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3245 }
3246}
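
/*
 * Self-contained sketch of the per-HWQ budget arithmetic performed above,
 * using hypothetical totals; xri_limit and the high/low watermarks follow
 * the same formulas (limit split evenly, high = limit / 2, low = one batch).
 */
#include <stdio.h>

int main(void)
{
	unsigned int io_xri_cnt = 2048;	/* assumed total common XRIs */
	unsigned int hwq_count = 16;	/* assumed hardware queue count */
	unsigned int xri_batch = 8;	/* illustrative XRI_BATCH value */

	unsigned int xri_limit = io_xri_cnt / hwq_count;
	unsigned int high_wm = xri_limit / 2;
	unsigned int low_wm = xri_batch;

	printf("per-hwq limit=%u high_watermark=%u low_watermark=%u\n",
	       xri_limit, high_wm, low_wm);
	return 0;
}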
3247
3248/**
3249 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3250 * @phba: pointer to lpfc hba data structure.
3251 *
3252 * This routine returns XRIs from the public/private pools to lpfc_io_buf_list_put.
3253 **/
3999df75 3254static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
c490850a
JS
3255{
3256 u32 i;
3257 u32 hwq_count;
3258 struct lpfc_io_buf *lpfc_ncmd;
3259 struct lpfc_io_buf *lpfc_ncmd_next;
3260 unsigned long iflag;
3261 struct lpfc_sli4_hdw_queue *qp;
3262 struct lpfc_multixri_pool *multixri_pool;
3263 struct lpfc_pbl_pool *pbl_pool;
3264 struct lpfc_pvt_pool *pvt_pool;
3265
3266 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3267 lpfc_destroy_expedite_pool(phba);
3268
c66a9197
JS
3269 if (!(phba->pport->load_flag & FC_UNLOADING)) {
3270 lpfc_sli_flush_fcp_rings(phba);
3271
3272 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3273 lpfc_sli_flush_nvme_rings(phba);
3274 }
3275
c490850a
JS
3276 hwq_count = phba->cfg_hdw_queue;
3277
3278 for (i = 0; i < hwq_count; i++) {
3279 qp = &phba->sli4_hba.hdwq[i];
3280 multixri_pool = qp->p_multixri_pool;
3281 if (!multixri_pool)
3282 continue;
3283
3284 qp->p_multixri_pool = NULL;
3285
3286 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3287
3288 /* Deal with public free xri pool */
3289 pbl_pool = &multixri_pool->pbl_pool;
3290 spin_lock(&pbl_pool->lock);
3291
3292 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3293 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3294 pbl_pool->count, i);
3295
3296 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3297 &pbl_pool->list, list) {
3298 list_move_tail(&lpfc_ncmd->list,
3299 &qp->lpfc_io_buf_list_put);
3300 qp->put_io_bufs++;
3301 pbl_pool->count--;
3302 }
3303
3304 INIT_LIST_HEAD(&pbl_pool->list);
3305 pbl_pool->count = 0;
3306
3307 spin_unlock(&pbl_pool->lock);
3308
3309 /* Deal with private free xri pool */
3310 pvt_pool = &multixri_pool->pvt_pool;
3311 spin_lock(&pvt_pool->lock);
3312
3313 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3314 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3315 pvt_pool->count, i);
3316
3317 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3318 &pvt_pool->list, list) {
3319 list_move_tail(&lpfc_ncmd->list,
3320 &qp->lpfc_io_buf_list_put);
3321 qp->put_io_bufs++;
3322 pvt_pool->count--;
3323 }
3324
3325 INIT_LIST_HEAD(&pvt_pool->list);
3326 pvt_pool->count = 0;
3327
3328 spin_unlock(&pvt_pool->lock);
3329 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3330
3331 kfree(multixri_pool);
3332 }
3333}
3334
e59058c4 3335/**
3621a710 3336 * lpfc_online - Initialize and bring a HBA online
e59058c4
JS
3337 * @phba: pointer to lpfc hba data structure.
3338 *
3339 * This routine initializes the HBA and brings it online. During this
3340 * process, the management interface is blocked to prevent user space access
3341 * to the HBA interfering with the driver initialization.
3342 *
3343 * Return codes
3344 * 0 - successful
3345 * 1 - failed
3346 **/
dea3101e 3347int
2e0fef85 3348lpfc_online(struct lpfc_hba *phba)
dea3101e 3349{
372bd282 3350 struct lpfc_vport *vport;
549e55cd 3351 struct lpfc_vport **vports;
a145fda3 3352 int i, error = 0;
16a3a208 3353 bool vpis_cleared = false;
2e0fef85 3354
dea3101e 3355 if (!phba)
3356 return 0;
372bd282 3357 vport = phba->pport;
dea3101e 3358
2e0fef85 3359 if (!(vport->fc_flag & FC_OFFLINE_MODE))
dea3101e 3360 return 0;
3361
ed957684 3362 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
e8b62011 3363 "0458 Bring Adapter online\n");
dea3101e 3364
618a5230 3365 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
46fa311e 3366
da0436e9
JS
3367 if (phba->sli_rev == LPFC_SLI_REV4) {
3368 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3369 lpfc_unblock_mgmt_io(phba);
3370 return 1;
3371 }
16a3a208
JS
3372 spin_lock_irq(&phba->hbalock);
3373 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3374 vpis_cleared = true;
3375 spin_unlock_irq(&phba->hbalock);
a145fda3
DK
3376
3377 /* Reestablish the local initiator port.
3378 * The offline process destroyed the previous lport.
3379 */
3380 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3381 !phba->nvmet_support) {
3382 error = lpfc_nvme_create_localport(phba->pport);
3383 if (error)
3384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3385 "6132 NVME restore reg failed "
3386 "on nvmei error x%x\n", error);
3387 }
da0436e9 3388 } else {
895427bd 3389 lpfc_sli_queue_init(phba);
da0436e9
JS
3390 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3391 lpfc_unblock_mgmt_io(phba);
3392 return 1;
3393 }
46fa311e 3394 }
dea3101e 3395
549e55cd 3396 vports = lpfc_create_vport_work_array(phba);
aeb6641f 3397 if (vports != NULL) {
da0436e9 3398 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd
JS
3399 struct Scsi_Host *shost;
3400 shost = lpfc_shost_from_vport(vports[i]);
3401 spin_lock_irq(shost->host_lock);
3402 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3403 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3404 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
16a3a208 3405 if (phba->sli_rev == LPFC_SLI_REV4) {
1c6834a7 3406 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
16a3a208
JS
3407 if ((vpis_cleared) &&
3408 (vports[i]->port_type !=
3409 LPFC_PHYSICAL_PORT))
3410 vports[i]->vpi = 0;
3411 }
549e55cd
JS
3412 spin_unlock_irq(shost->host_lock);
3413 }
aeb6641f
AB
3414 }
3415 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 3416
c490850a
JS
3417 if (phba->cfg_xri_rebalancing)
3418 lpfc_create_multixri_pools(phba);
3419
46fa311e 3420 lpfc_unblock_mgmt_io(phba);
dea3101e 3421 return 0;
3422}
3423
e59058c4 3424/**
3621a710 3425 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
e59058c4
JS
3426 * @phba: pointer to lpfc hba data structure.
3427 *
3428 * This routine marks a HBA's management interface as not blocked. Once the
3429 * HBA's management interface is marked as not blocked, all user space
3430 * access to the HBA, whether from the sysfs interface or the libdfc
3431 * interface, is allowed. The HBA is set to block the management interface
3432 * when the driver prepares the HBA interface for online or offline, and is
3433 * set to unblock the management interface afterwards.
3434 **/
46fa311e
JS
3435void
3436lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3437{
3438 unsigned long iflag;
3439
2e0fef85
JS
3440 spin_lock_irqsave(&phba->hbalock, iflag);
3441 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3442 spin_unlock_irqrestore(&phba->hbalock, iflag);
46fa311e
JS
3443}
3444
e59058c4 3445/**
3621a710 3446 * lpfc_offline_prep - Prepare a HBA to be brought offline
e59058c4
JS
3447 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for the mailbox shutdown action.
3448 *
3449 * This routine is invoked to prepare a HBA to be brought offline. It performs
3450 * unregistration login to all the nodes on all vports and flushes the mailbox
3451 * queue to make it ready to be brought offline.
3452 **/
46fa311e 3453void
618a5230 3454lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
46fa311e 3455{
2e0fef85 3456 struct lpfc_vport *vport = phba->pport;
46fa311e 3457 struct lpfc_nodelist *ndlp, *next_ndlp;
87af33fe 3458 struct lpfc_vport **vports;
72100cc4 3459 struct Scsi_Host *shost;
87af33fe 3460 int i;
dea3101e 3461
2e0fef85 3462 if (vport->fc_flag & FC_OFFLINE_MODE)
46fa311e 3463 return;
dea3101e 3464
618a5230 3465 lpfc_block_mgmt_io(phba, mbx_action);
dea3101e 3466
3467 lpfc_linkdown(phba);
3468
87af33fe
JS
3469 /* Issue an unreg_login to all nodes on all vports */
3470 vports = lpfc_create_vport_work_array(phba);
3471 if (vports != NULL) {
da0436e9 3472 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
a8adb832
JS
3473 if (vports[i]->load_flag & FC_UNLOADING)
3474 continue;
72100cc4
JS
3475 shost = lpfc_shost_from_vport(vports[i]);
3476 spin_lock_irq(shost->host_lock);
c868595d 3477 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
695a814e
JS
3478 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3479 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
72100cc4 3480 spin_unlock_irq(shost->host_lock);
695a814e 3481
87af33fe
JS
3482 shost = lpfc_shost_from_vport(vports[i]);
3483 list_for_each_entry_safe(ndlp, next_ndlp,
3484 &vports[i]->fc_nodes,
3485 nlp_listp) {
e47c9093
JS
3486 if (!NLP_CHK_NODE_ACT(ndlp))
3487 continue;
87af33fe
JS
3488 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3489 continue;
3490 if (ndlp->nlp_type & NLP_FABRIC) {
3491 lpfc_disc_state_machine(vports[i], ndlp,
3492 NULL, NLP_EVT_DEVICE_RECOVERY);
3493 lpfc_disc_state_machine(vports[i], ndlp,
3494 NULL, NLP_EVT_DEVICE_RM);
3495 }
3496 spin_lock_irq(shost->host_lock);
3497 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
401ee0c1 3498 spin_unlock_irq(shost->host_lock);
6b5151fd
JS
3499 /*
3500 * Whenever an SLI4 port goes offline, free the
401ee0c1
JS
3501 * RPI. Get a new RPI when the adapter port
3502 * comes back online.
6b5151fd 3503 */
be6bb941
JS
3504 if (phba->sli_rev == LPFC_SLI_REV4) {
3505 lpfc_printf_vlog(ndlp->vport,
3506 KERN_INFO, LOG_NODE,
3507 "0011 lpfc_offline: "
3508 "ndlp:x%p did %x "
3509 "usgmap:x%x rpi:%x\n",
3510 ndlp, ndlp->nlp_DID,
3511 ndlp->nlp_usg_map,
3512 ndlp->nlp_rpi);
3513
6b5151fd 3514 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
be6bb941 3515 }
87af33fe
JS
3516 lpfc_unreg_rpi(vports[i], ndlp);
3517 }
3518 }
3519 }
09372820 3520 lpfc_destroy_vport_work_array(phba, vports);
dea3101e 3521
618a5230 3522 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
f485c18d
DK
3523
3524 if (phba->wq)
3525 flush_workqueue(phba->wq);
46fa311e
JS
3526}
3527
e59058c4 3528/**
3621a710 3529 * lpfc_offline - Bring a HBA offline
e59058c4
JS
3530 * @phba: pointer to lpfc hba data structure.
3531 *
3532 * This routine actually brings a HBA offline. It stops all the timers
3533 * associated with the HBA, brings down the SLI layer, and eventually
3534 * marks the HBA as in offline state for the upper layer protocol.
3535 **/
46fa311e 3536void
2e0fef85 3537lpfc_offline(struct lpfc_hba *phba)
46fa311e 3538{
549e55cd
JS
3539 struct Scsi_Host *shost;
3540 struct lpfc_vport **vports;
3541 int i;
46fa311e 3542
549e55cd 3543 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
46fa311e 3544 return;
688a8863 3545
da0436e9
JS
3546 /* stop port and all timers associated with this hba */
3547 lpfc_stop_port(phba);
4b40d02b
DK
3548
3549 /* Tear down the local and target port registrations. The
3550 * nvme transports need to clean up.
3551 */
3552 lpfc_nvmet_destroy_targetport(phba);
3553 lpfc_nvme_destroy_localport(phba->pport);
3554
51ef4c26
JS
3555 vports = lpfc_create_vport_work_array(phba);
3556 if (vports != NULL)
da0436e9 3557 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
51ef4c26 3558 lpfc_stop_vport_timers(vports[i]);
09372820 3559 lpfc_destroy_vport_work_array(phba, vports);
92d7f7b0 3560 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
e8b62011 3561 "0460 Bring Adapter offline\n");
dea3101e 3562 /* Bring down the SLI Layer and cleanup. The HBA is offline
3563 now. */
3564 lpfc_sli_hba_down(phba);
92d7f7b0 3565 spin_lock_irq(&phba->hbalock);
7054a606 3566 phba->work_ha = 0;
92d7f7b0 3567 spin_unlock_irq(&phba->hbalock);
549e55cd
JS
3568 vports = lpfc_create_vport_work_array(phba);
3569 if (vports != NULL)
da0436e9 3570 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
549e55cd 3571 shost = lpfc_shost_from_vport(vports[i]);
549e55cd
JS
3572 spin_lock_irq(shost->host_lock);
3573 vports[i]->work_port_events = 0;
3574 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3575 spin_unlock_irq(shost->host_lock);
3576 }
09372820 3577 lpfc_destroy_vport_work_array(phba, vports);
c490850a
JS
3578
3579 if (phba->cfg_xri_rebalancing)
3580 lpfc_destroy_multixri_pools(phba);
dea3101e 3581}
3582
e59058c4 3583/**
3621a710 3584 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
e59058c4
JS
3585 * @phba: pointer to lpfc hba data structure.
3586 *
3587 * This routine is to free all the SCSI buffers and IOCBs from the driver
3588 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3589 * the internal resources before the device is removed from the system.
e59058c4 3590 **/
8a9d2e80 3591static void
2e0fef85 3592lpfc_scsi_free(struct lpfc_hba *phba)
dea3101e 3593{
c490850a 3594 struct lpfc_io_buf *sb, *sb_next;
dea3101e 3595
895427bd
JS
3596 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3597 return;
3598
2e0fef85 3599 spin_lock_irq(&phba->hbalock);
a40fc5f0 3600
dea3101e 3601 /* Release all the lpfc_scsi_bufs maintained by this host. */
a40fc5f0
JS
3602
3603 spin_lock(&phba->scsi_buf_list_put_lock);
3604 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3605 list) {
dea3101e 3606 list_del(&sb->list);
771db5c0 3607 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
92d7f7b0 3608 sb->dma_handle);
dea3101e 3609 kfree(sb);
3610 phba->total_scsi_bufs--;
3611 }
a40fc5f0
JS
3612 spin_unlock(&phba->scsi_buf_list_put_lock);
3613
3614 spin_lock(&phba->scsi_buf_list_get_lock);
3615 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3616 list) {
dea3101e 3617 list_del(&sb->list);
771db5c0 3618 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
92d7f7b0 3619 sb->dma_handle);
dea3101e 3620 kfree(sb);
3621 phba->total_scsi_bufs--;
3622 }
a40fc5f0 3623 spin_unlock(&phba->scsi_buf_list_get_lock);
2e0fef85 3624 spin_unlock_irq(&phba->hbalock);
8a9d2e80 3625}
0794d601 3626
895427bd 3627/**
5e5b511d 3628 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
895427bd
JS
3629 * @phba: pointer to lpfc hba data structure.
3630 *
0794d601 3631 * This routine is to free all the IO buffers and IOCBs from the driver
895427bd
JS
3632 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3633 * the internal resources before the device is removed from the system.
3634 **/
c490850a 3635void
5e5b511d 3636lpfc_io_free(struct lpfc_hba *phba)
895427bd 3637{
c490850a 3638 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
5e5b511d
JS
3639 struct lpfc_sli4_hdw_queue *qp;
3640 int idx;
895427bd 3641
5e5b511d
JS
3642 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3643 qp = &phba->sli4_hba.hdwq[idx];
3644 /* Release all the lpfc_nvme_bufs maintained by this host. */
3645 spin_lock(&qp->io_buf_list_put_lock);
3646 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3647 &qp->lpfc_io_buf_list_put,
3648 list) {
3649 list_del(&lpfc_ncmd->list);
3650 qp->put_io_bufs--;
3651 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3652 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3653 kfree(lpfc_ncmd);
3654 qp->total_io_bufs--;
3655 }
3656 spin_unlock(&qp->io_buf_list_put_lock);
3657
3658 spin_lock(&qp->io_buf_list_get_lock);
3659 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3660 &qp->lpfc_io_buf_list_get,
3661 list) {
3662 list_del(&lpfc_ncmd->list);
3663 qp->get_io_bufs--;
3664 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3665 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3666 kfree(lpfc_ncmd);
3667 qp->total_io_bufs--;
3668 }
3669 spin_unlock(&qp->io_buf_list_get_lock);
895427bd 3670 }
895427bd 3671}
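
/*
 * Self-contained sketch of the two-list drain above: free every node on a
 * queue's put and get lists and keep the running total honest. The sk_*
 * types are simplified stand-ins for the lpfc structures.
 */
#include <stdlib.h>

struct sk_buf {
	struct sk_buf *next;
};

struct sk_queue {
	struct sk_buf *get_list;
	struct sk_buf *put_list;
	int total_bufs;
};

static void sk_drain(struct sk_buf **head, int *total)
{
	while (*head) {
		struct sk_buf *b = *head;

		*head = b->next;	/* unlink before freeing */
		free(b);
		(*total)--;
	}
}

static void sk_io_free(struct sk_queue *qp)
{
	sk_drain(&qp->put_list, &qp->total_bufs);
	sk_drain(&qp->get_list, &qp->total_bufs);
}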
0794d601 3672
8a9d2e80 3673/**
895427bd 3674 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
8a9d2e80
JS
3675 * @phba: pointer to lpfc hba data structure.
3676 *
3677 * This routine first calculates the sizes of the current els and allocated
3678 * scsi sgl lists, and then goes through all sgls to update the physical
3679 * XRIs assigned due to port function reset. During port initialization, the
3680 * current els and allocated scsi sgl lists are 0s.
3681 *
3682 * Return codes
3683 * 0 - successful (for now, it always returns 0)
3684 **/
3685int
895427bd 3686lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
8a9d2e80
JS
3687{
3688 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
895427bd 3689 uint16_t i, lxri, xri_cnt, els_xri_cnt;
8a9d2e80 3690 LIST_HEAD(els_sgl_list);
8a9d2e80
JS
3691 int rc;
3692
3693 /*
3694 * update on pci function's els xri-sgl list
3695 */
3696 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
895427bd 3697
8a9d2e80
JS
3698 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3699 /* els xri-sgl expanded */
3700 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3701 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3702 "3157 ELS xri-sgl count increased from "
3703 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3704 els_xri_cnt);
3705 /* allocate the additional els sgls */
3706 for (i = 0; i < xri_cnt; i++) {
3707 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3708 GFP_KERNEL);
3709 if (sglq_entry == NULL) {
3710 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3711 "2562 Failure to allocate an "
3712 "ELS sgl entry:%d\n", i);
3713 rc = -ENOMEM;
3714 goto out_free_mem;
3715 }
3716 sglq_entry->buff_type = GEN_BUFF_TYPE;
3717 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3718 &sglq_entry->phys);
3719 if (sglq_entry->virt == NULL) {
3720 kfree(sglq_entry);
3721 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3722 "2563 Failure to allocate an "
3723 "ELS mbuf:%d\n", i);
3724 rc = -ENOMEM;
3725 goto out_free_mem;
3726 }
3727 sglq_entry->sgl = sglq_entry->virt;
3728 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3729 sglq_entry->state = SGL_FREED;
3730 list_add_tail(&sglq_entry->list, &els_sgl_list);
3731 }
38c20673 3732 spin_lock_irq(&phba->hbalock);
895427bd
JS
3733 spin_lock(&phba->sli4_hba.sgl_list_lock);
3734 list_splice_init(&els_sgl_list,
3735 &phba->sli4_hba.lpfc_els_sgl_list);
3736 spin_unlock(&phba->sli4_hba.sgl_list_lock);
38c20673 3737 spin_unlock_irq(&phba->hbalock);
8a9d2e80
JS
3738 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3739 /* els xri-sgl shrunk */
3740 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3741 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3742 "3158 ELS xri-sgl count decreased from "
3743 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3744 els_xri_cnt);
3745 spin_lock_irq(&phba->hbalock);
895427bd
JS
3746 spin_lock(&phba->sli4_hba.sgl_list_lock);
3747 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3748 &els_sgl_list);
8a9d2e80
JS
3749 /* release extra els sgls from list */
3750 for (i = 0; i < xri_cnt; i++) {
3751 list_remove_head(&els_sgl_list,
3752 sglq_entry, struct lpfc_sglq, list);
3753 if (sglq_entry) {
895427bd
JS
3754 __lpfc_mbuf_free(phba, sglq_entry->virt,
3755 sglq_entry->phys);
8a9d2e80
JS
3756 kfree(sglq_entry);
3757 }
3758 }
895427bd
JS
3759 list_splice_init(&els_sgl_list,
3760 &phba->sli4_hba.lpfc_els_sgl_list);
3761 spin_unlock(&phba->sli4_hba.sgl_list_lock);
8a9d2e80
JS
3762 spin_unlock_irq(&phba->hbalock);
3763 } else
3764 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3765 "3163 ELS xri-sgl count unchanged: %d\n",
3766 els_xri_cnt);
3767 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3768
3769 /* update xris to els sgls on the list */
3770 sglq_entry = NULL;
3771 sglq_entry_next = NULL;
3772 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
895427bd 3773 &phba->sli4_hba.lpfc_els_sgl_list, list) {
8a9d2e80
JS
3774 lxri = lpfc_sli4_next_xritag(phba);
3775 if (lxri == NO_XRI) {
3776 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3777 "2400 Failed to allocate xri for "
3778 "ELS sgl\n");
3779 rc = -ENOMEM;
3780 goto out_free_mem;
3781 }
3782 sglq_entry->sli4_lxritag = lxri;
3783 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3784 }
895427bd
JS
3785 return 0;
3786
3787out_free_mem:
3788 lpfc_free_els_sgl_list(phba);
3789 return rc;
3790}
3791
f358dd0c
JS
3792/**
3793 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3794 * @phba: pointer to lpfc hba data structure.
3795 *
3796 * This routine first calculates the sizes of the current els and allocated
3797 * nvmet sgl lists, and then goes through all sgls to update the physical
3798 * XRIs assigned due to port function reset. During port initialization, the
3799 * current els and allocated scsi sgl lists are 0s.
3800 *
3801 * Return codes
3802 * 0 - successful (for now, it always returns 0)
3803 **/
3804int
3805lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3806{
3807 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3808 uint16_t i, lxri, xri_cnt, els_xri_cnt;
6c621a22 3809 uint16_t nvmet_xri_cnt;
f358dd0c
JS
3810 LIST_HEAD(nvmet_sgl_list);
3811 int rc;
3812
3813 /*
3814 * update on pci function's nvmet xri-sgl list
3815 */
3816 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
61f3d4bf 3817
6c621a22
JS
3818 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3819 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
f358dd0c
JS
3820 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3821 /* els xri-sgl expanded */
3822 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3823 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3824 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3825 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3826 /* allocate the additional nvmet sgls */
3827 for (i = 0; i < xri_cnt; i++) {
3828 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3829 GFP_KERNEL);
3830 if (sglq_entry == NULL) {
3831 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3832 "6303 Failure to allocate an "
3833 "NVMET sgl entry:%d\n", i);
3834 rc = -ENOMEM;
3835 goto out_free_mem;
3836 }
3837 sglq_entry->buff_type = NVMET_BUFF_TYPE;
3838 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3839 &sglq_entry->phys);
3840 if (sglq_entry->virt == NULL) {
3841 kfree(sglq_entry);
3842 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3843 "6304 Failure to allocate an "
3844 "NVMET buf:%d\n", i);
3845 rc = -ENOMEM;
3846 goto out_free_mem;
3847 }
3848 sglq_entry->sgl = sglq_entry->virt;
3849 memset(sglq_entry->sgl, 0,
3850 phba->cfg_sg_dma_buf_size);
3851 sglq_entry->state = SGL_FREED;
3852 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3853 }
3854 spin_lock_irq(&phba->hbalock);
3855 spin_lock(&phba->sli4_hba.sgl_list_lock);
3856 list_splice_init(&nvmet_sgl_list,
3857 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3858 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3859 spin_unlock_irq(&phba->hbalock);
3860 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3861 /* nvmet xri-sgl shrunk */
3862 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3863 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3864 "6305 NVMET xri-sgl count decreased from "
3865 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3866 nvmet_xri_cnt);
3867 spin_lock_irq(&phba->hbalock);
3868 spin_lock(&phba->sli4_hba.sgl_list_lock);
3869 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3870 &nvmet_sgl_list);
3871 /* release extra nvmet sgls from list */
3872 for (i = 0; i < xri_cnt; i++) {
3873 list_remove_head(&nvmet_sgl_list,
3874 sglq_entry, struct lpfc_sglq, list);
3875 if (sglq_entry) {
3876 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3877 sglq_entry->phys);
3878 kfree(sglq_entry);
3879 }
3880 }
3881 list_splice_init(&nvmet_sgl_list,
3882 &phba->sli4_hba.lpfc_nvmet_sgl_list);
3883 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3884 spin_unlock_irq(&phba->hbalock);
3885 } else
3886 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3887 "6306 NVMET xri-sgl count unchanged: %d\n",
3888 nvmet_xri_cnt);
3889 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3890
3891 /* update xris to nvmet sgls on the list */
3892 sglq_entry = NULL;
3893 sglq_entry_next = NULL;
3894 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3895 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3896 lxri = lpfc_sli4_next_xritag(phba);
3897 if (lxri == NO_XRI) {
3898 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3899 "6307 Failed to allocate xri for "
3900 "NVMET sgl\n");
3901 rc = -ENOMEM;
3902 goto out_free_mem;
3903 }
3904 sglq_entry->sli4_lxritag = lxri;
3905 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3906 }
3907 return 0;
3908
3909out_free_mem:
3910 lpfc_free_nvmet_sgl_list(phba);
3911 return rc;
3912}
3913
5e5b511d
JS
3914int
3915lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
3916{
3917 LIST_HEAD(blist);
3918 struct lpfc_sli4_hdw_queue *qp;
c490850a
JS
3919 struct lpfc_io_buf *lpfc_cmd;
3920 struct lpfc_io_buf *iobufp, *prev_iobufp;
5e5b511d
JS
3921 int idx, cnt, xri, inserted;
3922
3923 cnt = 0;
3924 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3925 qp = &phba->sli4_hba.hdwq[idx];
3926 spin_lock_irq(&qp->io_buf_list_get_lock);
3927 spin_lock(&qp->io_buf_list_put_lock);
3928
3929 /* Take everything off the get and put lists */
3930 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
3931 list_splice(&qp->lpfc_io_buf_list_put, &blist);
3932 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
3933 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
3934 cnt += qp->get_io_bufs + qp->put_io_bufs;
3935 qp->get_io_bufs = 0;
3936 qp->put_io_bufs = 0;
3937 qp->total_io_bufs = 0;
3938 spin_unlock(&qp->io_buf_list_put_lock);
3939 spin_unlock_irq(&qp->io_buf_list_get_lock);
3940 }
3941
3942 /*
3943 * Take IO buffers off blist and put on cbuf sorted by XRI.
3944 * This is because POST_SGL takes a sequential range of XRIs
3945 * to post to the firmware.
3946 */
3947 for (idx = 0; idx < cnt; idx++) {
c490850a 3948 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
5e5b511d
JS
3949 if (!lpfc_cmd)
3950 return cnt;
3951 if (idx == 0) {
3952 list_add_tail(&lpfc_cmd->list, cbuf);
3953 continue;
3954 }
3955 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
3956 inserted = 0;
3957 prev_iobufp = NULL;
3958 list_for_each_entry(iobufp, cbuf, list) {
3959 if (xri < iobufp->cur_iocbq.sli4_xritag) {
3960 if (prev_iobufp)
3961 list_add(&lpfc_cmd->list,
3962 &prev_iobufp->list);
3963 else
3964 list_add(&lpfc_cmd->list, cbuf);
3965 inserted = 1;
3966 break;
3967 }
3968 prev_iobufp = iobufp;
3969 }
3970 if (!inserted)
3971 list_add_tail(&lpfc_cmd->list, cbuf);
3972 }
3973 return cnt;
3974}
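
/*
 * Standalone sketch of the sorted-insert step above: keep a singly linked
 * list ordered by ascending XRI so a subsequent block post sees a
 * sequential range. The xri_node layout is illustrative.
 */
struct xri_node {
	struct xri_node *next;
	int xri;
};

/* Insert 'n' immediately before the first node with a larger XRI. */
static void xri_sorted_insert(struct xri_node **head, struct xri_node *n)
{
	struct xri_node **pp = head;

	while (*pp && (*pp)->xri <= n->xri)
		pp = &(*pp)->next;	/* walk past smaller-or-equal keys */
	n->next = *pp;
	*pp = n;
}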
3975
3976int
3977lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
3978{
3979 struct lpfc_sli4_hdw_queue *qp;
c490850a 3980 struct lpfc_io_buf *lpfc_cmd;
5e5b511d
JS
3981 int idx, cnt;
3982
3983 qp = phba->sli4_hba.hdwq;
3984 cnt = 0;
3985 while (!list_empty(cbuf)) {
3986 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3987 list_remove_head(cbuf, lpfc_cmd,
c490850a 3988 struct lpfc_io_buf, list);
5e5b511d
JS
3989 if (!lpfc_cmd)
3990 return cnt;
3991 cnt++;
3992 qp = &phba->sli4_hba.hdwq[idx];
1fbf9742
JS
3993 lpfc_cmd->hdwq_no = idx;
3994 lpfc_cmd->hdwq = qp;
5e5b511d
JS
3995 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
3996 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
3997 spin_lock(&qp->io_buf_list_put_lock);
3998 list_add_tail(&lpfc_cmd->list,
3999 &qp->lpfc_io_buf_list_put);
4000 qp->put_io_bufs++;
4001 qp->total_io_bufs++;
4002 spin_unlock(&qp->io_buf_list_put_lock);
4003 }
4004 }
4005 return cnt;
4006}
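
/*
 * Sketch of the replenish loop's round-robin idea with hypothetical counts:
 * deal buffers out one per queue per pass so every queue gets an even share.
 */
#include <stdio.h>

int main(void)
{
	int nbufs = 10, nqueues = 4;
	int per_q[4] = { 0 };
	int q = 0, i;

	for (i = 0; i < nbufs; i++) {
		per_q[q]++;			/* next buffer goes to queue q */
		q = (q + 1) % nqueues;		/* advance round-robin cursor */
	}
	for (q = 0; q < nqueues; q++)
		printf("queue %d got %d buffers\n", q, per_q[q]);
	return 0;
}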
4007
895427bd 4008/**
5e5b511d 4009 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
895427bd
JS
4010 * @phba: pointer to lpfc hba data structure.
4011 *
4012 * This routine first calculates the sizes of the current els and allocated
4013 * IO sgl lists, and then goes through all sgls to update the physical
4014 * XRIs assigned due to port function reset. During port initialization, the
4015 * current els and allocated scsi sgl lists are 0s.
4016 *
4017 * Return codes
4018 * 0 - successful (for now, it always returns 0)
4019 **/
4020int
5e5b511d 4021lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
895427bd 4022{
c490850a 4023 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
0794d601 4024 uint16_t i, lxri, els_xri_cnt;
5e5b511d
JS
4025 uint16_t io_xri_cnt, io_xri_max;
4026 LIST_HEAD(io_sgl_list);
0794d601 4027 int rc, cnt;
8a9d2e80 4028
895427bd 4029 /*
0794d601 4030 * update on pci function's allocated nvme xri-sgl list
895427bd 4031 */
8a9d2e80 4032
0794d601
JS
4033 /* maximum number of xris available for nvme buffers */
4034 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
5e5b511d
JS
4035 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4036 phba->sli4_hba.io_xri_max = io_xri_max;
895427bd 4037
e8c0a779 4038 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
0794d601
JS
4039 "6074 Current allocated XRI sgl count:%d, "
4040 "maximum XRI count:%d\n",
5e5b511d
JS
4041 phba->sli4_hba.io_xri_cnt,
4042 phba->sli4_hba.io_xri_max);
8a9d2e80 4043
5e5b511d 4044 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
8a9d2e80 4045
5e5b511d 4046 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
0794d601 4047 /* max nvme xri shrunk below the allocated nvme buffers */
5e5b511d
JS
4048 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4049 phba->sli4_hba.io_xri_max;
0794d601 4050 /* release the extra allocated nvme buffers */
5e5b511d
JS
4051 for (i = 0; i < io_xri_cnt; i++) {
4052 list_remove_head(&io_sgl_list, lpfc_ncmd,
c490850a 4053 struct lpfc_io_buf, list);
0794d601 4054 if (lpfc_ncmd) {
771db5c0 4055 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
0794d601
JS
4056 lpfc_ncmd->data,
4057 lpfc_ncmd->dma_handle);
4058 kfree(lpfc_ncmd);
a2fc4aef 4059 }
8a9d2e80 4060 }
5e5b511d 4061 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
8a9d2e80
JS
4062 }
4063
0794d601
JS
4064 /* update xris associated to remaining allocated nvme buffers */
4065 lpfc_ncmd = NULL;
4066 lpfc_ncmd_next = NULL;
5e5b511d 4067 phba->sli4_hba.io_xri_cnt = cnt;
0794d601 4068 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
5e5b511d 4069 &io_sgl_list, list) {
8a9d2e80
JS
4070 lxri = lpfc_sli4_next_xritag(phba);
4071 if (lxri == NO_XRI) {
4072 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
0794d601
JS
4073 "6075 Failed to allocate xri for "
4074 "nvme buffer\n");
8a9d2e80
JS
4075 rc = -ENOMEM;
4076 goto out_free_mem;
4077 }
0794d601
JS
4078 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4079 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
8a9d2e80 4080 }
5e5b511d 4081 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
dea3101e 4082 return 0;
8a9d2e80
JS
4083
4084out_free_mem:
5e5b511d 4085 lpfc_io_free(phba);
8a9d2e80 4086 return rc;
dea3101e 4087}
4088
0794d601 4089/**
5e5b511d 4090 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
0794d601
JS
4091 * @phba: pointer to lpfc hba data structure.
4092 * @num_to_alloc: The requested number of buffers to allocate.
4093 *
4094 * This routine allocates nvme buffers for a device with SLI-4 interface spec.
4095 * The nvme buffer contains all the necessary information needed to initiate
4096 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4097 * them on a list, it posts them to the port by using SGL block post.
4098 *
4099 * Return codes:
5e5b511d 4100 * int - number of IO buffers that were allocated and posted.
0794d601
JS
4101 * 0 = failure, less than num_to_alloc is a partial failure.
4102 **/
4103int
5e5b511d 4104lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
0794d601 4105{
c490850a 4106 struct lpfc_io_buf *lpfc_ncmd;
0794d601
JS
4107 struct lpfc_iocbq *pwqeq;
4108 uint16_t iotag, lxri = 0;
4109 int bcnt, num_posted;
4110 LIST_HEAD(prep_nblist);
4111 LIST_HEAD(post_nblist);
4112 LIST_HEAD(nvme_nblist);
4113
4114 /* Sanity check to ensure our sizing is right for both SCSI and NVME */
c490850a 4115 if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) {
0794d601 4116 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
f996861b 4117 "6426 Common buffer size %zd exceeds %d\n",
c490850a
JS
4118 sizeof(struct lpfc_io_buf),
4119 LPFC_COMMON_IO_BUF_SZ);
0794d601
JS
4120 return 0;
4121 }
4122
5e5b511d 4123 phba->sli4_hba.io_xri_cnt = 0;
0794d601
JS
4124 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4125 lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL);
4126 if (!lpfc_ncmd)
4127 break;
4128 /*
4129 * Get memory from the pci pool to map the virt space to
4130 * pci bus space for an I/O. The DMA buffer includes the
4131 * number of SGE's necessary to support the sg_tablesize.
4132 */
a5c990ee
TM
4133 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4134 GFP_KERNEL,
4135 &lpfc_ncmd->dma_handle);
0794d601
JS
4136 if (!lpfc_ncmd->data) {
4137 kfree(lpfc_ncmd);
4138 break;
4139 }
0794d601
JS
4140
4141 /*
4142 * 4K Page alignment is CRITICAL to BlockGuard, double check
4143 * to be sure.
4144 */
4145 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4146 (((unsigned long)(lpfc_ncmd->data) &
4147 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4148 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4149 "3369 Memory alignment err: addr=%lx\n",
4150 (unsigned long)lpfc_ncmd->data);
4151 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4152 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4153 kfree(lpfc_ncmd);
4154 break;
4155 }
4156
4157 lxri = lpfc_sli4_next_xritag(phba);
4158 if (lxri == NO_XRI) {
4159 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4160 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4161 kfree(lpfc_ncmd);
4162 break;
4163 }
4164 pwqeq = &lpfc_ncmd->cur_iocbq;
4165
4166 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4167 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4168 if (iotag == 0) {
4169 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4170 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4171 kfree(lpfc_ncmd);
4172 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
4173 "6121 Failed to allocate IOTAG for"
4174 " XRI:0x%x\n", lxri);
4175 lpfc_sli4_free_xri(phba, lxri);
4176 break;
4177 }
4178 pwqeq->sli4_lxritag = lxri;
4179 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4180 pwqeq->context1 = lpfc_ncmd;
4181
4182 /* Initialize local short-hand pointers. */
4183 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4184 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4185 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
c2017260 4186 spin_lock_init(&lpfc_ncmd->buf_lock);
0794d601
JS
4187
4188 /* add the nvme buffer to a post list */
4189 list_add_tail(&lpfc_ncmd->list, &post_nblist);
5e5b511d 4190 phba->sli4_hba.io_xri_cnt++;
0794d601
JS
4191 }
4192 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4193 "6114 Allocate %d out of %d requested new NVME "
4194 "buffers\n", bcnt, num_to_alloc);
4195
4196 /* post the list of nvme buffer sgls to port if available */
4197 if (!list_empty(&post_nblist))
5e5b511d 4198 num_posted = lpfc_sli4_post_io_sgl_list(
0794d601
JS
4199 phba, &post_nblist, bcnt);
4200 else
4201 num_posted = 0;
4202
4203 return num_posted;
4204}
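
/*
 * Minimal sketch of the 4K alignment check above: an address is page
 * aligned iff its low 12 bits are zero. The 4096-byte page size is an
 * assumption mirroring SLI4_PAGE_SIZE.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL

static int is_page_aligned(const void *p)
{
	return ((uintptr_t)p & (SKETCH_PAGE_SIZE - 1)) == 0;
}

int main(void)
{
	static char buf[8192];

	printf("buf aligned? %d\n", is_page_aligned(buf));
	printf("buf+1 aligned? %d\n", is_page_aligned(buf + 1));
	return 0;
}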
4205
96418b5e
JS
4206static uint64_t
4207lpfc_get_wwpn(struct lpfc_hba *phba)
4208{
4209 uint64_t wwn;
4210 int rc;
4211 LPFC_MBOXQ_t *mboxq;
4212 MAILBOX_t *mb;
4213
96418b5e
JS
4214 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4215 GFP_KERNEL);
4216 if (!mboxq)
4217 return (uint64_t)-1;
4218
4219 /* First get WWN of HBA instance */
4220 lpfc_read_nv(phba, mboxq);
4221 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4222 if (rc != MBX_SUCCESS) {
4223 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4224 "6019 Mailbox failed, mbxCmd x%x "
4225 "READ_NV, mbxStatus x%x\n",
4226 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4227 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4228 mempool_free(mboxq, phba->mbox_mem_pool);
4229 return (uint64_t) -1;
4230 }
4231 mb = &mboxq->u.mb;
4232 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4233 /* wwn is WWPN of HBA instance */
4234 mempool_free(mboxq, phba->mbox_mem_pool);
4235 if (phba->sli_rev == LPFC_SLI_REV4)
4236 return be64_to_cpu(wwn);
4237 else
286871a6 4238 return rol64(wwn, 32);
96418b5e
JS
4239}
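
/*
 * Userspace sketch of the SLI3 fallback above: rotating the 64-bit name
 * left by 32 swaps its two 32-bit halves. The sample WWPN value is
 * arbitrary.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t sk_rol64(uint64_t word, unsigned int shift)
{
	return (word << shift) | (word >> (64 - shift));
}

int main(void)
{
	uint64_t wwn = 0x2000000012345678ULL;

	printf("%016" PRIx64 " -> %016" PRIx64 "\n", wwn, sk_rol64(wwn, 32));
	return 0;
}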
4240
e59058c4 4241/**
3621a710 4242 * lpfc_create_port - Create an FC port
e59058c4
JS
4243 * @phba: pointer to lpfc hba data structure.
4244 * @instance: a unique integer ID to this FC port.
4245 * @dev: pointer to the device data structure.
4246 *
4247 * This routine creates a FC port for the upper layer protocol. The FC port
4248 * can be created on top of either a physical port or a virtual port provided
4249 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4250 * and associates it with the FC port before adding the shost into the SCSI
4251 * layer.
4252 *
4253 * Return codes
4254 * @vport - pointer to the virtual N_Port data structure.
4255 * NULL - port create failed.
4256 **/
2e0fef85 4257struct lpfc_vport *
3de2a653 4258lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
47a8617c 4259{
2e0fef85 4260 struct lpfc_vport *vport;
895427bd 4261 struct Scsi_Host *shost = NULL;
2e0fef85 4262 int error = 0;
96418b5e
JS
4263 int i;
4264 uint64_t wwn;
4265 bool use_no_reset_hba = false;
56bc8028 4266 int rc;
96418b5e 4267
56bc8028
JS
4268 if (lpfc_no_hba_reset_cnt) {
4269 if (phba->sli_rev < LPFC_SLI_REV4 &&
4270 dev == &phba->pcidev->dev) {
4271 /* Reset the port first */
4272 lpfc_sli_brdrestart(phba);
4273 rc = lpfc_sli_chipset_init(phba);
4274 if (rc)
4275 return NULL;
4276 }
4277 wwn = lpfc_get_wwpn(phba);
4278 }
96418b5e
JS
4279
4280 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4281 if (wwn == lpfc_no_hba_reset[i]) {
4282 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4283 "6020 Setting use_no_reset port=%llx\n",
4284 wwn);
4285 use_no_reset_hba = true;
4286 break;
4287 }
4288 }
47a8617c 4289
895427bd
JS
4290 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4291 if (dev != &phba->pcidev->dev) {
4292 shost = scsi_host_alloc(&lpfc_vport_template,
4293 sizeof(struct lpfc_vport));
4294 } else {
96418b5e 4295 if (!use_no_reset_hba)
895427bd
JS
4296 shost = scsi_host_alloc(&lpfc_template,
4297 sizeof(struct lpfc_vport));
4298 else
96418b5e 4299 shost = scsi_host_alloc(&lpfc_template_no_hr,
895427bd
JS
4300 sizeof(struct lpfc_vport));
4301 }
4302 } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
4303 shost = scsi_host_alloc(&lpfc_template_nvme,
ea4142f6
JS
4304 sizeof(struct lpfc_vport));
4305 }
2e0fef85
JS
4306 if (!shost)
4307 goto out;
47a8617c 4308
2e0fef85
JS
4309 vport = (struct lpfc_vport *) shost->hostdata;
4310 vport->phba = phba;
2e0fef85 4311 vport->load_flag |= FC_LOADING;
92d7f7b0 4312 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
7f5f3d0d 4313 vport->fc_rscn_flush = 0;
3de2a653 4314 lpfc_get_vport_cfgparam(vport);
895427bd 4315
f6e84790
JS
4316 /* Adjust value in vport */
4317 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4318
2e0fef85
JS
4319 shost->unique_id = instance;
4320 shost->max_id = LPFC_MAX_TARGET;
3de2a653 4321 shost->max_lun = vport->cfg_max_luns;
2e0fef85
JS
4322 shost->this_id = -1;
4323 shost->max_cmd_len = 16;
6a828b0f 4324
da0436e9 4325 if (phba->sli_rev == LPFC_SLI_REV4) {
6a828b0f
JS
4326 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
4327 shost->nr_hw_queues = phba->cfg_hdw_queue;
4328 else
4329 shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
4330
28baac74 4331 shost->dma_boundary =
cb5172ea 4332 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
5b9e70b2 4333 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
ace44e48
JS
4334 } else
4335 /* SLI-3 has a limited number of hardware queues (3),
4336 * thus there is only one for FCP processing.
4337 */
4338 shost->nr_hw_queues = 1;
81301a9b 4339
47a8617c 4340 /*
2e0fef85
JS
4341 * Set initial can_queue value since 0 is no longer supported and
4342 * scsi_add_host will fail. This will be adjusted later based on the
4343 * max xri value determined in hba setup.
47a8617c 4344 */
2e0fef85 4345 shost->can_queue = phba->cfg_hba_queue_depth - 10;
3de2a653 4346 if (dev != &phba->pcidev->dev) {
92d7f7b0
JS
4347 shost->transportt = lpfc_vport_transport_template;
4348 vport->port_type = LPFC_NPIV_PORT;
4349 } else {
4350 shost->transportt = lpfc_transport_template;
4351 vport->port_type = LPFC_PHYSICAL_PORT;
4352 }
47a8617c 4353
2e0fef85
JS
4354 /* Initialize all internally managed lists. */
4355 INIT_LIST_HEAD(&vport->fc_nodes);
da0436e9 4356 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2e0fef85 4357 spin_lock_init(&vport->work_port_lock);
47a8617c 4358
f22eb4d3 4359 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
47a8617c 4360
f22eb4d3 4361 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
92494144 4362
f22eb4d3 4363 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
92494144 4364
aa6ff309
JS
4365 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4366 lpfc_setup_bg(phba, shost);
4367
d139b9bd 4368 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2e0fef85
JS
4369 if (error)
4370 goto out_put_shost;
47a8617c 4371
523128e5 4372 spin_lock_irq(&phba->port_list_lock);
2e0fef85 4373 list_add_tail(&vport->listentry, &phba->port_list);
523128e5 4374 spin_unlock_irq(&phba->port_list_lock);
2e0fef85 4375 return vport;
47a8617c 4376
2e0fef85
JS
4377out_put_shost:
4378 scsi_host_put(shost);
4379out:
4380 return NULL;
47a8617c
JS
4381}
4382
e59058c4 4383/**
3621a710 4384 * destroy_port - destroy an FC port
e59058c4
JS
4385 * @vport: pointer to an lpfc virtual N_Port data structure.
4386 *
4387 * This routine destroys a FC port from the upper layer protocol. All the
4388 * resources associated with the port are released.
4389 **/
2e0fef85
JS
4390void
4391destroy_port(struct lpfc_vport *vport)
47a8617c 4392{
92d7f7b0
JS
4393 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4394 struct lpfc_hba *phba = vport->phba;
47a8617c 4395
858c9f6c 4396 lpfc_debugfs_terminate(vport);
92d7f7b0
JS
4397 fc_remove_host(shost);
4398 scsi_remove_host(shost);
47a8617c 4399
523128e5 4400 spin_lock_irq(&phba->port_list_lock);
92d7f7b0 4401 list_del_init(&vport->listentry);
523128e5 4402 spin_unlock_irq(&phba->port_list_lock);
47a8617c 4403
92d7f7b0 4404 lpfc_cleanup(vport);
47a8617c 4405 return;
47a8617c
JS
4406}
4407
e59058c4 4408/**
3621a710 4409 * lpfc_get_instance - Get a unique integer ID
e59058c4
JS
4410 *
4411 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4412 * uses the kernel idr facility to perform the task.
4413 *
4414 * Return codes:
4415 * instance - a unique integer ID allocated as the new instance.
4416 * -1 - lpfc get instance failed.
4417 **/
92d7f7b0
JS
4418int
4419lpfc_get_instance(void)
4420{
ab516036
TH
4421 int ret;
4422
4423 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4424 return ret < 0 ? -1 : ret;
47a8617c
JS
4425}
4426
e59058c4 4427/**
3621a710 4428 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
e59058c4
JS
4429 * @shost: pointer to SCSI host data structure.
4430 * @time: elapsed time of the scan in jiffies.
4431 *
4432 * This routine is called by the SCSI layer with a SCSI host to determine
4433 * whether the host scan is finished.
4434 *
4435 * Note: there is no scan_start function as adapter initialization will have
4436 * asynchronously kicked off the link initialization.
4437 *
4438 * Return codes
4439 * 0 - SCSI host scan is not over yet.
4440 * 1 - SCSI host scan is over.
4441 **/
47a8617c
JS
4442int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4443{
2e0fef85
JS
4444 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4445 struct lpfc_hba *phba = vport->phba;
858c9f6c 4446 int stat = 0;
47a8617c 4447
858c9f6c
JS
4448 spin_lock_irq(shost->host_lock);
4449
51ef4c26 4450 if (vport->load_flag & FC_UNLOADING) {
858c9f6c
JS
4451 stat = 1;
4452 goto finished;
4453 }
256ec0d0 4454 if (time >= msecs_to_jiffies(30 * 1000)) {
2e0fef85 4455 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
4456 "0461 Scanning longer than 30 "
4457 "seconds. Continuing initialization\n");
858c9f6c 4458 stat = 1;
47a8617c 4459 goto finished;
2e0fef85 4460 }
256ec0d0
JS
4461 if (time >= msecs_to_jiffies(15 * 1000) &&
4462 phba->link_state <= LPFC_LINK_DOWN) {
2e0fef85 4463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
e8b62011
JS
4464 "0465 Link down longer than 15 "
4465 "seconds. Continuing initialization\n");
858c9f6c 4466 stat = 1;
47a8617c 4467 goto finished;
2e0fef85 4468 }
47a8617c 4469
2e0fef85 4470 if (vport->port_state != LPFC_VPORT_READY)
858c9f6c 4471 goto finished;
2e0fef85 4472 if (vport->num_disc_nodes || vport->fc_prli_sent)
858c9f6c 4473 goto finished;
256ec0d0 4474 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
858c9f6c 4475 goto finished;
2e0fef85 4476 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
858c9f6c
JS
4477 goto finished;
4478
4479 stat = 1;
47a8617c
JS
4480
4481finished:
858c9f6c
JS
4482 spin_unlock_irq(shost->host_lock);
4483 return stat;
92d7f7b0 4484}
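
/*
 * Distilled, self-contained sketch of the scan cut-offs above: give up
 * after 30 seconds regardless, or after 15 seconds if the link never came
 * up. The millisecond constants mirror the msecs_to_jiffies() thresholds.
 */
static int sk_scan_done(unsigned long elapsed_ms, int link_still_down)
{
	if (elapsed_ms >= 30 * 1000)		/* hard 30 second cap */
		return 1;
	if (elapsed_ms >= 15 * 1000 && link_still_down)
		return 1;			/* 15 second link-down cap */
	return 0;				/* keep waiting */
}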
47a8617c 4485
3999df75 4486static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
cd71348a
JS
4487{
4488 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4489 struct lpfc_hba *phba = vport->phba;
4490
4491 fc_host_supported_speeds(shost) = 0;
1dc5ec24
JS
4492 if (phba->lmt & LMT_128Gb)
4493 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
cd71348a
JS
4494 if (phba->lmt & LMT_64Gb)
4495 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4496 if (phba->lmt & LMT_32Gb)
4497 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4498 if (phba->lmt & LMT_16Gb)
4499 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4500 if (phba->lmt & LMT_10Gb)
4501 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4502 if (phba->lmt & LMT_8Gb)
4503 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4504 if (phba->lmt & LMT_4Gb)
4505 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4506 if (phba->lmt & LMT_2Gb)
4507 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4508 if (phba->lmt & LMT_1Gb)
4509 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4510}
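
/*
 * A table-driven alternative to the if-chain above, sketched with a
 * caller-supplied map; the real LMT_* and FC_PORTSPEED_* bit values live in
 * the lpfc and FC transport headers, so the pairs here are assumptions.
 */
#include <stdint.h>

struct speed_map {
	uint32_t lmt_bit;	/* link-module-type capability bit */
	uint32_t portspeed_bit;	/* transport-visible speed bit */
};

static uint32_t sk_supported_speeds(uint32_t lmt,
				    const struct speed_map *map,
				    unsigned int n)
{
	uint32_t speeds = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		if (lmt & map[i].lmt_bit)
			speeds |= map[i].portspeed_bit;
	return speeds;
}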
4511
e59058c4 4512/**
3621a710 4513 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
e59058c4
JS
4514 * @shost: pointer to SCSI host data structure.
4515 *
4516 * This routine initializes a given SCSI host attributes on a FC port. The
4517 * SCSI host can be either on top of a physical port or a virtual port.
4518 **/
92d7f7b0
JS
4519void lpfc_host_attrib_init(struct Scsi_Host *shost)
4520{
4521 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4522 struct lpfc_hba *phba = vport->phba;
47a8617c 4523 /*
2e0fef85 4524 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
47a8617c
JS
4525 */
4526
2e0fef85
JS
4527 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4528 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
47a8617c
JS
4529 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4530
4531 memset(fc_host_supported_fc4s(shost), 0,
2e0fef85 4532 sizeof(fc_host_supported_fc4s(shost)));
47a8617c
JS
4533 fc_host_supported_fc4s(shost)[2] = 1;
4534 fc_host_supported_fc4s(shost)[7] = 1;
4535
92d7f7b0
JS
4536 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4537 sizeof fc_host_symbolic_name(shost));
47a8617c 4538
cd71348a 4539 lpfc_host_supported_speeds_set(shost);
47a8617c
JS
4540
4541 fc_host_maxframe_size(shost) =
2e0fef85
JS
4542 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4543 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
47a8617c 4544
0af5d708
MC
4545 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4546
47a8617c
JS
4547 /* This value is also unchanging */
4548 memset(fc_host_active_fc4s(shost), 0,
2e0fef85 4549 sizeof(fc_host_active_fc4s(shost)));
47a8617c
JS
4550 fc_host_active_fc4s(shost)[2] = 1;
4551 fc_host_active_fc4s(shost)[7] = 1;
4552
92d7f7b0 4553 fc_host_max_npiv_vports(shost) = phba->max_vpi;
47a8617c 4554 spin_lock_irq(shost->host_lock);
51ef4c26 4555 vport->load_flag &= ~FC_LOADING;
47a8617c 4556 spin_unlock_irq(shost->host_lock);
47a8617c 4557}
dea3101e 4558
e59058c4 4559/**
da0436e9 4560 * lpfc_stop_port_s3 - Stop SLI3 device port
e59058c4
JS
4561 * @phba: pointer to lpfc hba data structure.
4562 *
da0436e9
JS
4563 * This routine is invoked to stop an SLI3 device port, it stops the device
4564 * from generating interrupts and stops the device driver's timers for the
4565 * device.
e59058c4 4566 **/
da0436e9
JS
4567static void
4568lpfc_stop_port_s3(struct lpfc_hba *phba)
db2378e0 4569{
da0436e9
JS
4570 /* Clear all interrupt enable conditions */
4571 writel(0, phba->HCregaddr);
4572 readl(phba->HCregaddr); /* flush */
4573 /* Clear all pending interrupts */
4574 writel(0xffffffff, phba->HAregaddr);
4575 readl(phba->HAregaddr); /* flush */
db2378e0 4576
da0436e9
JS
4577 /* Reset some HBA SLI setup states */
4578 lpfc_stop_hba_timers(phba);
4579 phba->pport->work_port_events = 0;
4580}
db2378e0 4581
da0436e9
JS
4582/**
4583 * lpfc_stop_port_s4 - Stop SLI4 device port
4584 * @phba: pointer to lpfc hba data structure.
4585 *
4586 * This routine is invoked to stop an SLI4 device port, it stops the device
4587 * from generating interrupts and stops the device driver's timers for the
4588 * device.
4589 **/
4590static void
4591lpfc_stop_port_s4(struct lpfc_hba *phba)
4592{
4593 /* Reset some HBA SLI4 setup states */
4594 lpfc_stop_hba_timers(phba);
cdb42bec
JS
4595 if (phba->pport)
4596 phba->pport->work_port_events = 0;
da0436e9 4597 phba->sli4_hba.intr_enable = 0;
da0436e9 4598}
9399627f 4599
da0436e9
JS
4600/**
4601 * lpfc_stop_port - Wrapper function for stopping hba port
4602 * @phba: Pointer to HBA context object.
4603 *
4604 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
4605 * the API jump table function pointer from the lpfc_hba struct.
4606 **/
4607void
4608lpfc_stop_port(struct lpfc_hba *phba)
4609{
4610 phba->lpfc_stop_port(phba);
f485c18d
DK
4611
4612 if (phba->wq)
4613 flush_workqueue(phba->wq);
da0436e9 4614}
db2378e0 4615
ecfd03c6
JS
4616/**
4617 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4618 * @phba: Pointer to hba for which this call is being executed.
4619 *
4620 * This routine starts the timer waiting for the FCF rediscovery to complete.
4621 **/
4622void
4623lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4624{
4625 unsigned long fcf_redisc_wait_tmo =
4626 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4627 /* Start fcf rediscovery wait period timer */
4628 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4629 spin_lock_irq(&phba->hbalock);
4630 /* Allow action to new fcf asynchronous event */
4631 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4632 /* Mark the FCF rediscovery pending state */
4633 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4634 spin_unlock_irq(&phba->hbalock);
4635}
4636
4637/**
4638 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4639 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
4640 *
4641 * This routine is invoked when the wait for FCF table rediscovery has
4642 * timed out. If new FCF record(s) have been discovered during the
4643 * wait period, a new FCF event shall be added to the FCOE async event
4644 * list, and the worker thread shall then be woken up for processing in the
4645 * worker thread context.
4646 **/
e399b228 4647static void
f22eb4d3 4648lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
ecfd03c6 4649{
f22eb4d3 4650 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
ecfd03c6
JS
4651
4652 /* Don't send FCF rediscovery event if timer cancelled */
4653 spin_lock_irq(&phba->hbalock);
4654 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4655 spin_unlock_irq(&phba->hbalock);
4656 return;
4657 }
4658 /* Clear FCF rediscovery timer pending flag */
4659 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4660 /* FCF rediscovery event to worker thread */
4661 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4662 spin_unlock_irq(&phba->hbalock);
0c9ab6f5 4663 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
a93ff37a 4664 "2776 FCF rediscover quiescent timer expired\n");
ecfd03c6
JS
4665 /* wake up worker thread */
4666 lpfc_worker_wake_up(phba);
4667}
4668
e59058c4 4669/**
da0436e9 4670 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
e59058c4 4671 * @phba: pointer to lpfc hba data structure.
da0436e9 4672 * @acqe_link: pointer to the async link completion queue entry.
e59058c4 4673 *
23288b78 4674 * This routine is to parse the SLI4 link-attention link fault code.
e59058c4 4675 **/
23288b78 4676static void
da0436e9
JS
4677lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4678 struct lpfc_acqe_link *acqe_link)
db2378e0 4679{
da0436e9
JS
4680 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4681 case LPFC_ASYNC_LINK_FAULT_NONE:
4682 case LPFC_ASYNC_LINK_FAULT_LOCAL:
4683 case LPFC_ASYNC_LINK_FAULT_REMOTE:
23288b78 4684 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
da0436e9
JS
4685 break;
4686 default:
4687 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
23288b78 4688 "0398 Unknown link fault code: x%x\n",
da0436e9 4689 bf_get(lpfc_acqe_link_fault, acqe_link));
da0436e9
JS
4690 break;
4691 }
db2378e0
JS
4692}
4693
5b75da2f 4694/**
da0436e9 4695 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5b75da2f 4696 * @phba: pointer to lpfc hba data structure.
da0436e9 4697 * @acqe_link: pointer to the async link completion queue entry.
5b75da2f 4698 *
da0436e9
JS
4699 * This routine is to parse the SLI4 link attention type and translate it
4700 * into the base driver's link attention type coding.
5b75da2f 4701 *
da0436e9
JS
4702 * Return: Link attention type in terms of base driver's coding.
4703 **/
4704static uint8_t
4705lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4706 struct lpfc_acqe_link *acqe_link)
5b75da2f 4707{
da0436e9 4708 uint8_t att_type;
5b75da2f 4709
da0436e9
JS
4710 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4711 case LPFC_ASYNC_LINK_STATUS_DOWN:
4712 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
76a95d75 4713 att_type = LPFC_ATT_LINK_DOWN;
da0436e9
JS
4714 break;
4715 case LPFC_ASYNC_LINK_STATUS_UP:
4716 /* Ignore physical link up events - wait for logical link up */
76a95d75 4717 att_type = LPFC_ATT_RESERVED;
da0436e9
JS
4718 break;
4719 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
76a95d75 4720 att_type = LPFC_ATT_LINK_UP;
da0436e9
JS
4721 break;
4722 default:
4723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4724 "0399 Invalid link attention type: x%x\n",
4725 bf_get(lpfc_acqe_link_status, acqe_link));
76a95d75 4726 att_type = LPFC_ATT_RESERVED;
da0436e9 4727 break;
5b75da2f 4728 }
da0436e9 4729 return att_type;
5b75da2f
JS
4730}
4731
8b68cd52
JS
4732/**
4733 * lpfc_sli_port_speed_get - Get an SLI3 FC port's link speed in Mbps
4734 * @phba: pointer to lpfc hba data structure.
4735 *
4736 * This routine is to get an SLI3 FC port's link speed in Mbps.
4737 *
4738 * Return: link speed in terms of Mbps.
4739 **/
4740uint32_t
4741lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4742{
4743 uint32_t link_speed;
4744
4745 if (!lpfc_is_link_up(phba))
4746 return 0;
4747
a085e87c
JS
4748 if (phba->sli_rev <= LPFC_SLI_REV3) {
4749 switch (phba->fc_linkspeed) {
4750 case LPFC_LINK_SPEED_1GHZ:
4751 link_speed = 1000;
4752 break;
4753 case LPFC_LINK_SPEED_2GHZ:
4754 link_speed = 2000;
4755 break;
4756 case LPFC_LINK_SPEED_4GHZ:
4757 link_speed = 4000;
4758 break;
4759 case LPFC_LINK_SPEED_8GHZ:
4760 link_speed = 8000;
4761 break;
4762 case LPFC_LINK_SPEED_10GHZ:
4763 link_speed = 10000;
4764 break;
4765 case LPFC_LINK_SPEED_16GHZ:
4766 link_speed = 16000;
4767 break;
4768 default:
4769 link_speed = 0;
4770 }
4771 } else {
4772 if (phba->sli4_hba.link_state.logical_speed)
4773 link_speed =
4774 phba->sli4_hba.link_state.logical_speed;
4775 else
4776 link_speed = phba->sli4_hba.link_state.speed;
8b68cd52
JS
4777 }
4778 return link_speed;
4779}
4780
4781/**
4782 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
4783 * @phba: pointer to lpfc hba data structure.
4784 * @evt_code: asynchronous event code.
4785 * @speed_code: asynchronous event link speed code.
4786 *
4787 * This routine is to parse the given SLI4 async event link speed code into
4788 * a value in Mbps for the link speed.
4789 *
4790 * Return: link speed in terms of Mbps.
4791 **/
4792static uint32_t
4793lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
4794 uint8_t speed_code)
4795{
4796 uint32_t port_speed;
4797
4798 switch (evt_code) {
4799 case LPFC_TRAILER_CODE_LINK:
4800 switch (speed_code) {
26d830ec 4801 case LPFC_ASYNC_LINK_SPEED_ZERO:
8b68cd52
JS
4802 port_speed = 0;
4803 break;
26d830ec 4804 case LPFC_ASYNC_LINK_SPEED_10MBPS:
8b68cd52
JS
4805 port_speed = 10;
4806 break;
26d830ec 4807 case LPFC_ASYNC_LINK_SPEED_100MBPS:
8b68cd52
JS
4808 port_speed = 100;
4809 break;
26d830ec 4810 case LPFC_ASYNC_LINK_SPEED_1GBPS:
8b68cd52
JS
4811 port_speed = 1000;
4812 break;
26d830ec 4813 case LPFC_ASYNC_LINK_SPEED_10GBPS:
8b68cd52
JS
4814 port_speed = 10000;
4815 break;
26d830ec
JS
4816 case LPFC_ASYNC_LINK_SPEED_20GBPS:
4817 port_speed = 20000;
4818 break;
4819 case LPFC_ASYNC_LINK_SPEED_25GBPS:
4820 port_speed = 25000;
4821 break;
4822 case LPFC_ASYNC_LINK_SPEED_40GBPS:
4823 port_speed = 40000;
4824 break;
8b68cd52
JS
4825 default:
4826 port_speed = 0;
4827 }
4828 break;
4829 case LPFC_TRAILER_CODE_FC:
4830 switch (speed_code) {
26d830ec 4831 case LPFC_FC_LA_SPEED_UNKNOWN:
8b68cd52
JS
4832 port_speed = 0;
4833 break;
26d830ec 4834 case LPFC_FC_LA_SPEED_1G:
8b68cd52
JS
4835 port_speed = 1000;
4836 break;
26d830ec 4837 case LPFC_FC_LA_SPEED_2G:
8b68cd52
JS
4838 port_speed = 2000;
4839 break;
26d830ec 4840 case LPFC_FC_LA_SPEED_4G:
8b68cd52
JS
4841 port_speed = 4000;
4842 break;
26d830ec 4843 case LPFC_FC_LA_SPEED_8G:
8b68cd52
JS
4844 port_speed = 8000;
4845 break;
26d830ec 4846 case LPFC_FC_LA_SPEED_10G:
8b68cd52
JS
4847 port_speed = 10000;
4848 break;
26d830ec 4849 case LPFC_FC_LA_SPEED_16G:
8b68cd52
JS
4850 port_speed = 16000;
4851 break;
d38dd52c
JS
4852 case LPFC_FC_LA_SPEED_32G:
4853 port_speed = 32000;
4854 break;
fbd8a6ba
JS
4855 case LPFC_FC_LA_SPEED_64G:
4856 port_speed = 64000;
4857 break;
1dc5ec24
JS
4858 case LPFC_FC_LA_SPEED_128G:
4859 port_speed = 128000;
4860 break;
8b68cd52
JS
4861 default:
4862 port_speed = 0;
4863 }
4864 break;
4865 default:
4866 port_speed = 0;
4867 }
4868 return port_speed;
4869}
4870
/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
		bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
		bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
		bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
		bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
		bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
		bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Initialize completion status */
	mb = &pmb->u.mb;
	mb->mbxStatus = MBX_SUCCESS;

	/* Parse port fault information field */
	lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
 * topology.
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine parses the given SLI4 async event link speed code into the
 * link speed coding used by the READ_TOPOLOGY command.
 *
 * Return: link speed in terms of Read topology.
 **/
static uint8_t
lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
{
	uint8_t port_speed;

	switch (speed_code) {
	case LPFC_FC_LA_SPEED_1G:
		port_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_FC_LA_SPEED_2G:
		port_speed = LPFC_LINK_SPEED_2GHZ;
		break;
	case LPFC_FC_LA_SPEED_4G:
		port_speed = LPFC_LINK_SPEED_4GHZ;
		break;
	case LPFC_FC_LA_SPEED_8G:
		port_speed = LPFC_LINK_SPEED_8GHZ;
		break;
	case LPFC_FC_LA_SPEED_16G:
		port_speed = LPFC_LINK_SPEED_16GHZ;
		break;
	case LPFC_FC_LA_SPEED_32G:
		port_speed = LPFC_LINK_SPEED_32GHZ;
		break;
	case LPFC_FC_LA_SPEED_64G:
		port_speed = LPFC_LINK_SPEED_64GHZ;
		break;
	case LPFC_FC_LA_SPEED_128G:
		port_speed = LPFC_LINK_SPEED_128GHZ;
		break;
	case LPFC_FC_LA_SPEED_256G:
		port_speed = LPFC_LINK_SPEED_256GHZ;
		break;
	default:
		port_speed = 0;
		break;
	}

	return port_speed;
}

#define trunk_link_status(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
		"Link up" : "Link down") : "NA"
/* Did port __idx report an error? */
#define trunk_port_fault(__idx)\
	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"

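/*
 * Illustrative expansion (editor's note, not driver code): because the
 * macros above paste __idx into both the bitfield name and the link
 * member via the ## token-pasting operator, trunk_link_status(0)
 * expands roughly to:
 *
 *	bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc) ?
 *		((phba->trunk_link.link0.state == LPFC_LINK_UP) ?
 *			"Link up" : "Link down") : "NA"
 *
 * and trunk_port_fault(0) to:
 *
 *	bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc) ?
 *		(port_fault & (1 << 0) ? "YES" : "NO") : "NA"
 *
 * so each configured port reports "Link up"/"Link down" ("YES"/"NO" for
 * faults) while an unconfigured port reports "NA".
 */
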
static void
lpfc_update_trunk_link_status(struct lpfc_hba *phba,
			      struct lpfc_acqe_fc_la *acqe_fc)
{
	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);

	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	phba->sli4_hba.link_state.logical_speed =
		bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
	phba->fc_linkspeed =
		lpfc_async_link_speed_to_read_top(
				phba,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));

	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
		phba->trunk_link.link0.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
		phba->trunk_link.link1.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
		phba->trunk_link.link2.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
	}
	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
		phba->trunk_link.link3.state =
			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
			? LPFC_LINK_UP : LPFC_LINK_DOWN;
		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2910 Async FC Trunking Event - Speed:%d\n"
			"\tLogical speed:%d "
			"port0: %s port1: %s port2: %s port3: %s\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.logical_speed,
			trunk_link_status(0), trunk_link_status(1),
			trunk_link_status(2), trunk_link_status(3));

	if (port_fault)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3202 trunk error:0x%x (%s) seen on port0:%s "
				/*
				 * SLI-4: We have only 0xA error codes
				 * defined as of now. print an appropriate
				 * message in case driver needs to be updated.
				 */
				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
				"UNDEFINED. update driver." : trunk_errmsg[err],
				trunk_port_fault(0), trunk_port_fault(1),
				trunk_port_fault(2), trunk_port_fault(3));
}

/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
		lpfc_update_trunk_link_status(phba, acqe_fc);
		return;
	}

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
		bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
		bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
		bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
		bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
		bf_get(lpfc_acqe_link_fault, acqe_fc);

	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
	    LPFC_FC_LA_TYPE_LINK_DOWN)
		phba->sli4_hba.link_state.logical_speed = 0;
	else if (!phba->sli4_hba.conf_trunk)
		phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);

		switch (phba->sli4_hba.link_state.status) {
		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
			phba->link_flag |= LS_MDS_LINK_DOWN;
			break;
		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
			phba->link_flag |= LS_MDS_LOOPBACK;
			break;
		default:
			break;
		}

		/* Initialize completion status */
		mb = &pmb->u.mb;
		mb->mbxStatus = MBX_SUCCESS;

		/* Parse port fault information field */
		lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);

		/* Parse and translate link attention fields */
		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
		la->eventTag = acqe_fc->event_tag;

		if (phba->sli4_hba.link_state.status ==
		    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_UNEXP_WWPN);
		} else {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_LINK_DOWN);
		}
		/* Invoke the mailbox command callback function */
		lpfc_mbx_cmpl_read_topology(phba, pmb);

		return;
	}

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int rc, i;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
			"x%08x SLI Event Type:%d\n",
			acqe_sli->event_data1, acqe_sli->event_data2,
			evt_type);

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* port name is empty */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3296 "
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? " not" : "");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? " not" : "");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}

		/* Issue READ_CONFIG mbox command to refresh supported speeds */
		rc = lpfc_sli4_read_config(phba);
		if (rc) {
			phba->lmt = 0;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3194 Unable to retrieve supported "
					"speeds, rc = 0x%x\n", rc);
		}
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				shost = lpfc_shost_from_vport(vports[i]);
				lpfc_host_supported_speeds_set(shost);
			}
		}
		lpfc_destroy_vport_work_array(phba, vports);

		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Async SLI event - Event Data1:x%08x Event Data2:"
				"x%08x SLI Event Type:%d\n",
				acqe_sli->event_data1, acqe_sli->event_data2,
				evt_type);
		break;
	}
}

/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return NULL;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2549 FCF (x%x) disconnected from network, "
				"tag:x%x\n", acqe_fip->index,
				acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2718 Clear Virtual Link Received for VPI 0x%x"
				" tag 0x%x\n", acqe_fip->index,
				acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
		    active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0288 Unknown FCoE event type 0x%x event tag "
				"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}

/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}

/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

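/*
 * Editor's sketch (not driver code): lpfc_sli4_async_event_proc() above
 * follows a common drain pattern - clear the "work pending" flag, then
 * repeatedly pop one event from the head of the queue under the lock and
 * dispatch it with the lock dropped. A minimal standalone rendering of
 * that shape, with hypothetical types and names, looks like this:
 */
#if 0	/* illustrative sketch only */
#include <pthread.h>
#include <stddef.h>

struct evt {
	struct evt *next;
	int code;
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static struct evt *q_head;

static void dispatch(struct evt *e)
{
	(void)e;	/* switch on e->code here, as the driver does above */
}

static void drain_events(void)
{
	struct evt *e;

	for (;;) {
		pthread_mutex_lock(&q_lock);
		e = q_head;			/* pop head under the lock */
		if (e)
			q_head = e->next;
		pthread_mutex_unlock(&q_lock);
		if (!e)
			break;
		dispatch(e);	/* process with the lock dropped */
	}
}
#endif
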
/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}

/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}

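/*
 * Editor's sketch (not driver code): the "API jump table" set up above is
 * a per-device-family dispatch table of function pointers, so common code
 * can call an entry point without knowing whether it is driving SLI-3 or
 * SLI-4 hardware. A minimal standalone version of the idea, with entirely
 * hypothetical names, looks like this:
 */
#if 0	/* illustrative sketch only */
struct hba_ops {
	int (*hba_down)(void *hba);
	int (*issue_mbox)(void *hba, void *mbox);
};

/* Each family fills in its own entry points (elided in this sketch) */
static const struct hba_ops sli3_ops = { 0 };
static const struct hba_ops sli4_ops = { 0 };

static const struct hba_ops *ops_for_dev_grp(int dev_grp)
{
	switch (dev_grp) {
	case 0:
		return &sli3_ops;	/* e.g. an SLI-3 device group */
	case 1:
		return &sli4_ops;	/* e.g. an SLI-4 device group */
	default:
		return 0;		/* unknown group -> -ENODEV path */
	}
}
#endif
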
/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}

/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device\n");
	return -ENODEV;
}

/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Release PCI resource and disable PCI device */
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);

	return;
}

/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	else
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the number of SR-IOV virtual functions the physical
 * function supports from the PCI SR-IOV extended capability. If the device
 * does not support SR-IOV, it returns 0.
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
 * API call is not considered an error condition for most devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}

/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));

	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX heartbeat timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* EA polling mode timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);

	INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);

	return 0;
}

/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	int rc, entry_sz;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	/* Set up phase-1 common device driver resources */

	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.sli3_ring)
		phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	/* Initialize the host templates with the configured values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	if (phba->sli_rev == LPFC_SLI_REV4)
		entry_sz = sizeof(struct sli4_sge);
	else
		entry_sz = sizeof(struct ulp_bde64);

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * entry_sz);

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

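	/*
	 * Worked example (editor's note; the struct sizes below are
	 * illustrative assumptions, not the real sizeof values): with a
	 * 12-byte ulp_bde64 entry and cfg_sg_seg_cnt = 64 in the non-BG
	 * path above,
	 *
	 *	cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
	 *			      + sizeof(struct fcp_rsp)
	 *			      + (64 + 2) * 12
	 *
	 * i.e. the two reserved BDEs (one for the FCP cmnd, one for the
	 * FCP rsp) ride along with one BDE per data segment, and
	 * cfg_total_seg_cnt becomes 64 + 2 = 66.
	 */
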
	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}

3772a991
JS
6371/**
6372 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
6373 * @phba: pointer to lpfc hba data structure.
6374 *
6375 * This routine is invoked to unset the driver internal resources set up
6376 * specifically for supporting the SLI-3 HBA device it is attached to.
6377 **/
6378static void
6379lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
6380{
6381 /* Free device driver memory allocated */
6382 lpfc_mem_free_all(phba);
3163f725 6383
3772a991
JS
6384 return;
6385}
dea3101e 6386
3772a991 6387/**
da0436e9 6388 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3772a991
JS
6389 * @phba: pointer to lpfc hba data structure.
6390 *
da0436e9
JS
6391 * This routine is invoked to set up the driver internal resources specific to
6392 * support the SLI-4 HBA device it is attached to.
3772a991
JS
6393 *
6394 * Return codes
af901ca1 6395 * 0 - successful
da0436e9 6396 * other values - error
3772a991
JS
6397 **/
6398static int
da0436e9 6399lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3772a991 6400{
28baac74 6401 LPFC_MBOXQ_t *mboxq;
f358dd0c 6402 MAILBOX_t *mb;
895427bd 6403 int rc, i, max_buf_size;
28baac74
JS
6404 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
6405 struct lpfc_mqe *mqe;
09294d46 6406 int longs;
81e6a637 6407 int extra;
f358dd0c 6408 uint64_t wwn;
b92dc72d
JS
6409 u32 if_type;
6410 u32 if_fam;
da0436e9 6411
895427bd 6412 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
222e9239 6413 phba->sli4_hba.num_possible_cpu = num_possible_cpus();
895427bd
JS
6414 phba->sli4_hba.curr_disp_cpu = 0;
6415
716d3bc5
JS
6416 /* Get all the module params for configuring this host */
6417 lpfc_get_cfgparam(phba);
6418
895427bd
JS
6419 /* Set up phase-1 common device driver resources */
6420 rc = lpfc_setup_driver_resource_phase1(phba);
6421 if (rc)
6422 return -ENODEV;
6423
da0436e9
JS
6424	/* Before proceeding, wait for POST done and device ready */
6425 rc = lpfc_sli4_post_status_check(phba);
6426 if (rc)
6427 return -ENODEV;
6428
3cee98db
JS
6429 /* Allocate all driver workqueues here */
6430
6431 /* The lpfc_wq workqueue for deferred irq use */
6432 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
6433
3772a991 6434 /*
da0436e9 6435 * Initialize timers used by driver
3772a991 6436 */
3772a991 6437
f22eb4d3 6438 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
3772a991 6439
ecfd03c6 6440 /* FCF rediscover timer */
f22eb4d3 6441 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
ecfd03c6 6442
7ad20aa9
JS
6443 /*
6444 * Control structure for handling external multi-buffer mailbox
6445 * command pass-through.
6446 */
6447 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
6448 sizeof(struct lpfc_mbox_ext_buf_ctx));
6449 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
6450
da0436e9 6451 phba->max_vpi = LPFC_MAX_VPI;
67d12733 6452
da0436e9
JS
6453 /* This will be set to correct value after the read_config mbox */
6454 phba->max_vports = 0;
3772a991 6455
da0436e9
JS
6456 /* Program the default value of vlan_id and fc_map */
6457 phba->valid_vlan = 0;
6458 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
6459 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
6460 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3772a991 6461
2a76a283
JS
6462 /*
6463 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
895427bd
JS
6464 * we will associate a new ring, for each EQ/CQ/WQ tuple.
6465 * The WQ create will allocate the ring.
2a76a283 6466 */
09294d46 6467
81e6a637
JS
6468 /*
6469 * 1 for cmd, 1 for rsp, NVME adds an extra one
6470 * for boundary conditions in its max_sgl_segment template.
6471 */
6472 extra = 2;
6473 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6474 extra++;
6475
da0436e9 6476 /*
09294d46
JS
6477	 * Regardless of what family our adapter is in, we are
6478	 * limited to 2 pages (512 SGEs) for our SGL.
6479 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
6480 */
6481 max_buf_size = (2 * SLI4_PAGE_SIZE);
09294d46 6482
da0436e9 6483 /*
895427bd
JS
6484	 * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size
6485 * used to create the sg_dma_buf_pool must be calculated.
da0436e9 6486 */
f44ac12f 6487 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
96f7077f 6488 /*
895427bd
JS
6489 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
6490		 * the FCP rsp, and an SGE. Since we have no control
6491 * over how many protection segments the SCSI Layer
96f7077f 6492		 * will hand us (i.e., there could be one for every block
895427bd
JS
6493		 * in the I/O), just allocate enough SGEs to accommodate
6494 * our max amount and we need to limit lpfc_sg_seg_cnt
6495 * to minimize the risk of running out.
96f7077f
JS
6496 */
6497 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
895427bd 6498 sizeof(struct fcp_rsp) + max_buf_size;
96f7077f
JS
6499
6500 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
6501 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6502
5b9e70b2
JS
6503 /*
6504 * If supporting DIF, reduce the seg count for scsi to
6505 * allow room for the DIF sges.
6506 */
6507 if (phba->cfg_enable_bg &&
6508 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6509 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6510 else
6511 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6512
96f7077f
JS
6513 } else {
6514 /*
895427bd 6515 * The scsi_buf for a regular I/O holds the FCP cmnd,
96f7077f
JS
6516 * the FCP rsp, a SGE for each, and a SGE for up to
6517 * cfg_sg_seg_cnt data segments.
6518 */
6519 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
895427bd 6520 sizeof(struct fcp_rsp) +
81e6a637 6521 ((phba->cfg_sg_seg_cnt + extra) *
895427bd 6522 sizeof(struct sli4_sge));
96f7077f
JS
6523
6524 /* Total SGEs for scsi_sg_list */
81e6a637 6525 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
5b9e70b2 6526 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
895427bd 6527
96f7077f 6528 /*
81e6a637 6529 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
895427bd 6530 * need to post 1 page for the SGL.
96f7077f 6531 */
085c647c 6532 }
acd6859b 6533
5b9e70b2
JS
6534 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
6535 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6536 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
6537 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
6538 "6300 Reducing NVME sg segment "
6539 "cnt to %d\n",
6540 LPFC_MAX_NVME_SEG_CNT);
6541 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
6542 } else
6543 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
6544 }
6545
96f7077f 6546 /* Initialize the host templates with the updated values. */
5b9e70b2
JS
6547 lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6548 lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6549 lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
96f7077f
JS
6550
6551 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
6552 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
6553 else
6554 phba->cfg_sg_dma_buf_size =
6555 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
6556
6557 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5b9e70b2
JS
6558 "9087 sg_seg_cnt:%d dmabuf_size:%d "
6559 "total:%d scsi:%d nvme:%d\n",
96f7077f 6560 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5b9e70b2
JS
6561 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
6562 phba->cfg_nvme_seg_cnt);
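
SLI4_PAGE_ALIGN above rounds the computed buffer size up to the next SLI page boundary. A minimal sketch of that power-of-two round-up, assuming the 4096-byte page that SLI4_PAGE_SIZE denotes:

#include <stdio.h>

#define SLI4_PAGE_SZ 4096u   /* assumed value of SLI4_PAGE_SIZE */

/* Round x up to the next multiple of a power-of-two boundary. */
static unsigned int sli4_page_align(unsigned int x)
{
	return (x + SLI4_PAGE_SZ - 1) & ~(SLI4_PAGE_SZ - 1);
}

int main(void)
{
	printf("4232 -> %u\n", sli4_page_align(4232));  /* prints 8192 */
	return 0;
}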
3772a991 6563
da0436e9 6564 /* Initialize buffer queue management fields */
895427bd 6565 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
da0436e9
JS
6566 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
6567 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3772a991 6568
da0436e9
JS
6569 /*
6570 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
6571 */
895427bd
JS
6572 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
6573 /* Initialize the Abort scsi buffer list used by driver */
6574 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
6575 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
6576 }
6577
6578 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6579 /* Initialize the Abort nvme buffer list used by driver */
5e5b511d 6580 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
86c67379 6581 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
a8cf5dfe 6582 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
79d8c4ce
JS
6583 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
6584 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
895427bd
JS
6585 }
6586
da0436e9 6587 /* This abort list used by worker thread */
895427bd 6588 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
a8cf5dfe 6589 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
3772a991 6590
da0436e9 6591 /*
6d368e53 6592 * Initialize driver internal slow-path work queues
da0436e9 6593 */
3772a991 6594
da0436e9
JS
6595	/* Driver internal slow-path CQ Event pool */
6596 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
6597 /* Response IOCB work queue list */
45ed1190 6598 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
da0436e9
JS
6599 /* Asynchronous event CQ Event work queue list */
6600 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
6601 /* Fast-path XRI aborted CQ Event work queue list */
6602 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
6603 /* Slow-path XRI aborted CQ Event work queue list */
6604 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
6605 /* Receive queue CQ Event work queue list */
6606 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6607
6d368e53
JS
6608 /* Initialize extent block lists. */
6609 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6610 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6611 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6612 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6613
d1f525aa
JS
6614	/* Initialize mboxq lists. If the early init routines fail,
6615	 * these lists must already be correctly initialized.
6616 */
6617 INIT_LIST_HEAD(&phba->sli.mboxq);
6618 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6619
448193b5
JS
6620 /* initialize optic_state to 0xFF */
6621 phba->sli4_hba.lnk_info.optic_state = 0xff;
6622
da0436e9
JS
6623 /* Allocate device driver memory */
6624 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
6625 if (rc)
6626 return -ENOMEM;
6627
2fcee4bf 6628 /* IF Type 2 ports get initialized now. */
27d6ac0a 6629 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
2fcee4bf
JS
6630 LPFC_SLI_INTF_IF_TYPE_2) {
6631 rc = lpfc_pci_function_reset(phba);
895427bd
JS
6632 if (unlikely(rc)) {
6633 rc = -ENODEV;
6634 goto out_free_mem;
6635 }
946727dc 6636 phba->temp_sensor_support = 1;
2fcee4bf
JS
6637 }
6638
da0436e9
JS
6639 /* Create the bootstrap mailbox command */
6640 rc = lpfc_create_bootstrap_mbox(phba);
6641 if (unlikely(rc))
6642 goto out_free_mem;
6643
6644 /* Set up the host's endian order with the device. */
6645 rc = lpfc_setup_endian_order(phba);
6646 if (unlikely(rc))
6647 goto out_free_bsmbx;
6648
6649 /* Set up the hba's configuration parameters. */
6650 rc = lpfc_sli4_read_config(phba);
cff261f6
JS
6651 if (unlikely(rc))
6652 goto out_free_bsmbx;
6653 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
da0436e9
JS
6654 if (unlikely(rc))
6655 goto out_free_bsmbx;
6656
2fcee4bf
JS
6657 /* IF Type 0 ports get initialized now. */
6658 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6659 LPFC_SLI_INTF_IF_TYPE_0) {
6660 rc = lpfc_pci_function_reset(phba);
6661 if (unlikely(rc))
6662 goto out_free_bsmbx;
6663 }
da0436e9 6664
cb5172ea
JS
6665 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6666 GFP_KERNEL);
6667 if (!mboxq) {
6668 rc = -ENOMEM;
6669 goto out_free_bsmbx;
6670 }
6671
f358dd0c 6672 /* Check for NVMET being configured */
895427bd 6673 phba->nvmet_support = 0;
f358dd0c
JS
6674 if (lpfc_enable_nvmet_cnt) {
6675
6676 /* First get WWN of HBA instance */
6677 lpfc_read_nv(phba, mboxq);
6678 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6679 if (rc != MBX_SUCCESS) {
6680 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6681 "6016 Mailbox failed , mbxCmd x%x "
6682 "READ_NV, mbxStatus x%x\n",
6683 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6684 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
d1f525aa 6685 mempool_free(mboxq, phba->mbox_mem_pool);
f358dd0c
JS
6686 rc = -EIO;
6687 goto out_free_bsmbx;
6688 }
6689 mb = &mboxq->u.mb;
6690 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6691 sizeof(uint64_t));
6692 wwn = cpu_to_be64(wwn);
6693 phba->sli4_hba.wwnn.u.name = wwn;
6694 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6695 sizeof(uint64_t));
6696 /* wwn is WWPN of HBA instance */
6697 wwn = cpu_to_be64(wwn);
6698 phba->sli4_hba.wwpn.u.name = wwn;
6699
6700 /* Check to see if it matches any module parameter */
6701 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6702 if (wwn == lpfc_enable_nvmet[i]) {
7d708033 6703#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
3c603be9
JS
6704 if (lpfc_nvmet_mem_alloc(phba))
6705 break;
6706
6707 phba->nvmet_support = 1; /* a match */
6708
f358dd0c
JS
6709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6710 "6017 NVME Target %016llx\n",
6711 wwn);
7d708033
JS
6712#else
6713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6714 "6021 Can't enable NVME Target."
6715 " NVME_TARGET_FC infrastructure"
6716 " is not in kernel\n");
6717#endif
c490850a
JS
6718 /* Not supported for NVMET */
6719 phba->cfg_xri_rebalancing = 0;
3c603be9 6720 break;
f358dd0c
JS
6721 }
6722 }
6723 }
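
The match above relies on the 8-byte node and port names being stored big-endian, so the memcpy plus byte swap yields a CPU-order u64 that compares directly against the values parsed from the lpfc_enable_nvmet module parameter. A hedged, endian-independent userspace sketch of the same extraction (the sample name bytes are made up):

#include <stdint.h>
#include <stdio.h>

/* Fold 8 big-endian name bytes into a host-order 64-bit WWN;
 * byte-by-byte shifting avoids any endianness assumption.
 */
static uint64_t wwn_to_u64(const uint8_t name[8])
{
	uint64_t wwn = 0;
	int i;

	for (i = 0; i < 8; i++)
		wwn = (wwn << 8) | name[i];
	return wwn;
}

int main(void)
{
	/* Made-up WWPN bytes, for illustration only. */
	const uint8_t name[8] = { 0x20, 0x00, 0x00, 0x90,
				  0xfa, 0x01, 0x02, 0x03 };

	printf("WWPN %016llx\n", (unsigned long long)wwn_to_u64(name));
	return 0;
}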
895427bd
JS
6724
6725 lpfc_nvme_mod_param_dep(phba);
6726
fedd3b7b 6727 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
cb5172ea
JS
6728 lpfc_supported_pages(mboxq);
6729 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
fedd3b7b
JS
6730 if (!rc) {
6731 mqe = &mboxq->u.mqe;
6732 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
6733 LPFC_MAX_SUPPORTED_PAGES);
6734 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
6735 switch (pn_page[i]) {
6736 case LPFC_SLI4_PARAMETERS:
6737 phba->sli4_hba.pc_sli4_params.supported = 1;
6738 break;
6739 default:
6740 break;
6741 }
6742 }
6743 /* Read the port's SLI4 Parameters capabilities if supported. */
6744 if (phba->sli4_hba.pc_sli4_params.supported)
6745 rc = lpfc_pc_sli4_params_get(phba, mboxq);
6746 if (rc) {
6747 mempool_free(mboxq, phba->mbox_mem_pool);
6748 rc = -EIO;
6749 goto out_free_bsmbx;
cb5172ea
JS
6750 }
6751 }
65791f1f 6752
fedd3b7b
JS
6753 /*
6754 * Get sli4 parameters that override parameters from Port capabilities.
6d368e53
JS
6755 * If this call fails, it isn't critical unless the SLI4 parameters come
6756 * back in conflict.
fedd3b7b 6757 */
6d368e53
JS
6758 rc = lpfc_get_sli4_parameters(phba, mboxq);
6759 if (rc) {
b92dc72d
JS
6760 if_type = bf_get(lpfc_sli_intf_if_type,
6761 &phba->sli4_hba.sli_intf);
6762 if_fam = bf_get(lpfc_sli_intf_sli_family,
6763 &phba->sli4_hba.sli_intf);
6d368e53
JS
6764 if (phba->sli4_hba.extents_in_use &&
6765 phba->sli4_hba.rpi_hdrs_in_use) {
6766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6767 "2999 Unsupported SLI4 Parameters "
6768 "Extents and RPI headers enabled.\n");
b92dc72d
JS
6769 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6770 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
6771 mempool_free(mboxq, phba->mbox_mem_pool);
6772 rc = -EIO;
6773 goto out_free_bsmbx;
6774 }
6775 }
6776 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6777 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
6778 mempool_free(mboxq, phba->mbox_mem_pool);
6779 rc = -EIO;
6780 goto out_free_bsmbx;
6d368e53
JS
6781 }
6782 }
895427bd 6783
cb5172ea 6784 mempool_free(mboxq, phba->mbox_mem_pool);
1ba981fd
JS
6785
6786 /* Verify OAS is supported */
6787 lpfc_sli4_oas_verify(phba);
1ba981fd 6788
d2cc9bcd
JS
6789 /* Verify RAS support on adapter */
6790 lpfc_sli4_ras_init(phba);
6791
5350d872
JS
6792 /* Verify all the SLI4 queues */
6793 rc = lpfc_sli4_queue_verify(phba);
da0436e9
JS
6794 if (rc)
6795 goto out_free_bsmbx;
6796
6797 /* Create driver internal CQE event pool */
6798 rc = lpfc_sli4_cq_event_pool_create(phba);
6799 if (rc)
5350d872 6800 goto out_free_bsmbx;
da0436e9 6801
8a9d2e80
JS
6802 /* Initialize sgl lists per host */
6803 lpfc_init_sgl_list(phba);
6804
6805 /* Allocate and initialize active sgl array */
da0436e9
JS
6806 rc = lpfc_init_active_sgl_array(phba);
6807 if (rc) {
6808 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6809 "1430 Failed to initialize sgl list.\n");
8a9d2e80 6810 goto out_destroy_cq_event_pool;
da0436e9 6811 }
da0436e9
JS
6812 rc = lpfc_sli4_init_rpi_hdrs(phba);
6813 if (rc) {
6814 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6815 "1432 Failed to initialize rpi headers.\n");
6816 goto out_free_active_sgl;
6817 }
6818
a93ff37a 6819 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
0c9ab6f5 6820 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
6396bb22 6821 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
0c9ab6f5
JS
6822 GFP_KERNEL);
6823 if (!phba->fcf.fcf_rr_bmask) {
6824 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6825 "2759 Failed allocate memory for FCF round "
6826 "robin failover bmask\n");
0558056c 6827 rc = -ENOMEM;
0c9ab6f5
JS
6828 goto out_remove_rpi_hdrs;
6829 }
6830
6a828b0f 6831 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
cdb42bec
JS
6832 sizeof(struct lpfc_hba_eq_hdl),
6833 GFP_KERNEL);
895427bd 6834 if (!phba->sli4_hba.hba_eq_hdl) {
67d12733
JS
6835 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6836 "2572 Failed allocate memory for "
6837 "fast-path per-EQ handle array\n");
6838 rc = -ENOMEM;
6839 goto out_free_fcf_rr_bmask;
da0436e9
JS
6840 }
6841
222e9239 6842 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
895427bd
JS
6843 sizeof(struct lpfc_vector_map_info),
6844 GFP_KERNEL);
7bb03bbf
JS
6845 if (!phba->sli4_hba.cpu_map) {
6846 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6847 "3327 Failed allocate memory for msi-x "
6848 "interrupt vector mapping\n");
6849 rc = -ENOMEM;
895427bd 6850 goto out_free_hba_eq_hdl;
7bb03bbf 6851 }
b246de17 6852
32517fc0
JS
6853 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
6854 if (!phba->sli4_hba.eq_info) {
6855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6856 "3321 Failed allocation for per_cpu stats\n");
6857 rc = -ENOMEM;
6858 goto out_free_hba_cpu_map;
6859 }
912e3acd
JS
6860 /*
6861 * Enable sr-iov virtual functions if supported and configured
6862 * through the module parameter.
6863 */
6864 if (phba->cfg_sriov_nr_virtfn > 0) {
6865 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6866 phba->cfg_sriov_nr_virtfn);
6867 if (rc) {
6868 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6869 "3020 Requested number of SR-IOV "
6870 "virtual functions (%d) is not "
6871 "supported\n",
6872 phba->cfg_sriov_nr_virtfn);
6873 phba->cfg_sriov_nr_virtfn = 0;
6874 }
6875 }
6876
5248a749 6877 return 0;
da0436e9 6878
32517fc0
JS
6879out_free_hba_cpu_map:
6880 kfree(phba->sli4_hba.cpu_map);
895427bd
JS
6881out_free_hba_eq_hdl:
6882 kfree(phba->sli4_hba.hba_eq_hdl);
0c9ab6f5
JS
6883out_free_fcf_rr_bmask:
6884 kfree(phba->fcf.fcf_rr_bmask);
da0436e9
JS
6885out_remove_rpi_hdrs:
6886 lpfc_sli4_remove_rpi_hdrs(phba);
6887out_free_active_sgl:
6888 lpfc_free_active_sgl(phba);
da0436e9
JS
6889out_destroy_cq_event_pool:
6890 lpfc_sli4_cq_event_pool_destroy(phba);
da0436e9
JS
6891out_free_bsmbx:
6892 lpfc_destroy_bootstrap_mbox(phba);
6893out_free_mem:
6894 lpfc_mem_free(phba);
6895 return rc;
6896}
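
The error ladder that ends this routine follows the kernel's goto-unwind idiom: each failure jumps to the label that releases everything acquired so far, strictly in reverse order of acquisition. A minimal sketch of the pattern with hypothetical acquire/release pairs:

#include <stdlib.h>

struct ctx {
	void *a;
	void *b;
};

/* Acquire two resources; on any failure, release everything already
 * held by jumping to the matching unwind label, in reverse order.
 */
static int setup_resources(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		return -1;

	c->b = malloc(64);
	if (!c->b)
		goto out_free_a;

	return 0;               /* success: caller now owns a and b */

out_free_a:
	free(c->a);
	c->a = NULL;
	return -1;
}

int main(void)
{
	struct ctx c;

	if (setup_resources(&c) == 0) {
		free(c.b);
		free(c.a);
	}
	return 0;
}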
6897
6898/**
6899 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
6900 * @phba: pointer to lpfc hba data structure.
6901 *
6902 * This routine is invoked to unset the driver internal resources set up
6903 * specifically for supporting the SLI-4 HBA device it is attached to.
6904 **/
6905static void
6906lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
6907{
6908 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6909
32517fc0
JS
6910 free_percpu(phba->sli4_hba.eq_info);
6911
7bb03bbf
JS
6912 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
6913 kfree(phba->sli4_hba.cpu_map);
222e9239 6914 phba->sli4_hba.num_possible_cpu = 0;
7bb03bbf 6915 phba->sli4_hba.num_present_cpu = 0;
76fd07a6 6916 phba->sli4_hba.curr_disp_cpu = 0;
7bb03bbf 6917
da0436e9 6918 /* Free memory allocated for fast-path work queue handles */
895427bd 6919 kfree(phba->sli4_hba.hba_eq_hdl);
da0436e9
JS
6920
6921 /* Free the allocated rpi headers. */
6922 lpfc_sli4_remove_rpi_hdrs(phba);
d11e31dd 6923 lpfc_sli4_remove_rpis(phba);
da0436e9 6924
0c9ab6f5
JS
6925 /* Free eligible FCF index bmask */
6926 kfree(phba->fcf.fcf_rr_bmask);
6927
da0436e9
JS
6928 /* Free the ELS sgl list */
6929 lpfc_free_active_sgl(phba);
8a9d2e80 6930 lpfc_free_els_sgl_list(phba);
f358dd0c 6931 lpfc_free_nvmet_sgl_list(phba);
da0436e9 6932
da0436e9
JS
6933 /* Free the completion queue EQ event pool */
6934 lpfc_sli4_cq_event_release_all(phba);
6935 lpfc_sli4_cq_event_pool_destroy(phba);
6936
6d368e53
JS
6937 /* Release resource identifiers. */
6938 lpfc_sli4_dealloc_resource_identifiers(phba);
6939
da0436e9
JS
6940 /* Free the bsmbx region. */
6941 lpfc_destroy_bootstrap_mbox(phba);
6942
6943 /* Free the SLI Layer memory with SLI4 HBAs */
6944 lpfc_mem_free_all(phba);
6945
6946 /* Free the current connect table */
6947 list_for_each_entry_safe(conn_entry, next_conn_entry,
4d9ab994
JS
6948 &phba->fcf_conn_rec_list, list) {
6949 list_del_init(&conn_entry->list);
da0436e9 6950 kfree(conn_entry);
4d9ab994 6951 }
da0436e9
JS
6952
6953 return;
6954}
6955
6956/**
25985edc 6957 * lpfc_init_api_table_setup - Set up init api function jump table
da0436e9
JS
6958 * @phba: The hba struct for which this call is being executed.
6959 * @dev_grp: The HBA PCI-Device group number.
6960 *
6961 * This routine sets up the device INIT interface API function jump table
6962 * in the @phba struct.
6963 *
6964 * Returns: 0 - success, -ENODEV - failure.
6965 **/
6966int
6967lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6968{
84d1b006
JS
6969 phba->lpfc_hba_init_link = lpfc_hba_init_link;
6970 phba->lpfc_hba_down_link = lpfc_hba_down_link;
7f86059a 6971 phba->lpfc_selective_reset = lpfc_selective_reset;
da0436e9
JS
6972 switch (dev_grp) {
6973 case LPFC_PCI_DEV_LP:
6974 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
6975 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
6976 phba->lpfc_stop_port = lpfc_stop_port_s3;
6977 break;
6978 case LPFC_PCI_DEV_OC:
6979 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
6980 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
6981 phba->lpfc_stop_port = lpfc_stop_port_s4;
6982 break;
6983 default:
6984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6985 "1431 Invalid HBA PCI-device group: 0x%x\n",
6986 dev_grp);
6987 return -ENODEV;
6989 }
6990 return 0;
6991}
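
This routine is a classic function-pointer jump table: the rest of the driver calls one phba method and the table decides whether the SLI-3 or SLI-4 implementation runs. A stripped-down sketch of the dispatch pattern (all names hypothetical):

#include <stdio.h>

enum dev_grp { DEV_GRP_LP, DEV_GRP_OC };

struct hba {
	void (*stop_port)(struct hba *h);
};

static void stop_port_s3(struct hba *h) { (void)h; puts("SLI-3 stop"); }
static void stop_port_s4(struct hba *h) { (void)h; puts("SLI-4 stop"); }

/* Populate the per-device jump table once, at init time. */
static int init_api_table(struct hba *h, enum dev_grp grp)
{
	switch (grp) {
	case DEV_GRP_LP:
		h->stop_port = stop_port_s3;
		break;
	case DEV_GRP_OC:
		h->stop_port = stop_port_s4;
		break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	struct hba h;

	if (init_api_table(&h, DEV_GRP_OC) == 0)
		h.stop_port(&h);        /* dispatches through the table */
	return 0;
}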
6992
da0436e9
JS
6993/**
6994 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
6995 * @phba: pointer to lpfc hba data structure.
6996 *
6997 * This routine is invoked to set up the driver internal resources after the
6998 * device specific resource setup to support the HBA device it is attached to.
6999 *
7000 * Return codes
af901ca1 7001 * 0 - successful
da0436e9
JS
7002 * other values - error
7003 **/
7004static int
7005lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
7006{
7007 int error;
7008
7009 /* Startup the kernel thread for this host adapter. */
7010 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7011 "lpfc_worker_%d", phba->brd_no);
7012 if (IS_ERR(phba->worker_thread)) {
7013 error = PTR_ERR(phba->worker_thread);
7014 return error;
3772a991
JS
7015 }
7016
7017 return 0;
7018}
7019
7020/**
7021 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
7022 * @phba: pointer to lpfc hba data structure.
7023 *
7024 * This routine is invoked to unset the driver internal resources set up after
7025 * the device specific resource setup for supporting the HBA device it
7026 * is attached to.
7027 **/
7028static void
7029lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
7030{
f485c18d
DK
7031 if (phba->wq) {
7032 flush_workqueue(phba->wq);
7033 destroy_workqueue(phba->wq);
7034 phba->wq = NULL;
7035 }
7036
3772a991 7037 /* Stop kernel worker thread */
0cdb84ec
JS
7038 if (phba->worker_thread)
7039 kthread_stop(phba->worker_thread);
3772a991
JS
7040}
7041
7042/**
7043 * lpfc_free_iocb_list - Free iocb list.
7044 * @phba: pointer to lpfc hba data structure.
7045 *
7046 * This routine is invoked to free the driver's IOCB list and memory.
7047 **/
6c621a22 7048void
3772a991
JS
7049lpfc_free_iocb_list(struct lpfc_hba *phba)
7050{
7051 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
7052
7053 spin_lock_irq(&phba->hbalock);
7054 list_for_each_entry_safe(iocbq_entry, iocbq_next,
7055 &phba->lpfc_iocb_list, list) {
7056 list_del(&iocbq_entry->list);
7057 kfree(iocbq_entry);
7058 phba->total_iocbq_bufs--;
98c9ea5c 7059 }
3772a991
JS
7060 spin_unlock_irq(&phba->hbalock);
7061
7062 return;
7063}
7064
7065/**
7066 * lpfc_init_iocb_list - Allocate and initialize iocb list.
7067 * @phba: pointer to lpfc hba data structure.
7068 *
7069 * This routine is invoked to allocate and initialize the driver's IOCB
7070 * list and set up the IOCB tag array accordingly.
7071 *
7072 * Return codes
af901ca1 7073 * 0 - successful
3772a991
JS
7074 * other values - error
7075 **/
6c621a22 7076int
3772a991
JS
7077lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
7078{
7079 struct lpfc_iocbq *iocbq_entry = NULL;
7080 uint16_t iotag;
7081 int i;
dea3101e 7082
7083 /* Initialize and populate the iocb list per host. */
7084 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3772a991 7085 for (i = 0; i < iocb_count; i++) {
dd00cc48 7086 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
dea3101e 7087 if (iocbq_entry == NULL) {
7088 printk(KERN_ERR "%s: only allocated %d iocbs of "
7089 "expected %d count. Unloading driver.\n",
cadbd4a5 7090 __func__, i, LPFC_IOCB_LIST_CNT);
dea3101e 7091 goto out_free_iocbq;
7092 }
7093
604a3e30
JB
7094 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
7095 if (iotag == 0) {
3772a991 7096 kfree(iocbq_entry);
604a3e30 7097 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3772a991 7098 "Unloading driver.\n", __func__);
604a3e30
JB
7099 goto out_free_iocbq;
7100 }
6d368e53 7101 iocbq_entry->sli4_lxritag = NO_XRI;
3772a991 7102 iocbq_entry->sli4_xritag = NO_XRI;
2e0fef85
JS
7103
7104 spin_lock_irq(&phba->hbalock);
dea3101e 7105 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
7106 phba->total_iocbq_bufs++;
2e0fef85 7107 spin_unlock_irq(&phba->hbalock);
dea3101e 7108 }
7109
3772a991 7110 return 0;
dea3101e 7111
3772a991
JS
7112out_free_iocbq:
7113 lpfc_free_iocb_list(phba);
dea3101e 7114
3772a991
JS
7115 return -ENOMEM;
7116}
5e9d9b82 7117
3772a991 7118/**
8a9d2e80 7119 * lpfc_free_sgl_list - Free a given sgl list.
da0436e9 7120 * @phba: pointer to lpfc hba data structure.
8a9d2e80 7121 * @sglq_list: pointer to the head of sgl list.
3772a991 7122 *
8a9d2e80 7123 * This routine is invoked to free a give sgl list and memory.
3772a991 7124 **/
8a9d2e80
JS
7125void
7126lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
3772a991 7127{
da0436e9 7128 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8a9d2e80
JS
7129
7130 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
7131 list_del(&sglq_entry->list);
7132 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
7133 kfree(sglq_entry);
7134 }
7135}
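
list_for_each_entry_safe is required in the free routines above because each element is deleted inside the loop; the _safe variant caches the successor before the current node goes away. A self-contained sketch of the same idiom over a hand-rolled singly linked list:

#include <stdlib.h>

struct node {
	struct node *next;
};

/* Free every node: cache the successor before freeing the current
 * node, exactly what the list_for_each_entry_safe iterator does.
 */
static void free_all(struct node *head)
{
	struct node *cur = head;
	struct node *next;

	while (cur) {
		next = cur->next;       /* grab the successor first */
		free(cur);
		cur = next;
	}
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->next = head;
		head = n;
	}
	free_all(head);
	return 0;
}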
7136
7137/**
7138 * lpfc_free_els_sgl_list - Free els sgl list.
7139 * @phba: pointer to lpfc hba data structure.
7140 *
7141 * This routine is invoked to free the driver's els sgl list and memory.
7142 **/
7143static void
7144lpfc_free_els_sgl_list(struct lpfc_hba *phba)
7145{
da0436e9 7146 LIST_HEAD(sglq_list);
dea3101e 7147
8a9d2e80 7148 /* Retrieve all els sgls from driver list */
da0436e9 7149 spin_lock_irq(&phba->hbalock);
895427bd
JS
7150 spin_lock(&phba->sli4_hba.sgl_list_lock);
7151 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
7152 spin_unlock(&phba->sli4_hba.sgl_list_lock);
da0436e9 7153 spin_unlock_irq(&phba->hbalock);
dea3101e 7154
8a9d2e80
JS
7155 /* Now free the sgl list */
7156 lpfc_free_sgl_list(phba, &sglq_list);
da0436e9 7157}
92d7f7b0 7158
f358dd0c
JS
7159/**
7160 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
7161 * @phba: pointer to lpfc hba data structure.
7162 *
7163 * This routine is invoked to free the driver's nvmet sgl list and memory.
7164 **/
7165static void
7166lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
7167{
7168 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7169 LIST_HEAD(sglq_list);
7170
7171 /* Retrieve all nvmet sgls from driver list */
7172 spin_lock_irq(&phba->hbalock);
7173 spin_lock(&phba->sli4_hba.sgl_list_lock);
7174 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
7175 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7176 spin_unlock_irq(&phba->hbalock);
7177
7178 /* Now free the sgl list */
7179 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
7180 list_del(&sglq_entry->list);
7181 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
7182 kfree(sglq_entry);
7183 }
4b40d02b
DK
7184
7185 /* Update the nvmet_xri_cnt to reflect no current sgls.
7186 * The next initialization cycle sets the count and allocates
7187 * the sgls over again.
7188 */
7189 phba->sli4_hba.nvmet_xri_cnt = 0;
f358dd0c
JS
7190}
7191
da0436e9
JS
7192/**
7193 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
7194 * @phba: pointer to lpfc hba data structure.
7195 *
7196 * This routine is invoked to allocate the driver's active sgl memory.
7197 * This array will hold the sglq_entry's for active IOs.
7198 **/
7199static int
7200lpfc_init_active_sgl_array(struct lpfc_hba *phba)
7201{
7202 int size;
7203 size = sizeof(struct lpfc_sglq *);
7204 size *= phba->sli4_hba.max_cfg_param.max_xri;
7205
7206 phba->sli4_hba.lpfc_sglq_active_list =
7207 kzalloc(size, GFP_KERNEL);
7208 if (!phba->sli4_hba.lpfc_sglq_active_list)
7209 return -ENOMEM;
7210 return 0;
3772a991
JS
7211}
7212
7213/**
da0436e9 7214 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3772a991
JS
7215 * @phba: pointer to lpfc hba data structure.
7216 *
da0436e9
JS
7217 * This routine is invoked to walk through the array of active sglq entries
7218 * and free all of the resources.
7219 * This is just a placeholder for now.
3772a991
JS
7220 **/
7221static void
da0436e9 7222lpfc_free_active_sgl(struct lpfc_hba *phba)
3772a991 7223{
da0436e9 7224 kfree(phba->sli4_hba.lpfc_sglq_active_list);
3772a991
JS
7225}
7226
7227/**
da0436e9 7228 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3772a991
JS
7229 * @phba: pointer to lpfc hba data structure.
7230 *
da0436e9
JS
7231 * This routine is invoked to allocate and initialize the driver's sgl
7232 * list and set up the sgl xritag tag array accordingly.
3772a991 7233 *
3772a991 7234 **/
8a9d2e80 7235static void
da0436e9 7236lpfc_init_sgl_list(struct lpfc_hba *phba)
3772a991 7237{
da0436e9 7238 /* Initialize and populate the sglq list per host/VF. */
895427bd 7239 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
da0436e9 7240 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
f358dd0c 7241 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
86c67379 7242 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
da0436e9 7243
8a9d2e80
JS
7244 /* els xri-sgl book keeping */
7245 phba->sli4_hba.els_xri_cnt = 0;
0ff10d46 7246
895427bd 7247 /* nvme xri-buffer book keeping */
5e5b511d 7248 phba->sli4_hba.io_xri_cnt = 0;
da0436e9
JS
7249}
7250
7251/**
7252 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
7253 * @phba: pointer to lpfc hba data structure.
7254 *
7255 * This routine is invoked to post rpi header templates to the
88a2cfbb 7256 * port for those SLI4 ports that do not support extents. This routine
da0436e9 7257 * posts a PAGE_SIZE memory region to the port to hold up to
88a2cfbb
JS
7258 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
7259 * and should be called only when interrupts are disabled.
da0436e9
JS
7260 *
7261 * Return codes
af901ca1 7262 * 0 - successful
88a2cfbb 7263 * -ERROR - otherwise.
da0436e9
JS
7264 **/
7265int
7266lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
7267{
7268 int rc = 0;
da0436e9
JS
7269 struct lpfc_rpi_hdr *rpi_hdr;
7270
7271 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
ff78d8f9 7272 if (!phba->sli4_hba.rpi_hdrs_in_use)
6d368e53 7273 return rc;
6d368e53
JS
7274 if (phba->sli4_hba.extents_in_use)
7275 return -EIO;
da0436e9
JS
7276
7277 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
7278 if (!rpi_hdr) {
7279 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7280 "0391 Error during rpi post operation\n");
7281 lpfc_sli4_remove_rpis(phba);
7282 rc = -ENODEV;
7283 }
7284
7285 return rc;
7286}
7287
7288/**
7289 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
7290 * @phba: pointer to lpfc hba data structure.
7291 *
7292 * This routine is invoked to allocate a single 4KB memory region to
7293 * support rpis and stores them in the phba. This single region
7294 * provides support for up to 64 rpis. The region is used globally
7295 * by the device.
7296 *
7297 * Returns:
7298 * A valid rpi hdr on success.
7299 * A NULL pointer on any failure.
7300 **/
7301struct lpfc_rpi_hdr *
7302lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
7303{
7304 uint16_t rpi_limit, curr_rpi_range;
7305 struct lpfc_dmabuf *dmabuf;
7306 struct lpfc_rpi_hdr *rpi_hdr;
7307
6d368e53
JS
7308 /*
7309 * If the SLI4 port supports extents, posting the rpi header isn't
7310 * required. Set the expected maximum count and let the actual value
7311 * get set when extents are fully allocated.
7312 */
7313 if (!phba->sli4_hba.rpi_hdrs_in_use)
7314 return NULL;
7315 if (phba->sli4_hba.extents_in_use)
7316 return NULL;
7317
7318 /* The limit on the logical index is just the max_rpi count. */
845d9e8d 7319 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
da0436e9
JS
7320
7321 spin_lock_irq(&phba->hbalock);
6d368e53
JS
7322 /*
7323 * Establish the starting RPI in this header block. The starting
7324 * rpi is normalized to a zero base because the physical rpi is
7325 * port based.
7326 */
97f2ecf1 7327 curr_rpi_range = phba->sli4_hba.next_rpi;
da0436e9
JS
7328 spin_unlock_irq(&phba->hbalock);
7329
845d9e8d
JS
7330 /* Reached full RPI range */
7331 if (curr_rpi_range == rpi_limit)
6d368e53 7332 return NULL;
845d9e8d 7333
da0436e9
JS
7334 /*
7335 * First allocate the protocol header region for the port. The
7336 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
7337 */
7338 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7339 if (!dmabuf)
7340 return NULL;
7341
750afb08
LC
7342 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
7343 LPFC_HDR_TEMPLATE_SIZE,
7344 &dmabuf->phys, GFP_KERNEL);
da0436e9
JS
7345 if (!dmabuf->virt) {
7346 rpi_hdr = NULL;
7347 goto err_free_dmabuf;
7348 }
7349
da0436e9
JS
7350 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
7351 rpi_hdr = NULL;
7352 goto err_free_coherent;
7353 }
7354
7355 /* Save the rpi header data for cleanup later. */
7356 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
7357 if (!rpi_hdr)
7358 goto err_free_coherent;
7359
7360 rpi_hdr->dmabuf = dmabuf;
7361 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
7362 rpi_hdr->page_count = 1;
7363 spin_lock_irq(&phba->hbalock);
6d368e53
JS
7364
7365 /* The rpi_hdr stores the logical index only. */
7366 rpi_hdr->start_rpi = curr_rpi_range;
845d9e8d 7367 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
da0436e9
JS
7368 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
7369
da0436e9
JS
7370 spin_unlock_irq(&phba->hbalock);
7371 return rpi_hdr;
7372
7373 err_free_coherent:
7374 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
7375 dmabuf->virt, dmabuf->phys);
7376 err_free_dmabuf:
7377 kfree(dmabuf);
7378 return NULL;
7379}
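
The port requires the 4KB header region to also be 4KB aligned, so the code above checks the bus address with IS_ALIGNED instead of trusting the allocator. A minimal sketch of that power-of-two alignment test, assuming the 4096-byte template size:

#include <stdint.h>
#include <stdio.h>

#define HDR_TEMPLATE_SZ 4096u   /* assumed LPFC_HDR_TEMPLATE_SIZE */

/* True when addr is a multiple of the power-of-two size. */
static int is_aligned(uint64_t addr, uint64_t size)
{
	return (addr & (size - 1)) == 0;
}

int main(void)
{
	printf("0x1000 aligned: %d\n", is_aligned(0x1000, HDR_TEMPLATE_SZ));
	printf("0x1010 aligned: %d\n", is_aligned(0x1010, HDR_TEMPLATE_SZ));
	return 0;
}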
7380
7381/**
7382 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
7383 * @phba: pointer to lpfc hba data structure.
7384 *
7385 * This routine is invoked to remove all memory resources allocated
6d368e53
JS
7386 * to support rpis for SLI4 ports not supporting extents. This routine
7387 * presumes the caller has released all rpis consumed by fabric or port
7388 * logins and is prepared to have the header pages removed.
da0436e9
JS
7389 **/
7390void
7391lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
7392{
7393 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
7394
6d368e53
JS
7395 if (!phba->sli4_hba.rpi_hdrs_in_use)
7396 goto exit;
7397
da0436e9
JS
7398 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
7399 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
7400 list_del(&rpi_hdr->list);
7401 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
7402 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
7403 kfree(rpi_hdr->dmabuf);
7404 kfree(rpi_hdr);
7405 }
6d368e53
JS
7406 exit:
7407 /* There are no rpis available to the port now. */
7408 phba->sli4_hba.next_rpi = 0;
da0436e9
JS
7409}
7410
7411/**
7412 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
7413 * @pdev: pointer to pci device data structure.
7414 *
7415 * This routine is invoked to allocate the driver hba data structure for an
7416 * HBA device. If the allocation is successful, the phba reference to the
7417 * PCI device data structure is set.
7418 *
7419 * Return codes
af901ca1 7420 * pointer to @phba - successful
da0436e9
JS
7421 * NULL - error
7422 **/
7423static struct lpfc_hba *
7424lpfc_hba_alloc(struct pci_dev *pdev)
7425{
7426 struct lpfc_hba *phba;
7427
7428 /* Allocate memory for HBA structure */
7429 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
7430 if (!phba) {
e34ccdfe 7431 dev_err(&pdev->dev, "failed to allocate hba struct\n");
da0436e9
JS
7432 return NULL;
7433 }
7434
7435 /* Set reference to PCI device in HBA structure */
7436 phba->pcidev = pdev;
7437
7438 /* Assign an unused board number */
7439 phba->brd_no = lpfc_get_instance();
7440 if (phba->brd_no < 0) {
7441 kfree(phba);
7442 return NULL;
7443 }
65791f1f 7444 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
da0436e9 7445
4fede78f 7446 spin_lock_init(&phba->ct_ev_lock);
f1c3b0fc
JS
7447 INIT_LIST_HEAD(&phba->ct_ev_waiters);
7448
da0436e9
JS
7449 return phba;
7450}
7451
7452/**
7453 * lpfc_hba_free - Free driver hba data structure with a device.
7454 * @phba: pointer to lpfc hba data structure.
7455 *
7456 * This routine is invoked to free the driver hba data structure with an
7457 * HBA device.
7458 **/
7459static void
7460lpfc_hba_free(struct lpfc_hba *phba)
7461{
5e5b511d
JS
7462 if (phba->sli_rev == LPFC_SLI_REV4)
7463 kfree(phba->sli4_hba.hdwq);
7464
da0436e9
JS
7465 /* Release the driver assigned board number */
7466 idr_remove(&lpfc_hba_index, phba->brd_no);
7467
895427bd
JS
7468 /* Free memory allocated with sli3 rings */
7469 kfree(phba->sli.sli3_ring);
7470 phba->sli.sli3_ring = NULL;
2a76a283 7471
da0436e9
JS
7472 kfree(phba);
7473 return;
7474}
7475
7476/**
7477 * lpfc_create_shost - Create hba physical port with associated scsi host.
7478 * @phba: pointer to lpfc hba data structure.
7479 *
7480 * This routine is invoked to create HBA physical port and associate a SCSI
7481 * host with it.
7482 *
7483 * Return codes
af901ca1 7484 * 0 - successful
da0436e9
JS
7485 * other values - error
7486 **/
7487static int
7488lpfc_create_shost(struct lpfc_hba *phba)
7489{
7490 struct lpfc_vport *vport;
7491 struct Scsi_Host *shost;
7492
7493 /* Initialize HBA FC structure */
7494 phba->fc_edtov = FF_DEF_EDTOV;
7495 phba->fc_ratov = FF_DEF_RATOV;
7496 phba->fc_altov = FF_DEF_ALTOV;
7497 phba->fc_arbtov = FF_DEF_ARBTOV;
7498
d7c47992 7499 atomic_set(&phba->sdev_cnt, 0);
da0436e9
JS
7500 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
7501 if (!vport)
7502 return -ENODEV;
7503
7504 shost = lpfc_shost_from_vport(vport);
7505 phba->pport = vport;
2ea259ee 7506
f358dd0c
JS
7507 if (phba->nvmet_support) {
7508 /* Only 1 vport (pport) will support NVME target */
7509 if (phba->txrdy_payload_pool == NULL) {
771db5c0
RP
7510 phba->txrdy_payload_pool = dma_pool_create(
7511 "txrdy_pool", &phba->pcidev->dev,
f358dd0c
JS
7512 TXRDY_PAYLOAD_LEN, 16, 0);
7513 if (phba->txrdy_payload_pool) {
7514 phba->targetport = NULL;
7515 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
7516 lpfc_printf_log(phba, KERN_INFO,
7517 LOG_INIT | LOG_NVME_DISC,
7518 "6076 NVME Target Found\n");
7519 }
7520 }
7521 }
7522
da0436e9
JS
7523 lpfc_debugfs_initialize(vport);
7524 /* Put reference to SCSI host to driver's device private data */
7525 pci_set_drvdata(phba->pcidev, shost);
2e0fef85 7526
4258e98e
JS
7527 /*
7528 * At this point we are fully registered with PSA. In addition,
7529 * any initial discovery should be completed.
7530 */
7531 vport->load_flag |= FC_ALLOW_FDMI;
8663cbbe
JS
7532 if (phba->cfg_enable_SmartSAN ||
7533 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
4258e98e
JS
7534
7535 /* Setup appropriate attribute masks */
7536 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
8663cbbe 7537 if (phba->cfg_enable_SmartSAN)
4258e98e
JS
7538 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
7539 else
7540 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
7541 }
3772a991
JS
7542 return 0;
7543}
db2378e0 7544
3772a991
JS
7545/**
7546 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
7547 * @phba: pointer to lpfc hba data structure.
7548 *
7549 * This routine is invoked to destroy HBA physical port and the associated
7550 * SCSI host.
7551 **/
7552static void
7553lpfc_destroy_shost(struct lpfc_hba *phba)
7554{
7555 struct lpfc_vport *vport = phba->pport;
7556
7557 /* Destroy physical port that associated with the SCSI host */
7558 destroy_port(vport);
7559
7560 return;
7561}
7562
7563/**
7564 * lpfc_setup_bg - Setup Block guard structures and debug areas.
7565 * @phba: pointer to lpfc hba data structure.
7566 * @shost: the shost to be used to detect Block guard settings.
7567 *
7568 * This routine sets up the local Block guard protocol settings for @shost.
7569 * This routine also allocates memory for debugging bg buffers.
7570 **/
7571static void
7572lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7573{
bbeb79b9
JS
7574 uint32_t old_mask;
7575 uint32_t old_guard;
7576
3772a991 7577 int pagecnt = 10;
b3b98b74 7578 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
3772a991
JS
7579 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7580 "1478 Registering BlockGuard with the "
7581 "SCSI layer\n");
bbeb79b9 7582
b3b98b74
JS
7583 old_mask = phba->cfg_prot_mask;
7584 old_guard = phba->cfg_prot_guard;
bbeb79b9
JS
7585
7586 /* Only allow supported values */
b3b98b74 7587 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
bbeb79b9
JS
7588 SHOST_DIX_TYPE0_PROTECTION |
7589 SHOST_DIX_TYPE1_PROTECTION);
b3b98b74
JS
7590 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7591 SHOST_DIX_GUARD_CRC);
bbeb79b9
JS
7592
7593 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
b3b98b74
JS
7594 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7595 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
bbeb79b9 7596
b3b98b74
JS
7597 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7598 if ((old_mask != phba->cfg_prot_mask) ||
7599 (old_guard != phba->cfg_prot_guard))
bbeb79b9
JS
7600 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7601 "1475 Registering BlockGuard with the "
7602 "SCSI layer: mask %d guard %d\n",
b3b98b74
JS
7603 phba->cfg_prot_mask,
7604 phba->cfg_prot_guard);
bbeb79b9 7605
b3b98b74
JS
7606 scsi_host_set_prot(shost, phba->cfg_prot_mask);
7607 scsi_host_set_guard(shost, phba->cfg_prot_guard);
bbeb79b9
JS
7608 } else
7609 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7610 "1479 Not Registering BlockGuard with the SCSI "
7611 "layer, Bad protection parameters: %d %d\n",
7612 old_mask, old_guard);
3772a991 7613 }
bbeb79b9 7614
3772a991
JS
7615 if (!_dump_buf_data) {
7616 while (pagecnt) {
7617 spin_lock_init(&_dump_buf_lock);
7618 _dump_buf_data =
7619 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
7620 if (_dump_buf_data) {
6a9c52cf
JS
7621 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7622 "9043 BLKGRD: allocated %d pages for "
3772a991
JS
7623 "_dump_buf_data at 0x%p\n",
7624 (1 << pagecnt), _dump_buf_data);
7625 _dump_buf_data_order = pagecnt;
7626 memset(_dump_buf_data, 0,
7627 ((1 << PAGE_SHIFT) << pagecnt));
7628 break;
7629 } else
7630 --pagecnt;
7631 }
7632 if (!_dump_buf_data_order)
6a9c52cf
JS
7633 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7634 "9044 BLKGRD: ERROR unable to allocate "
3772a991
JS
7635 "memory for hexdump\n");
7636 } else
6a9c52cf
JS
7637 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7638 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
3772a991
JS
7639 "\n", _dump_buf_data);
7640 if (!_dump_buf_dif) {
7641 while (pagecnt) {
7642 _dump_buf_dif =
7643 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
7644 if (_dump_buf_dif) {
6a9c52cf
JS
7645 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7646 "9046 BLKGRD: allocated %d pages for "
3772a991
JS
7647 "_dump_buf_dif at 0x%p\n",
7648 (1 << pagecnt), _dump_buf_dif);
7649 _dump_buf_dif_order = pagecnt;
7650 memset(_dump_buf_dif, 0,
7651 ((1 << PAGE_SHIFT) << pagecnt));
7652 break;
7653 } else
7654 --pagecnt;
7655 }
7656 if (!_dump_buf_dif_order)
6a9c52cf
JS
7657 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7658 "9047 BLKGRD: ERROR unable to allocate "
3772a991
JS
7659 "memory for hexdump\n");
7660 } else
6a9c52cf
JS
7661 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7662 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
3772a991
JS
7663 _dump_buf_dif);
7664}
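
The dump-buffer setup above retries __get_free_pages with a decreasing order until an allocation succeeds, trading buffer size for reliability under memory pressure. A userspace sketch of the same backoff, with malloc standing in for __get_free_pages and 4KB pages assumed:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096u           /* assumed page size */

/* Try the largest order first and step down on failure, as the
 * dump-buffer code does with __get_free_pages().
 */
static void *alloc_backoff(unsigned int *order)
{
	void *buf = NULL;

	while (*order) {
		buf = malloc((size_t)PAGE_SZ << *order);
		if (buf)
			break;
		--*order;
	}
	return buf;
}

int main(void)
{
	unsigned int order = 10;
	void *buf = alloc_backoff(&order);

	if (buf)
		printf("got 2^%u pages\n", order);
	free(buf);
	return 0;
}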
7665
7666/**
7667 * lpfc_post_init_setup - Perform necessary device post initialization setup.
7668 * @phba: pointer to lpfc hba data structure.
7669 *
7670 * This routine is invoked to perform all the necessary post initialization
7671 * setup for the device.
7672 **/
7673static void
7674lpfc_post_init_setup(struct lpfc_hba *phba)
7675{
7676 struct Scsi_Host *shost;
7677 struct lpfc_adapter_event_header adapter_event;
7678
7679 /* Get the default values for Model Name and Description */
7680 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7681
7682 /*
7683 * hba setup may have changed the hba_queue_depth so we need to
7684 * adjust the value of can_queue.
7685 */
7686 shost = pci_get_drvdata(phba->pcidev);
7687 shost->can_queue = phba->cfg_hba_queue_depth - 10;
3772a991
JS
7688
7689 lpfc_host_attrib_init(shost);
7690
7691 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7692 spin_lock_irq(shost->host_lock);
7693 lpfc_poll_start_timer(phba);
7694 spin_unlock_irq(shost->host_lock);
7695 }
7696
7697 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7698 "0428 Perform SCSI scan\n");
7699 /* Send board arrival event to upper layer */
7700 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7701 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7702 fc_host_post_vendor_event(shost, fc_get_event_number(),
7703 sizeof(adapter_event),
7704 (char *) &adapter_event,
7705 LPFC_NL_VENDOR_ID);
7706 return;
7707}
7708
7709/**
7710 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
7711 * @phba: pointer to lpfc hba data structure.
7712 *
7713 * This routine is invoked to set up the PCI device memory space for device
7714 * with SLI-3 interface spec.
7715 *
7716 * Return codes
af901ca1 7717 * 0 - successful
3772a991
JS
7718 * other values - error
7719 **/
7720static int
7721lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7722{
f30e1bfd 7723 struct pci_dev *pdev = phba->pcidev;
3772a991
JS
7724 unsigned long bar0map_len, bar2map_len;
7725 int i, hbq_count;
7726 void *ptr;
56de8357 7727 int error;
3772a991 7728
f30e1bfd 7729 if (!pdev)
56de8357 7730 return -ENODEV;
3772a991
JS
7731
7732 /* Set the device DMA mask size */
56de8357
HR
7733 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7734 if (error)
7735 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7736 if (error)
f30e1bfd 7737 return error;
56de8357 7738 error = -ENODEV;
3772a991
JS
7739
7740 /* Get the bus address of Bar0 and Bar2 and the number of bytes
7741 * required by each mapping.
7742 */
7743 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7744 bar0map_len = pci_resource_len(pdev, 0);
7745
7746 phba->pci_bar2_map = pci_resource_start(pdev, 2);
7747 bar2map_len = pci_resource_len(pdev, 2);
7748
7749 /* Map HBA SLIM to a kernel virtual address. */
7750 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
7751 if (!phba->slim_memmap_p) {
7752 dev_printk(KERN_ERR, &pdev->dev,
7753 "ioremap failed for SLIM memory.\n");
7754 goto out;
7755 }
7756
7757 /* Map HBA Control Registers to a kernel virtual address. */
7758 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
7759 if (!phba->ctrl_regs_memmap_p) {
7760 dev_printk(KERN_ERR, &pdev->dev,
7761 "ioremap failed for HBA control registers.\n");
7762 goto out_iounmap_slim;
7763 }
7764
7765 /* Allocate memory for SLI-2 structures */
750afb08
LC
7766 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7767 &phba->slim2p.phys, GFP_KERNEL);
3772a991
JS
7768 if (!phba->slim2p.virt)
7769 goto out_iounmap;
7770
3772a991 7771 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
7a470277
JS
7772 phba->mbox_ext = (phba->slim2p.virt +
7773 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
3772a991
JS
7774 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
7775 phba->IOCBs = (phba->slim2p.virt +
7776 offsetof(struct lpfc_sli2_slim, IOCBs));
7777
7778 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
7779 lpfc_sli_hbq_size(),
7780 &phba->hbqslimp.phys,
7781 GFP_KERNEL);
7782 if (!phba->hbqslimp.virt)
7783 goto out_free_slim;
7784
7785 hbq_count = lpfc_sli_hbq_count();
7786 ptr = phba->hbqslimp.virt;
7787 for (i = 0; i < hbq_count; ++i) {
7788 phba->hbqs[i].hbq_virt = ptr;
7789 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
7790 ptr += (lpfc_hbq_defs[i]->entry_count *
7791 sizeof(struct lpfc_hbq_entry));
7792 }
7793 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
7794 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
7795
7796 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
7797
3772a991
JS
7798 phba->MBslimaddr = phba->slim_memmap_p;
7799 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
7800 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
7801 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
7802 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
7803
7804 return 0;
7805
7806out_free_slim:
7807 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7808 phba->slim2p.virt, phba->slim2p.phys);
7809out_iounmap:
7810 iounmap(phba->ctrl_regs_memmap_p);
7811out_iounmap_slim:
7812 iounmap(phba->slim_memmap_p);
7813out:
7814 return error;
7815}
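
The DMA mask setup above first requests 64-bit addressing and falls back to 32-bit only if the wider mask is rejected, which is the standard two-step probe with dma_set_mask_and_coherent. A sketch of that control flow, with a hypothetical try_set_dma_mask standing in for the kernel call:

#include <stdio.h>

/* Hypothetical platform that only accepts 32-bit DMA masks. */
static int try_set_dma_mask(unsigned int bits)
{
	return bits <= 32 ? 0 : -1;
}

/* Prefer the 64-bit mask, fall back to 32-bit, else report failure. */
static int set_dma_masks(void)
{
	int error = try_set_dma_mask(64);

	if (error)
		error = try_set_dma_mask(32);
	return error;
}

int main(void)
{
	printf("dma mask setup %s\n", set_dma_masks() ? "failed" : "ok");
	return 0;
}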
7816
7817/**
7818 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
7819 * @phba: pointer to lpfc hba data structure.
7820 *
7821 * This routine is invoked to unset the PCI device memory space for device
7822 * with SLI-3 interface spec.
7823 **/
7824static void
7825lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
7826{
7827 struct pci_dev *pdev;
7828
7829 /* Obtain PCI device reference */
7830 if (!phba->pcidev)
7831 return;
7832 else
7833 pdev = phba->pcidev;
7834
7835 /* Free coherent DMA memory allocated */
7836 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7837 phba->hbqslimp.virt, phba->hbqslimp.phys);
7838 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7839 phba->slim2p.virt, phba->slim2p.phys);
7840
7841 /* I/O memory unmap */
7842 iounmap(phba->ctrl_regs_memmap_p);
7843 iounmap(phba->slim_memmap_p);
7844
7845 return;
7846}
7847
7848/**
da0436e9 7849 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
3772a991
JS
7850 * @phba: pointer to lpfc hba data structure.
7851 *
da0436e9
JS
7852 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
7853 * done and check status.
3772a991 7854 *
da0436e9 7855 * Return 0 if successful, otherwise -ENODEV.
3772a991 7856 **/
da0436e9
JS
7857int
7858lpfc_sli4_post_status_check(struct lpfc_hba *phba)
3772a991 7859{
2fcee4bf
JS
7860 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
7861 struct lpfc_register reg_data;
7862 int i, port_error = 0;
7863 uint32_t if_type;
3772a991 7864
9940b97b
JS
7865 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
7866 memset(&reg_data, 0, sizeof(reg_data));
2fcee4bf 7867 if (!phba->sli4_hba.PSMPHRregaddr)
da0436e9 7868 return -ENODEV;
3772a991 7869
da0436e9
JS
7870 /* Wait up to 30 seconds for the SLI Port POST done and ready */
7871 for (i = 0; i < 3000; i++) {
9940b97b
JS
7872 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
7873 &portsmphr_reg.word0) ||
7874 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
2fcee4bf 7875 /* Port has a fatal POST error, break out */
da0436e9
JS
7876 port_error = -ENODEV;
7877 break;
7878 }
2fcee4bf
JS
7879 if (LPFC_POST_STAGE_PORT_READY ==
7880 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
da0436e9 7881 break;
da0436e9 7882 msleep(10);
3772a991
JS
7883 }
7884
2fcee4bf
JS
7885 /*
7886 * If there was a port error during POST, then don't proceed with
7887 * other register reads as the data may not be valid. Just exit.
7888 */
7889 if (port_error) {
da0436e9 7890 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2fcee4bf
JS
7891 "1408 Port Failed POST - portsmphr=0x%x, "
7892 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
7893 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
7894 portsmphr_reg.word0,
7895 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
7896 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
7897 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
7898 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
7899 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
7900 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
7901 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
7902 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
7903 } else {
28baac74 7904 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2fcee4bf
JS
7905 "2534 Device Info: SLIFamily=0x%x, "
7906 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
7907 "SLIHint_2=0x%x, FT=0x%x\n",
28baac74
JS
7908 bf_get(lpfc_sli_intf_sli_family,
7909 &phba->sli4_hba.sli_intf),
7910 bf_get(lpfc_sli_intf_slirev,
7911 &phba->sli4_hba.sli_intf),
085c647c
JS
7912 bf_get(lpfc_sli_intf_if_type,
7913 &phba->sli4_hba.sli_intf),
7914 bf_get(lpfc_sli_intf_sli_hint1,
28baac74 7915 &phba->sli4_hba.sli_intf),
085c647c
JS
7916 bf_get(lpfc_sli_intf_sli_hint2,
7917 &phba->sli4_hba.sli_intf),
7918 bf_get(lpfc_sli_intf_func_type,
28baac74 7919 &phba->sli4_hba.sli_intf));
2fcee4bf
JS
7920 /*
7921 * Check for other Port errors during the initialization
7922 * process. Fail the load if the port did not come up
7923 * correctly.
7924 */
7925 if_type = bf_get(lpfc_sli_intf_if_type,
7926 &phba->sli4_hba.sli_intf);
7927 switch (if_type) {
7928 case LPFC_SLI_INTF_IF_TYPE_0:
7929 phba->sli4_hba.ue_mask_lo =
7930 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
7931 phba->sli4_hba.ue_mask_hi =
7932 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
7933 uerrlo_reg.word0 =
7934 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
7935 uerrhi_reg.word0 =
7936 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
7937 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
7938 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7939 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7940 "1422 Unrecoverable Error "
7941 "Detected during POST "
7942 "uerr_lo_reg=0x%x, "
7943 "uerr_hi_reg=0x%x, "
7944 "ue_mask_lo_reg=0x%x, "
7945 "ue_mask_hi_reg=0x%x\n",
7946 uerrlo_reg.word0,
7947 uerrhi_reg.word0,
7948 phba->sli4_hba.ue_mask_lo,
7949 phba->sli4_hba.ue_mask_hi);
7950 port_error = -ENODEV;
7951 }
7952 break;
7953 case LPFC_SLI_INTF_IF_TYPE_2:
27d6ac0a 7954 case LPFC_SLI_INTF_IF_TYPE_6:
2fcee4bf 7955 /* Final checks. The port status should be clean. */
9940b97b
JS
7956 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7957 &reg_data.word0) ||
0558056c
JS
7958 (bf_get(lpfc_sliport_status_err, &reg_data) &&
7959 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
2fcee4bf
JS
7960 phba->work_status[0] =
7961 readl(phba->sli4_hba.u.if_type2.
7962 ERR1regaddr);
7963 phba->work_status[1] =
7964 readl(phba->sli4_hba.u.if_type2.
7965 ERR2regaddr);
7966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8fcb8acd
JS
7967 "2888 Unrecoverable port error "
7968 "following POST: port status reg "
7969 "0x%x, port_smphr reg 0x%x, "
2fcee4bf
JS
7970 "error 1=0x%x, error 2=0x%x\n",
7971 reg_data.word0,
7972 portsmphr_reg.word0,
7973 phba->work_status[0],
7974 phba->work_status[1]);
7975 port_error = -ENODEV;
7976 }
7977 break;
7978 case LPFC_SLI_INTF_IF_TYPE_1:
7979 default:
7980 break;
7981 }
28baac74 7982 }
da0436e9
JS
7983 return port_error;
7984}
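
The POST wait in this routine is a bounded poll: up to 3000 iterations at 10 ms apiece (roughly 30 seconds), breaking early on either a fatal error bit or the ready state. A compact sketch of that polling skeleton, with read_status as a hypothetical stand-in for the semaphore register read:

#include <stdio.h>
#include <unistd.h>

enum post_state { ST_BUSY, ST_READY, ST_FATAL };

/* Hypothetical register read standing in for the port semaphore. */
static enum post_state read_status(int iter)
{
	return iter < 5 ? ST_BUSY : ST_READY;
}

static int wait_for_post(void)
{
	int i;

	for (i = 0; i < 3000; i++) {    /* ~30s at 10 ms per poll */
		enum post_state st = read_status(i);

		if (st == ST_FATAL)
			return -1;      /* fail fast on a fatal error */
		if (st == ST_READY)
			return 0;
		usleep(10 * 1000);      /* 10 ms, like msleep(10) */
	}
	return -1;                      /* timed out */
}

int main(void)
{
	printf("POST %s\n", wait_for_post() ? "failed" : "ready");
	return 0;
}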
3772a991 7985
da0436e9
JS
7986/**
7987 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
7988 * @phba: pointer to lpfc hba data structure.
2fcee4bf 7989 * @if_type: The SLI4 interface type getting configured.
7990 *
7991 * This routine is invoked to set up SLI4 BAR0 PCI config space register
7992 * memory map.
7993 **/
7994static void
7995lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
7996{
7997 switch (if_type) {
7998 case LPFC_SLI_INTF_IF_TYPE_0:
7999 phba->sli4_hba.u.if_type0.UERRLOregaddr =
8000 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
8001 phba->sli4_hba.u.if_type0.UERRHIregaddr =
8002 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
8003 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
8004 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
8005 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
8006 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
8007 phba->sli4_hba.SLIINTFregaddr =
8008 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8009 break;
8010 case LPFC_SLI_INTF_IF_TYPE_2:
8011 phba->sli4_hba.u.if_type2.EQDregaddr =
8012 phba->sli4_hba.conf_regs_memmap_p +
8013 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
2fcee4bf 8014 phba->sli4_hba.u.if_type2.ERR1regaddr =
8015 phba->sli4_hba.conf_regs_memmap_p +
8016 LPFC_CTL_PORT_ER1_OFFSET;
2fcee4bf 8017 phba->sli4_hba.u.if_type2.ERR2regaddr =
8018 phba->sli4_hba.conf_regs_memmap_p +
8019 LPFC_CTL_PORT_ER2_OFFSET;
2fcee4bf 8020 phba->sli4_hba.u.if_type2.CTRLregaddr =
8021 phba->sli4_hba.conf_regs_memmap_p +
8022 LPFC_CTL_PORT_CTL_OFFSET;
2fcee4bf 8023 phba->sli4_hba.u.if_type2.STATUSregaddr =
8024 phba->sli4_hba.conf_regs_memmap_p +
8025 LPFC_CTL_PORT_STA_OFFSET;
8026 phba->sli4_hba.SLIINTFregaddr =
8027 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
8028 phba->sli4_hba.PSMPHRregaddr =
8029 phba->sli4_hba.conf_regs_memmap_p +
8030 LPFC_CTL_PORT_SEM_OFFSET;
2fcee4bf 8031 phba->sli4_hba.RQDBregaddr =
8032 phba->sli4_hba.conf_regs_memmap_p +
8033 LPFC_ULP0_RQ_DOORBELL;
2fcee4bf 8034 phba->sli4_hba.WQDBregaddr =
8035 phba->sli4_hba.conf_regs_memmap_p +
8036 LPFC_ULP0_WQ_DOORBELL;
9dd35425 8037 phba->sli4_hba.CQDBregaddr =
2fcee4bf 8038 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
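		/* This interface type exposes a single combined EQ/CQ
		 * doorbell (LPFC_EQCQ_DOORBELL), so the EQ doorbell pointer
		 * below simply aliases the CQ doorbell address.
		 */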
9dd35425 8039 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8040 phba->sli4_hba.MQDBregaddr =
8041 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
8042 phba->sli4_hba.BMBXregaddr =
8043 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8044 break;
8045 case LPFC_SLI_INTF_IF_TYPE_6:
8046 phba->sli4_hba.u.if_type2.EQDregaddr =
8047 phba->sli4_hba.conf_regs_memmap_p +
8048 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
8049 phba->sli4_hba.u.if_type2.ERR1regaddr =
8050 phba->sli4_hba.conf_regs_memmap_p +
8051 LPFC_CTL_PORT_ER1_OFFSET;
8052 phba->sli4_hba.u.if_type2.ERR2regaddr =
8053 phba->sli4_hba.conf_regs_memmap_p +
8054 LPFC_CTL_PORT_ER2_OFFSET;
8055 phba->sli4_hba.u.if_type2.CTRLregaddr =
8056 phba->sli4_hba.conf_regs_memmap_p +
8057 LPFC_CTL_PORT_CTL_OFFSET;
8058 phba->sli4_hba.u.if_type2.STATUSregaddr =
8059 phba->sli4_hba.conf_regs_memmap_p +
8060 LPFC_CTL_PORT_STA_OFFSET;
8061 phba->sli4_hba.PSMPHRregaddr =
8062 phba->sli4_hba.conf_regs_memmap_p +
8063 LPFC_CTL_PORT_SEM_OFFSET;
8064 phba->sli4_hba.BMBXregaddr =
8065 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
8066 break;
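		/* Note: unlike the if_type 2 case, the if_type 6 case above
		 * maps no queue doorbells from BAR0; for that interface type
		 * the RQ/WQ/CQ/EQ/MQ doorbells live in the doorbell BAR and
		 * are set up in lpfc_sli4_bar1_register_memmap().
		 */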
8067 case LPFC_SLI_INTF_IF_TYPE_1:
8068 default:
8069 dev_printk(KERN_ERR, &phba->pcidev->dev,
8070 "FATAL - unsupported SLI4 interface type - %d\n",
8071 if_type);
8072 break;
8073 }
da0436e9 8074}
3772a991 8075
8076/**
8077 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
8078 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
8079 *
27d6ac0a 8080 * This routine is invoked to set up SLI4 BAR1 register memory map.
8081 **/
8082static void
27d6ac0a 8083lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
da0436e9 8084{
8085 switch (if_type) {
8086 case LPFC_SLI_INTF_IF_TYPE_0:
8087 phba->sli4_hba.PSMPHRregaddr =
8088 phba->sli4_hba.ctrl_regs_memmap_p +
8089 LPFC_SLIPORT_IF0_SMPHR;
8090 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8091 LPFC_HST_ISR0;
8092 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8093 LPFC_HST_IMR0;
8094 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
8095 LPFC_HST_ISCR0;
8096 break;
8097 case LPFC_SLI_INTF_IF_TYPE_6:
8098 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8099 LPFC_IF6_RQ_DOORBELL;
8100 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8101 LPFC_IF6_WQ_DOORBELL;
8102 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8103 LPFC_IF6_CQ_DOORBELL;
8104 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8105 LPFC_IF6_EQ_DOORBELL;
8106 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
8107 LPFC_IF6_MQ_DOORBELL;
8108 break;
8109 case LPFC_SLI_INTF_IF_TYPE_2:
8110 case LPFC_SLI_INTF_IF_TYPE_1:
8111 default:
8112 dev_err(&phba->pcidev->dev,
8113 "FATAL - unsupported SLI4 interface type - %d\n",
8114 if_type);
8115 break;
8116 }
8117}
8118
8119/**
da0436e9 8120 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
3772a991 8121 * @phba: pointer to lpfc hba data structure.
da0436e9 8122 * @vf: virtual function number
3772a991 8123 *
8124 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
8125 * based on the given virtual function number, @vf.
8126 *
8127 * Return 0 if successful, otherwise -ENODEV.
3772a991 8128 **/
8129static int
8130lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
3772a991 8131{
8132 if (vf > LPFC_VIR_FUNC_MAX)
8133 return -ENODEV;
3772a991 8134
da0436e9 8135 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8136 vf * LPFC_VFR_PAGE_SIZE +
8137 LPFC_ULP0_RQ_DOORBELL);
da0436e9 8138 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8139 vf * LPFC_VFR_PAGE_SIZE +
8140 LPFC_ULP0_WQ_DOORBELL);
8141 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8142 vf * LPFC_VFR_PAGE_SIZE +
8143 LPFC_EQCQ_DOORBELL);
8144 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
8145 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8146 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
8147 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
8148 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
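	/*
	 * Each virtual function thus gets its own doorbell page. For
	 * example, with vf = 2 the RQ doorbell resolves to
	 * drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE +
	 * LPFC_ULP0_RQ_DOORBELL.
	 */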
8149 return 0;
8150}
8151
8152/**
da0436e9 8153 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
8154 * @phba: pointer to lpfc hba data structure.
8155 *
8156 * This routine is invoked to create the bootstrap mailbox
8157 * region consistent with the SLI-4 interface spec. This
8158 * routine allocates all memory necessary to communicate
8159 * mailbox commands to the port and sets up all alignment
8160 * needs. No locks are expected to be held when calling
8161 * this routine.
8162 *
8163 * Return codes
af901ca1 8164 * 0 - successful
d439d286 8165 * -ENOMEM - could not allocate memory.
da0436e9 8166 **/
3772a991 8167static int
da0436e9 8168lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
3772a991 8169{
8170 uint32_t bmbx_size;
8171 struct lpfc_dmabuf *dmabuf;
8172 struct dma_address *dma_address;
8173 uint32_t pa_addr;
8174 uint64_t phys_addr;
8175
8176 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8177 if (!dmabuf)
8178 return -ENOMEM;
3772a991 8179
8180 /*
8181 * The bootstrap mailbox region is comprised of 2 parts
8182 * plus an alignment restriction of 16 bytes.
8183 */
8184 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
8185 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
8186 &dmabuf->phys, GFP_KERNEL);
8187 if (!dmabuf->virt) {
8188 kfree(dmabuf);
8189 return -ENOMEM;
8190 }
8191
8192 /*
8193 * Initialize the bootstrap mailbox pointers now so that the register
8194 * operations are simple later. The mailbox dma address is required
8195 * to be 16-byte aligned. Also align the virtual memory as each
8196 * mailbox is copied into the bmbx mailbox region before issuing the
8197 * command to the port.
8198 */
8199 phba->sli4_hba.bmbx.dmabuf = dmabuf;
8200 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
8201
8202 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
8203 LPFC_ALIGN_16_BYTE);
8204 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
8205 LPFC_ALIGN_16_BYTE);
8206
8207 /*
8208 * Set the high and low physical addresses now. The SLI4 alignment
8209 * requirement is 16 bytes and the mailbox is posted to the port
8210 * as two 30-bit addresses. The other data is a bit marking whether
8211 * the 30-bit address is the high or low address.
8212 * Upcast bmbx aphys to 64bits so shift instruction compiles
8213 * clean on 32 bit machines.
8214 */
8215 dma_address = &phba->sli4_hba.bmbx.dma_address;
8216 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
8217 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
8218 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
8219 LPFC_BMBX_BIT1_ADDR_HI);
8220
8221 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
8222 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
8223 LPFC_BMBX_BIT1_ADDR_LO);
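	/*
	 * Recap of the split above: the 16-byte-aligned physical address is
	 * carved into two 30-bit halves - addr_lo holds bits 33:4 (bits 3:0
	 * are zero by alignment) and addr_hi holds bits 63:34 - and each
	 * half is shifted left by 2 so the low-order marker bit can tell
	 * the port which half it is receiving.
	 */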
8224 return 0;
8225}
8226
8227/**
da0436e9 8228 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
8229 * @phba: pointer to lpfc hba data structure.
8230 *
8231 * This routine is invoked to teardown the bootstrap mailbox
8232 * region and release all host resources. This routine requires
8233 * the caller to ensure all mailbox commands are recovered, no
8234 * additional mailbox commands are sent, and interrupts are disabled
8235 * before calling this routine.
8236 *
8237 **/
3772a991 8238static void
da0436e9 8239lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
3772a991 8240{
8241 dma_free_coherent(&phba->pcidev->dev,
8242 phba->sli4_hba.bmbx.bmbx_size,
8243 phba->sli4_hba.bmbx.dmabuf->virt,
8244 phba->sli4_hba.bmbx.dmabuf->phys);
8245
8246 kfree(phba->sli4_hba.bmbx.dmabuf);
8247 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
8248}
8249
8250/**
da0436e9 8251 * lpfc_sli4_read_config - Get the config parameters.
8252 * @phba: pointer to lpfc hba data structure.
8253 *
8254 * This routine is invoked to read the configuration parameters from the HBA.
8255 * The configuration parameters are used to set the base and maximum values
8256 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
8257 * allocation for the port.
8258 *
8259 * Return codes
af901ca1 8260 * 0 - successful
25985edc 8261 * -ENOMEM - No available memory
d439d286 8262 * -EIO - The mailbox failed to complete successfully.
3772a991 8263 **/
ff78d8f9 8264int
da0436e9 8265lpfc_sli4_read_config(struct lpfc_hba *phba)
3772a991 8266{
8267 LPFC_MBOXQ_t *pmb;
8268 struct lpfc_mbx_read_config *rd_config;
8269 union lpfc_sli4_cfg_shdr *shdr;
8270 uint32_t shdr_status, shdr_add_status;
8271 struct lpfc_mbx_get_func_cfg *get_func_cfg;
8272 struct lpfc_rsrc_desc_fcfcoe *desc;
8aa134a8 8273 char *pdesc_0;
c691816e 8274 uint16_t forced_link_speed;
6a828b0f 8275 uint32_t if_type, qmin;
8aa134a8 8276 int length, i, rc = 0, rc2;
3772a991 8277
8278 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8279 if (!pmb) {
8280 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8281 "2011 Unable to allocate memory for issuing "
8282 "SLI_CONFIG_SPECIAL mailbox command\n");
8283 return -ENOMEM;
8284 }
8285
da0436e9 8286 lpfc_read_config(phba, pmb);
3772a991 8287
8288 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8289 if (rc != MBX_SUCCESS) {
8290 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8291 "2012 Mailbox failed , mbxCmd x%x "
8292 "READ_CONFIG, mbxStatus x%x\n",
8293 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8294 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8295 rc = -EIO;
8296 } else {
8297 rd_config = &pmb->u.mqe.un.rd_config;
8298 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
8299 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
8300 phba->sli4_hba.lnk_info.lnk_tp =
8301 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
8302 phba->sli4_hba.lnk_info.lnk_no =
8303 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
8304 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8305 "3081 lnk_type:%d, lnk_numb:%d\n",
8306 phba->sli4_hba.lnk_info.lnk_tp,
8307 phba->sli4_hba.lnk_info.lnk_no);
8308 } else
8309 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8310 "3082 Mailbox (x%x) returned ldv:x0\n",
8311 bf_get(lpfc_mqe_command, &pmb->u.mqe));
8312 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
8313 phba->bbcredit_support = 1;
8314 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
8315 }
8316
8317 phba->sli4_hba.conf_trunk =
8318 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
8319 phba->sli4_hba.extents_in_use =
8320 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
8321 phba->sli4_hba.max_cfg_param.max_xri =
8322 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
8323 /* Reduce resource usage in kdump environment */
8324 if (is_kdump_kernel() &&
8325 phba->sli4_hba.max_cfg_param.max_xri > 512)
8326 phba->sli4_hba.max_cfg_param.max_xri = 512;
8327 phba->sli4_hba.max_cfg_param.xri_base =
8328 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
8329 phba->sli4_hba.max_cfg_param.max_vpi =
8330 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
8331 /* Limit the max we support */
8332 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
8333 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
8334 phba->sli4_hba.max_cfg_param.vpi_base =
8335 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
8336 phba->sli4_hba.max_cfg_param.max_rpi =
8337 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
8338 phba->sli4_hba.max_cfg_param.rpi_base =
8339 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
8340 phba->sli4_hba.max_cfg_param.max_vfi =
8341 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
8342 phba->sli4_hba.max_cfg_param.vfi_base =
8343 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
8344 phba->sli4_hba.max_cfg_param.max_fcfi =
8345 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
8346 phba->sli4_hba.max_cfg_param.max_eq =
8347 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
8348 phba->sli4_hba.max_cfg_param.max_rq =
8349 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
8350 phba->sli4_hba.max_cfg_param.max_wq =
8351 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
8352 phba->sli4_hba.max_cfg_param.max_cq =
8353 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
8354 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
8355 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
8356 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
8357 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
8358 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
8359 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
8360 phba->max_vports = phba->max_vpi;
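		/*
		 * The "max_vpi - 1" above presumably reserves one VPI for
		 * the physical port itself, leaving max_vports to reflect
		 * only the VPIs usable for NPIV virtual ports.
		 */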
8361 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8362 "2003 cfg params Extents? %d "
8363 "XRI(B:%d M:%d), "
8364 "VPI(B:%d M:%d) "
8365 "VFI(B:%d M:%d) "
8366 "RPI(B:%d M:%d) "
2ea259ee 8367 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
6d368e53 8368 phba->sli4_hba.extents_in_use,
8369 phba->sli4_hba.max_cfg_param.xri_base,
8370 phba->sli4_hba.max_cfg_param.max_xri,
8371 phba->sli4_hba.max_cfg_param.vpi_base,
8372 phba->sli4_hba.max_cfg_param.max_vpi,
8373 phba->sli4_hba.max_cfg_param.vfi_base,
8374 phba->sli4_hba.max_cfg_param.max_vfi,
8375 phba->sli4_hba.max_cfg_param.rpi_base,
8376 phba->sli4_hba.max_cfg_param.max_rpi,
8377 phba->sli4_hba.max_cfg_param.max_fcfi,
8378 phba->sli4_hba.max_cfg_param.max_eq,
8379 phba->sli4_hba.max_cfg_param.max_cq,
8380 phba->sli4_hba.max_cfg_param.max_wq,
8381 phba->sli4_hba.max_cfg_param.max_rq);
8382
d38f33b3 8383 /*
8384 * Calculate queue resources based on how
8385 * many WQ/CQ/EQs are available.
d38f33b3 8386 */
8387 qmin = phba->sli4_hba.max_cfg_param.max_wq;
8388 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
8389 qmin = phba->sli4_hba.max_cfg_param.max_cq;
8390 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
8391 qmin = phba->sli4_hba.max_cfg_param.max_eq;
8392 /*
8393 * What's left after this can go toward NVME / FCP.
8394 * The minus 4 accounts for ELS, NVME LS, MBOX
8395 * plus one extra. When configured for
8396 * NVMET, FCP io channel WQs are not created.
8397 */
8398 qmin -= 4;
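		/*
		 * Worked example with hypothetical READ_CONFIG values:
		 * max_wq = 128, max_cq = 96 and max_eq = 64 give qmin = 64;
		 * the reservation above drops it to 60, and the NVME
		 * halving below would leave 30 for IRQ/HDWQ sizing.
		 */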
d38f33b3 8399
8400 /* If NVME is configured, double the number of CQ/WQs needed */
8401 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
8402 !phba->nvmet_support)
8403 qmin /= 2;
8404
8405 /* Check to see if there is enough for NVME */
8406 if ((phba->cfg_irq_chann > qmin) ||
8407 (phba->cfg_hdw_queue > qmin)) {
8408 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8409 "2005 Reducing Queues: "
8410 "WQ %d CQ %d EQ %d: min %d: "
8411 "IRQ %d HDWQ %d\n",
8412 phba->sli4_hba.max_cfg_param.max_wq,
8413 phba->sli4_hba.max_cfg_param.max_cq,
8414 phba->sli4_hba.max_cfg_param.max_eq,
8415 qmin, phba->cfg_irq_chann,
cdb42bec 8416 phba->cfg_hdw_queue);
d38f33b3 8417
8418 if (phba->cfg_irq_chann > qmin)
8419 phba->cfg_irq_chann = qmin;
8420 if (phba->cfg_hdw_queue > qmin)
8421 phba->cfg_hdw_queue = qmin;
d38f33b3 8422 }
3772a991 8423 }
8424
8425 if (rc)
8426 goto read_cfg_out;
da0436e9 8427
8428 /* Update link speed if forced link speed is supported */
8429 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
27d6ac0a 8430 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
8431 forced_link_speed =
8432 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
8433 if (forced_link_speed) {
8434 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
8435
8436 switch (forced_link_speed) {
8437 case LINK_SPEED_1G:
8438 phba->cfg_link_speed =
8439 LPFC_USER_LINK_SPEED_1G;
8440 break;
8441 case LINK_SPEED_2G:
8442 phba->cfg_link_speed =
8443 LPFC_USER_LINK_SPEED_2G;
8444 break;
8445 case LINK_SPEED_4G:
8446 phba->cfg_link_speed =
8447 LPFC_USER_LINK_SPEED_4G;
8448 break;
8449 case LINK_SPEED_8G:
8450 phba->cfg_link_speed =
8451 LPFC_USER_LINK_SPEED_8G;
8452 break;
8453 case LINK_SPEED_10G:
8454 phba->cfg_link_speed =
8455 LPFC_USER_LINK_SPEED_10G;
8456 break;
8457 case LINK_SPEED_16G:
8458 phba->cfg_link_speed =
8459 LPFC_USER_LINK_SPEED_16G;
8460 break;
8461 case LINK_SPEED_32G:
8462 phba->cfg_link_speed =
8463 LPFC_USER_LINK_SPEED_32G;
8464 break;
8465 case LINK_SPEED_64G:
8466 phba->cfg_link_speed =
8467 LPFC_USER_LINK_SPEED_64G;
8468 break;
8469 case 0xffff:
8470 phba->cfg_link_speed =
8471 LPFC_USER_LINK_SPEED_AUTO;
8472 break;
8473 default:
8474 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8475 "0047 Unrecognized link "
8476 "speed : %d\n",
8477 forced_link_speed);
8478 phba->cfg_link_speed =
8479 LPFC_USER_LINK_SPEED_AUTO;
8480 }
8481 }
8482 }
8483
da0436e9 8484 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
8485 length = phba->sli4_hba.max_cfg_param.max_xri -
8486 lpfc_sli4_get_els_iocb_cnt(phba);
8487 if (phba->cfg_hba_queue_depth > length) {
8488 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8489 "3361 HBA queue depth changed from %d to %d\n",
8490 phba->cfg_hba_queue_depth, length);
8491 phba->cfg_hba_queue_depth = length;
8492 }
912e3acd 8493
27d6ac0a 8494 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
8495 LPFC_SLI_INTF_IF_TYPE_2)
8496 goto read_cfg_out;
8497
8498 /* get the pf# and vf# for SLI4 if_type 2 port */
8499 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
8500 sizeof(struct lpfc_sli4_cfg_mhdr));
8501 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
8502 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
8503 length, LPFC_SLI4_MBX_EMBED);
8504
8aa134a8 8505 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8506 shdr = (union lpfc_sli4_cfg_shdr *)
8507 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
8508 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8509 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8aa134a8 8510 if (rc2 || shdr_status || shdr_add_status) {
8511 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8512 "3026 Mailbox failed , mbxCmd x%x "
8513 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
8514 bf_get(lpfc_mqe_command, &pmb->u.mqe),
8515 bf_get(lpfc_mqe_status, &pmb->u.mqe));
8516 goto read_cfg_out;
8517 }
8518
8519 /* search for the fc_fcoe resource descriptor */
8520 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
912e3acd 8521
8522 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
8523 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
8524 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
8525 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
8526 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
8527 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
8528 goto read_cfg_out;
8529
912e3acd 8530 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
8aa134a8 8531 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
912e3acd 8532 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
8aa134a8 8533 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
8534 phba->sli4_hba.iov.pf_number =
8535 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
8536 phba->sli4_hba.iov.vf_number =
8537 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
8538 break;
8539 }
8540 }
8541
8542 if (i < LPFC_RSRC_DESC_MAX_NUM)
8543 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8544 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
8545 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
8546 phba->sli4_hba.iov.vf_number);
8aa134a8 8547 else
8548 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8549 "3028 GET_FUNCTION_CONFIG: failed to find "
c4dba187 8550 "Resource Descriptor:x%x\n",
912e3acd 8551 LPFC_RSRC_DESC_TYPE_FCFCOE);
8552
8553read_cfg_out:
8554 mempool_free(pmb, phba->mbox_mem_pool);
da0436e9 8555 return rc;
8556}
8557
8558/**
2fcee4bf 8559 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
8560 * @phba: pointer to lpfc hba data structure.
8561 *
8562 * This routine is invoked to setup the port-side endian order when
8563 * the port if_type is 0. This routine has no function for other
8564 * if_types.
8565 *
8566 * Return codes
af901ca1 8567 * 0 - successful
25985edc 8568 * -ENOMEM - No available memory
d439d286 8569 * -EIO - The mailbox failed to complete successfully.
3772a991 8570 **/
8571static int
8572lpfc_setup_endian_order(struct lpfc_hba *phba)
3772a991 8573{
da0436e9 8574 LPFC_MBOXQ_t *mboxq;
2fcee4bf 8575 uint32_t if_type, rc = 0;
8576 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8577 HOST_ENDIAN_HIGH_WORD1};
3772a991 8578
8579 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8580 switch (if_type) {
8581 case LPFC_SLI_INTF_IF_TYPE_0:
8582 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8583 GFP_KERNEL);
8584 if (!mboxq) {
8585 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8586 "0492 Unable to allocate memory for "
8587 "issuing SLI_CONFIG_SPECIAL mailbox "
8588 "command\n");
8589 return -ENOMEM;
8590 }
3772a991 8591
8592 /*
8593 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
8594 * two words to contain special data values and no other data.
8595 */
8596 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8597 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8598 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8599 if (rc != MBX_SUCCESS) {
8600 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8601 "0493 SLI_CONFIG_SPECIAL mailbox "
8602 "failed with status x%x\n",
8603 rc);
8604 rc = -EIO;
8605 }
8606 mempool_free(mboxq, phba->mbox_mem_pool);
8607 break;
27d6ac0a 8608 case LPFC_SLI_INTF_IF_TYPE_6:
8609 case LPFC_SLI_INTF_IF_TYPE_2:
8610 case LPFC_SLI_INTF_IF_TYPE_1:
8611 default:
8612 break;
da0436e9 8613 }
da0436e9 8614 return rc;
8615}
8616
8617/**
895427bd 8618 * lpfc_sli4_queue_verify - Verify and update EQ counts
8619 * @phba: pointer to lpfc hba data structure.
8620 *
8621 * This routine is invoked to check the user settable queue counts for EQs.
8622 * After this routine is called the counts will be set to valid values that
8623 * adhere to the constraints of the system's interrupt vectors and the port's
8624 * queue resources.
8625 *
8626 * Return codes
af901ca1 8627 * 0 - successful
25985edc 8628 * -ENOMEM - No available memory
3772a991 8629 **/
da0436e9 8630static int
5350d872 8631lpfc_sli4_queue_verify(struct lpfc_hba *phba)
3772a991 8632{
da0436e9 8633 /*
67d12733 8634 * Sanity check for configured queue parameters against the run-time
8635 * device parameters
8636 */
3772a991 8637
bcb24f65 8638 if (phba->nvmet_support) {
8639 if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
8640 phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
8641 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8642 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
bcb24f65 8643 }
8644
8645 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8646 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8647 phba->cfg_hdw_queue, phba->cfg_irq_chann,
8648 phba->cfg_nvmet_mrq);
3772a991 8649
8650 /* Get EQ depth from module parameter, fake the default for now */
8651 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8652 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
3772a991 8653
8654 /* Get CQ depth from module parameter, fake the default for now */
8655 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8656 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8657 return 0;
8658}
8659
8660static int
8661lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
8662{
8663 struct lpfc_queue *qdesc;
c1a21ebc 8664 int cpu;
5350d872 8665
c1a21ebc 8666 cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
a51e41b6 8667 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
81b96eda 8668 phba->sli4_hba.cq_esize,
c1a21ebc 8669 LPFC_CQE_EXP_COUNT, cpu);
8670 if (!qdesc) {
8671 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8672 "0508 Failed allocate fast-path NVME CQ (%d)\n",
8673 wqidx);
8674 return 1;
8675 }
7365f6fd 8676 qdesc->qe_valid = 1;
5e5b511d 8677 qdesc->hdwq = wqidx;
c1a21ebc 8678 qdesc->chann = cpu;
cdb42bec 8679 phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
895427bd 8680
a51e41b6 8681 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8682 LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT,
8683 cpu);
8684 if (!qdesc) {
8685 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8686 "0509 Failed allocate fast-path NVME WQ (%d)\n",
8687 wqidx);
8688 return 1;
8689 }
5e5b511d 8690 qdesc->hdwq = wqidx;
6a828b0f 8691 qdesc->chann = wqidx;
cdb42bec 8692 phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
8693 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8694 return 0;
8695}
8696
8697static int
8698lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
8699{
8700 struct lpfc_queue *qdesc;
c176ffa0 8701 uint32_t wqesize;
c1a21ebc 8702 int cpu;
895427bd 8703
c1a21ebc 8704 cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
895427bd 8705 /* Create Fast Path FCP CQs */
c176ffa0 8706 if (phba->enab_exp_wqcq_pages)
8707 /* Increase the CQ size when WQEs contain an embedded cdb */
8708 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8709 phba->sli4_hba.cq_esize,
c1a21ebc 8710 LPFC_CQE_EXP_COUNT, cpu);
8711
8712 else
8713 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8714 phba->sli4_hba.cq_esize,
c1a21ebc 8715 phba->sli4_hba.cq_ecount, cpu);
8716 if (!qdesc) {
8717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8718 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
8719 return 1;
8720 }
7365f6fd 8721 qdesc->qe_valid = 1;
5e5b511d 8722 qdesc->hdwq = wqidx;
c1a21ebc 8723 qdesc->chann = cpu;
cdb42bec 8724 phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
8725
8726 /* Create Fast Path FCP WQs */
c176ffa0 8727 if (phba->enab_exp_wqcq_pages) {
a51e41b6 8728 /* Increase the WQ size when WQEs contain an embedded cdb */
8729 wqesize = (phba->fcp_embed_io) ?
8730 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
a51e41b6 8731 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
c176ffa0 8732 wqesize,
c1a21ebc 8733 LPFC_WQE_EXP_COUNT, cpu);
c176ffa0 8734 } else
8735 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8736 phba->sli4_hba.wq_esize,
c1a21ebc 8737 phba->sli4_hba.wq_ecount, cpu);
c176ffa0 8738
8739 if (!qdesc) {
8740 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8741 "0503 Failed allocate fast-path FCP WQ (%d)\n",
8742 wqidx);
8743 return 1;
8744 }
5e5b511d 8745 qdesc->hdwq = wqidx;
6a828b0f 8746 qdesc->chann = wqidx;
cdb42bec 8747 phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc;
895427bd 8748 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
5350d872 8749 return 0;
8750}
8751
8752/**
8753 * lpfc_sli4_queue_create - Create all the SLI4 queues
8754 * @phba: pointer to lpfc hba data structure.
8755 *
8756 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
8757 * operation. For each SLI4 queue type, the parameters such as queue entry
8758 * count (queue depth) shall be taken from the module parameter. For now,
8759 * we just use some constant number as a placeholder.
8760 *
8761 * Return codes
4907cb7b 8762 * 0 - successful
8763 * -ENOMEM - No available memory
8764 * -EIO - The mailbox failed to complete successfully.
8765 **/
8766int
8767lpfc_sli4_queue_create(struct lpfc_hba *phba)
8768{
8769 struct lpfc_queue *qdesc;
657add4e 8770 int idx, cpu, eqcpu;
5e5b511d 8771 struct lpfc_sli4_hdw_queue *qp;
8772 struct lpfc_vector_map_info *cpup;
8773 struct lpfc_vector_map_info *eqcpup;
32517fc0 8774 struct lpfc_eq_intr_info *eqi;
8775
8776 /*
67d12733 8777 * Create HBA Record arrays.
895427bd 8778 * Both NVME and FCP will share that same vectors / EQs
5350d872 8779 */
8780 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
8781 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
8782 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
8783 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
8784 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
8785 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
8786 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8787 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8788 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8789 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
67d12733 8790
cdb42bec 8791 if (!phba->sli4_hba.hdwq) {
8792 phba->sli4_hba.hdwq = kcalloc(
8793 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
8794 GFP_KERNEL);
8795 if (!phba->sli4_hba.hdwq) {
895427bd 8796 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8797 "6427 Failed allocate memory for "
8798 "fast-path Hardware Queue array\n");
8799 goto out_error;
8800 }
8801 /* Prepare hardware queues to take IO buffers */
8802 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8803 qp = &phba->sli4_hba.hdwq[idx];
8804 spin_lock_init(&qp->io_buf_list_get_lock);
8805 spin_lock_init(&qp->io_buf_list_put_lock);
8806 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
8807 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
8808 qp->get_io_bufs = 0;
8809 qp->put_io_bufs = 0;
8810 qp->total_io_bufs = 0;
8811 spin_lock_init(&qp->abts_scsi_buf_list_lock);
8812 INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list);
8813 qp->abts_scsi_io_bufs = 0;
8814 spin_lock_init(&qp->abts_nvme_buf_list_lock);
8815 INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list);
8816 qp->abts_nvme_io_bufs = 0;
895427bd 8817 }
8818 }
8819
cdb42bec 8820 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8821 if (phba->nvmet_support) {
8822 phba->sli4_hba.nvmet_cqset = kcalloc(
8823 phba->cfg_nvmet_mrq,
8824 sizeof(struct lpfc_queue *),
8825 GFP_KERNEL);
8826 if (!phba->sli4_hba.nvmet_cqset) {
8827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8828 "3121 Fail allocate memory for "
8829 "fast-path CQ set array\n");
8830 goto out_error;
8831 }
8832 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
8833 phba->cfg_nvmet_mrq,
8834 sizeof(struct lpfc_queue *),
8835 GFP_KERNEL);
8836 if (!phba->sli4_hba.nvmet_mrq_hdr) {
8837 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8838 "3122 Fail allocate memory for "
8839 "fast-path RQ set hdr array\n");
8840 goto out_error;
8841 }
8842 phba->sli4_hba.nvmet_mrq_data = kcalloc(
8843 phba->cfg_nvmet_mrq,
8844 sizeof(struct lpfc_queue *),
8845 GFP_KERNEL);
8846 if (!phba->sli4_hba.nvmet_mrq_data) {
8847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8848 "3124 Fail allocate memory for "
8849 "fast-path RQ set data array\n");
8850 goto out_error;
8851 }
8852 }
da0436e9 8853 }
67d12733 8854
895427bd 8855 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
67d12733 8856
895427bd 8857 /* Create HBA Event Queues (EQs) */
8858 for_each_present_cpu(cpu) {
8859 /* We only want to create 1 EQ per vector, even though
8860 * multiple CPUs might be using that vector, so we only
8861 * select the CPUs that are flagged LPFC_CPU_FIRST_IRQ.
6a828b0f 8862 */
8863 cpup = &phba->sli4_hba.cpu_map[cpu];
8864 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
6a828b0f 8865 continue;
8866
8867 /* Get a ptr to the Hardware Queue associated with this CPU */
8868 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
8869
8870 /* Allocate an EQ */
8871 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8872 phba->sli4_hba.eq_esize,
c1a21ebc 8873 phba->sli4_hba.eq_ecount, cpu);
8874 if (!qdesc) {
8875 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8876 "0497 Failed allocate EQ (%d)\n",
8877 cpup->hdwq);
67d12733 8878 goto out_error;
da0436e9 8879 }
7365f6fd 8880 qdesc->qe_valid = 1;
657add4e 8881 qdesc->hdwq = cpup->hdwq;
3ad348d9 8882 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
32517fc0 8883 qdesc->last_cpu = qdesc->chann;
8884
8885 /* Save the allocated EQ in the Hardware Queue */
8886 qp->hba_eq = qdesc;
8887
8888 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
8889 list_add(&qdesc->cpu_list, &eqi->list);
895427bd 8890 }
67d12733 8891
8892 /* Now we need to populate the other Hardware Queues, that share
8893 * an IRQ vector, with the associated EQ ptr.
8894 */
8895 for_each_present_cpu(cpu) {
8896 cpup = &phba->sli4_hba.cpu_map[cpu];
8897
8898 /* Check for EQ already allocated in previous loop */
8899 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
8900 continue;
8901
8902 /* Check for multiple CPUs per hdwq */
8903 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
8904 if (qp->hba_eq)
8905 continue;
8906
8907 /* We need to share an EQ for this hdwq */
8908 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
8909 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
8910 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
8911 }
67d12733 8912
cdb42bec 8913 /* Allocate SCSI SLI4 CQ/WQs */
6a828b0f 8914 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
895427bd 8915 if (lpfc_alloc_fcp_wq_cq(phba, idx))
67d12733 8916 goto out_error;
6a828b0f 8917 }
da0436e9 8918
8919 /* Allocate NVME SLI4 CQ/WQs */
8920 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6a828b0f 8921 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8922 if (lpfc_alloc_nvme_wq_cq(phba, idx))
8923 goto out_error;
6a828b0f 8924 }
67d12733 8925
8926 if (phba->nvmet_support) {
8927 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8928 cpu = lpfc_find_cpu_handle(phba, idx,
8929 LPFC_FIND_BY_HDWQ);
8930 qdesc = lpfc_sli4_queue_alloc(
8931 phba,
8932 LPFC_DEFAULT_PAGE_SIZE,
8933 phba->sli4_hba.cq_esize,
8934 phba->sli4_hba.cq_ecount,
8935 cpu);
8936 if (!qdesc) {
8937 lpfc_printf_log(
8938 phba, KERN_ERR, LOG_INIT,
8939 "3142 Failed allocate NVME "
8940 "CQ Set (%d)\n", idx);
8941 goto out_error;
8942 }
8943 qdesc->qe_valid = 1;
5e5b511d 8944 qdesc->hdwq = idx;
c1a21ebc 8945 qdesc->chann = cpu;
cdb42bec 8946 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
2d7dbc4c 8947 }
8948 }
8949 }
8950
da0436e9 8951 /*
67d12733 8952 * Create Slow Path Completion Queues (CQs)
8953 */
8954
c1a21ebc 8955 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
da0436e9 8956 /* Create slow-path Mailbox Command Complete Queue */
8957 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8958 phba->sli4_hba.cq_esize,
c1a21ebc 8959 phba->sli4_hba.cq_ecount, cpu);
8960 if (!qdesc) {
8961 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8962 "0500 Failed allocate slow-path mailbox CQ\n");
67d12733 8963 goto out_error;
da0436e9 8964 }
7365f6fd 8965 qdesc->qe_valid = 1;
8966 phba->sli4_hba.mbx_cq = qdesc;
8967
8968 /* Create slow-path ELS Complete Queue */
8969 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8970 phba->sli4_hba.cq_esize,
c1a21ebc 8971 phba->sli4_hba.cq_ecount, cpu);
8972 if (!qdesc) {
8973 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8974 "0501 Failed allocate slow-path ELS CQ\n");
67d12733 8975 goto out_error;
da0436e9 8976 }
7365f6fd 8977 qdesc->qe_valid = 1;
6a828b0f 8978 qdesc->chann = 0;
8979 phba->sli4_hba.els_cq = qdesc;
8980
da0436e9 8981
5350d872 8982 /*
67d12733 8983 * Create Slow Path Work Queues (WQs)
5350d872 8984 */
8985
8986 /* Create Mailbox Command Queue */
da0436e9 8987
8988 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8989 phba->sli4_hba.mq_esize,
c1a21ebc 8990 phba->sli4_hba.mq_ecount, cpu);
8991 if (!qdesc) {
8992 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8993 "0505 Failed allocate slow-path MQ\n");
67d12733 8994 goto out_error;
da0436e9 8995 }
6a828b0f 8996 qdesc->chann = 0;
8997 phba->sli4_hba.mbx_wq = qdesc;
8998
8999 /*
67d12733 9000 * Create ELS Work Queues
da0436e9 9001 */
9002
9003 /* Create slow-path ELS Work Queue */
9004 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9005 phba->sli4_hba.wq_esize,
c1a21ebc 9006 phba->sli4_hba.wq_ecount, cpu);
9007 if (!qdesc) {
9008 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9009 "0504 Failed allocate slow-path ELS WQ\n");
67d12733 9010 goto out_error;
da0436e9 9011 }
6a828b0f 9012 qdesc->chann = 0;
da0436e9 9013 phba->sli4_hba.els_wq = qdesc;
9014 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9015
9016 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9017 /* Create NVME LS Complete Queue */
9018 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9019 phba->sli4_hba.cq_esize,
c1a21ebc 9020 phba->sli4_hba.cq_ecount, cpu);
9021 if (!qdesc) {
9022 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9023 "6079 Failed allocate NVME LS CQ\n");
9024 goto out_error;
9025 }
6a828b0f 9026 qdesc->chann = 0;
7365f6fd 9027 qdesc->qe_valid = 1;
9028 phba->sli4_hba.nvmels_cq = qdesc;
9029
9030 /* Create NVME LS Work Queue */
9031 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9032 phba->sli4_hba.wq_esize,
c1a21ebc 9033 phba->sli4_hba.wq_ecount, cpu);
9034 if (!qdesc) {
9035 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9036 "6080 Failed allocate NVME LS WQ\n");
9037 goto out_error;
9038 }
6a828b0f 9039 qdesc->chann = 0;
9040 phba->sli4_hba.nvmels_wq = qdesc;
9041 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9042 }
da0436e9 9043
9044 /*
9045 * Create Receive Queue (RQ)
9046 */
9047
9048 /* Create Receive Queue for header */
9049 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9050 phba->sli4_hba.rq_esize,
c1a21ebc 9051 phba->sli4_hba.rq_ecount, cpu);
9052 if (!qdesc) {
9053 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9054 "0506 Failed allocate receive HRQ\n");
67d12733 9055 goto out_error;
9056 }
9057 phba->sli4_hba.hdr_rq = qdesc;
9058
9059 /* Create Receive Queue for data */
9060 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
9061 phba->sli4_hba.rq_esize,
c1a21ebc 9062 phba->sli4_hba.rq_ecount, cpu);
9063 if (!qdesc) {
9064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9065 "0507 Failed allocate receive DRQ\n");
67d12733 9066 goto out_error;
9067 }
9068 phba->sli4_hba.dat_rq = qdesc;
9069
9070 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
9071 phba->nvmet_support) {
2d7dbc4c 9072 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9073 cpu = lpfc_find_cpu_handle(phba, idx,
9074 LPFC_FIND_BY_HDWQ);
9075 /* Create NVMET Receive Queue for header */
9076 qdesc = lpfc_sli4_queue_alloc(phba,
81b96eda 9077 LPFC_DEFAULT_PAGE_SIZE,
2d7dbc4c 9078 phba->sli4_hba.rq_esize,
9079 LPFC_NVMET_RQE_DEF_COUNT,
9080 cpu);
9081 if (!qdesc) {
9082 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9083 "3146 Failed allocate "
9084 "receive HRQ\n");
9085 goto out_error;
9086 }
5e5b511d 9087 qdesc->hdwq = idx;
9088 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
9089
9090 /* Only needed for header of RQ pair */
9091 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
9092 GFP_KERNEL,
9093 cpu_to_node(cpu));
9094 if (qdesc->rqbp == NULL) {
9095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9096 "6131 Failed allocate "
9097 "Header RQBP\n");
9098 goto out_error;
9099 }
9100
9101 /* Put list in known state in case driver load fails. */
9102 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
9103
9104 /* Create NVMET Receive Queue for data */
9105 qdesc = lpfc_sli4_queue_alloc(phba,
81b96eda 9106 LPFC_DEFAULT_PAGE_SIZE,
2d7dbc4c 9107 phba->sli4_hba.rq_esize,
9108 LPFC_NVMET_RQE_DEF_COUNT,
9109 cpu);
9110 if (!qdesc) {
9111 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9112 "3156 Failed allocate "
9113 "receive DRQ\n");
9114 goto out_error;
9115 }
5e5b511d 9116 qdesc->hdwq = idx;
9117 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
9118 }
9119 }
9120
9121#if defined(BUILD_NVME)
9122 /* Clear NVME stats */
9123 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9124 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9125 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
9126 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
9127 }
9128 }
9129#endif
9130
9131 /* Clear SCSI stats */
9132 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
9133 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9134 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
9135 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
9136 }
9137 }
9138
9139 return 0;
9140
da0436e9 9141out_error:
67d12733 9142 lpfc_sli4_queue_destroy(phba);
9143 return -ENOMEM;
9144}
9145
9146static inline void
9147__lpfc_sli4_release_queue(struct lpfc_queue **qp)
9148{
9149 if (*qp != NULL) {
9150 lpfc_sli4_queue_free(*qp);
9151 *qp = NULL;
9152 }
9153}
9154
9155static inline void
9156lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
9157{
9158 int idx;
9159
9160 if (*qs == NULL)
9161 return;
9162
9163 for (idx = 0; idx < max; idx++)
9164 __lpfc_sli4_release_queue(&(*qs)[idx]);
9165
9166 kfree(*qs);
9167 *qs = NULL;
9168}
9169
9170static inline void
6a828b0f 9171lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
895427bd 9172{
6a828b0f 9173 struct lpfc_sli4_hdw_queue *hdwq;
657add4e 9174 struct lpfc_queue *eq;
9175 uint32_t idx;
9176
6a828b0f 9177 hdwq = phba->sli4_hba.hdwq;
6a828b0f 9178
9179 /* Loop thru all Hardware Queues */
9180 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9181 /* Free the CQ/WQ corresponding to the Hardware Queue */
9182 lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
9183 lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
9184 lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
9185 lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
657add4e 9186 hdwq[idx].hba_eq = NULL;
9187 hdwq[idx].fcp_cq = NULL;
9188 hdwq[idx].nvme_cq = NULL;
9189 hdwq[idx].fcp_wq = NULL;
9190 hdwq[idx].nvme_wq = NULL;
895427bd 9191 }
9192 /* Loop thru all IRQ vectors */
9193 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
9194 /* Free the EQ corresponding to the IRQ vector */
9195 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9196 lpfc_sli4_queue_free(eq);
9197 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
9198 }
9199}
9200
9201/**
9202 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
9203 * @phba: pointer to lpfc hba data structure.
9204 *
9205 * This routine is invoked to release all the SLI4 queues for the FCoE HBA
9206 * operation.
9207 *
9208 * Return codes
9209 * None - this routine returns void.
da0436e9 9212 **/
5350d872 9213void
9214lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
9215{
9216 /*
9217 * Set FREE_INIT before beginning to free the queues.
9218 * Wait until the users of the queues acknowledge, by
9219 * clearing FREE_WAIT, that the queues may be released.
9220 */
9221 spin_lock_irq(&phba->hbalock);
9222 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
9223 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
9224 spin_unlock_irq(&phba->hbalock);
9225 msleep(20);
9226 spin_lock_irq(&phba->hbalock);
9227 }
9228 spin_unlock_irq(&phba->hbalock);
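	/*
	 * At this point FREE_INIT is set and no user holds FREE_WAIT, so
	 * the frees below cannot race with queue users; the handshake is a
	 * simple flag-based poll with a 20 ms sleep per pass.
	 */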
9229
895427bd 9230 /* Release HBA eqs */
cdb42bec 9231 if (phba->sli4_hba.hdwq)
6a828b0f 9232 lpfc_sli4_release_hdwq(phba);
895427bd 9233
9234 if (phba->nvmet_support) {
9235 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
9236 phba->cfg_nvmet_mrq);
2d7dbc4c 9237
9238 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
9239 phba->cfg_nvmet_mrq);
9240 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
9241 phba->cfg_nvmet_mrq);
9242 }
2d7dbc4c 9243
9244 /* Release mailbox command work queue */
9245 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
9246
9247 /* Release ELS work queue */
9248 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
9249
9250 /* Release NVME LS work queue */
9251 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
9252
9253 /* Release unsolicited receive queue */
9254 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
9255 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
9256
9257 /* Release ELS complete queue */
9258 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
9259
9260 /* Release NVME LS complete queue */
9261 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
9262
9263 /* Release mailbox command complete queue */
9264 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
9265
9266 /* Everything on this list has been freed */
9267 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9268
9269 /* Done with freeing the queues */
9270 spin_lock_irq(&phba->hbalock);
9271 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9272 spin_unlock_irq(&phba->hbalock);
9273}
9274
9275int
9276lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
9277{
9278 struct lpfc_rqb *rqbp;
9279 struct lpfc_dmabuf *h_buf;
9280 struct rqb_dmabuf *rqb_buffer;
9281
9282 rqbp = rq->rqbp;
9283 while (!list_empty(&rqbp->rqb_buffer_list)) {
9284 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
9285 struct lpfc_dmabuf, list);
9286
9287 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
9288 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
9289 rqbp->buffer_count--;
67d12733 9290 }
9291 return 1;
9292}
67d12733 9293
9294static int
9295lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
9296 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
9297 int qidx, uint32_t qtype)
9298{
9299 struct lpfc_sli_ring *pring;
9300 int rc;
9301
9302 if (!eq || !cq || !wq) {
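		/* The nested ternary below names the first missing piece:
		 * a NULL eq reports "EQ", a NULL cq reports "CQ", and an
		 * otherwise-missing wq reports "WQ".
		 */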
9303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9304 "6085 Fast-path %s (%d) not allocated\n",
9305 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
9306 return -ENOMEM;
9307 }
9308
9309 /* create the Cq first */
9310 rc = lpfc_cq_create(phba, cq, eq,
9311 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
9312 if (rc) {
9313 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9314 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9315 qidx, (uint32_t)rc);
9316 return rc;
9317 }
9318
895427bd 9319 if (qtype != LPFC_MBOX) {
cdb42bec 9320 /* Setup cq_map for fast lookup */
9321 if (cq_map)
9322 *cq_map = cq->queue_id;
da0436e9 9323
9324 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9325 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
9326 qidx, cq->queue_id, qidx, eq->queue_id);
da0436e9 9327
9328 /* create the wq */
9329 rc = lpfc_wq_create(phba, wq, cq, qtype);
9330 if (rc) {
9331 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
c835c085 9332 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
9333 qidx, (uint32_t)rc);
9334 /* no need to tear down cq - caller will do so */
9335 return rc;
9336 }
da0436e9 9337
9338 /* Bind this CQ/WQ to the NVME ring */
9339 pring = wq->pring;
9340 pring->sli.sli4.wqp = (void *)wq;
9341 cq->pring = pring;
da0436e9 9342
9343 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9344 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
9345 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
9346 } else {
9347 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
9348 if (rc) {
9349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9350 "0539 Failed setup of slow-path MQ: "
9351 "rc = 0x%x\n", rc);
9352 /* no need to tear down cq - caller will do so */
9353 return rc;
9354 }
da0436e9 9355
9356 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9357 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
9358 phba->sli4_hba.mbx_wq->queue_id,
9359 phba->sli4_hba.mbx_cq->queue_id);
67d12733 9360 }
da0436e9 9361
895427bd 9362 return 0;
9363}
9364
9365/**
9366 * lpfc_setup_cq_lookup - Setup the CQ lookup table
9367 * @phba: pointer to lpfc hba data structure.
9368 *
9369 * This routine will populate the cq_lookup table with all
9370 * available CQ queue_ids.
9371 **/
3999df75 9372static void
9373lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9374{
9375 struct lpfc_queue *eq, *childq;
9376 int qidx;
9377
9378 memset(phba->sli4_hba.cq_lookup, 0,
9379 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
657add4e 9380 /* Loop thru all IRQ vectors */
6a828b0f 9381 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9382 /* Get the EQ corresponding to the IRQ vector */
9383 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9384 if (!eq)
9385 continue;
657add4e 9386 /* Loop through all CQs associated with that EQ */
9387 list_for_each_entry(childq, &eq->child_list, list) {
9388 if (childq->queue_id > phba->sli4_hba.cq_max)
9389 continue;
9390 if ((childq->subtype == LPFC_FCP) ||
9391 (childq->subtype == LPFC_NVME))
9392 phba->sli4_hba.cq_lookup[childq->queue_id] =
9393 childq;
9394 }
9395 }
9396}
9397
9398/**
9399 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
9400 * @phba: pointer to lpfc hba data structure.
9401 *
9402 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
9403 * operation.
9404 *
9405 * Return codes
af901ca1 9406 * 0 - successful
25985edc 9407 * -ENOMEM - No available memory
d439d286 9408 * -EIO - The mailbox failed to complete successfully.
9409 **/
9410int
9411lpfc_sli4_queue_setup(struct lpfc_hba *phba)
9412{
9413 uint32_t shdr_status, shdr_add_status;
9414 union lpfc_sli4_cfg_shdr *shdr;
657add4e 9415 struct lpfc_vector_map_info *cpup;
cdb42bec 9416 struct lpfc_sli4_hdw_queue *qp;
962bc51b 9417 LPFC_MBOXQ_t *mboxq;
657add4e 9418 int qidx, cpu;
cb733e35 9419 uint32_t length, usdelay;
895427bd 9420 int rc = -ENOMEM;
9421
9422 /* Check for dual-ULP support */
9423 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9424 if (!mboxq) {
9425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9426 "3249 Unable to allocate memory for "
9427 "QUERY_FW_CFG mailbox command\n");
9428 return -ENOMEM;
9429 }
9430 length = (sizeof(struct lpfc_mbx_query_fw_config) -
9431 sizeof(struct lpfc_sli4_cfg_mhdr));
9432 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9433 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
9434 length, LPFC_SLI4_MBX_EMBED);
9435
9436 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9437
9438 shdr = (union lpfc_sli4_cfg_shdr *)
9439 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9440 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9441 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9442 if (shdr_status || shdr_add_status || rc) {
9443 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9444 "3250 QUERY_FW_CFG mailbox failed with status "
9445 "x%x add_status x%x, mbx status x%x\n",
9446 shdr_status, shdr_add_status, rc);
9447 if (rc != MBX_TIMEOUT)
9448 mempool_free(mboxq, phba->mbox_mem_pool);
9449 rc = -ENXIO;
9450 goto out_error;
9451 }
9452
9453 phba->sli4_hba.fw_func_mode =
9454 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
9455 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
9456 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
9457 phba->sli4_hba.physical_port =
9458 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
9459 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9460 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
9461 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
9462 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
9463
9464 if (rc != MBX_TIMEOUT)
9465 mempool_free(mboxq, phba->mbox_mem_pool);
9466
9467 /*
67d12733 9468 * Set up HBA Event Queues (EQs)
da0436e9 9469 */
cdb42bec 9470 qp = phba->sli4_hba.hdwq;
da0436e9 9471
67d12733 9472 /* Set up HBA event queue */
cdb42bec 9473 if (!qp) {
9474 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9475 "3147 Fast-path EQs not allocated\n");
1b51197d 9476 rc = -ENOMEM;
67d12733 9477 goto out_error;
2e90f4b5 9478 }
9479
9480 /* Loop thru all IRQ vectors */
6a828b0f 9481 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9482 /* Create HBA Event Queues (EQs) in order */
9483 for_each_present_cpu(cpu) {
9484 cpup = &phba->sli4_hba.cpu_map[cpu];
9485
9486 /* Look for the CPU that's using that vector with
9487 * LPFC_CPU_FIRST_IRQ set.
9488 */
9489 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9490 continue;
9491 if (qidx != cpup->eq)
9492 continue;
9493
9494 /* Create an EQ for that vector */
9495 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9496 phba->cfg_fcp_imax);
9497 if (rc) {
9498 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9499 "0523 Failed setup of fast-path"
9500 " EQ (%d), rc = 0x%x\n",
9501 cpup->eq, (uint32_t)rc);
9502 goto out_destroy;
9503 }
9504
9505 /* Save the EQ for that vector in the hba_eq_hdl */
9506 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9507 qp[cpup->hdwq].hba_eq;
9508
9509 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9510 "2584 HBA EQ setup: queue[%d]-id=%d\n",
9511 cpup->eq,
9512 qp[cpup->hdwq].hba_eq->queue_id);
da0436e9 9513 }
9514 }
9515
657add4e 9516 /* Loop thru all Hardware Queues */
9517 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9518 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9519 cpu = lpfc_find_cpu_handle(phba, qidx,
9520 LPFC_FIND_BY_HDWQ);
9521 cpup = &phba->sli4_hba.cpu_map[cpu];
9522
9523 /* Create the CQ/WQ corresponding to the
9524 * Hardware Queue
9525 */
895427bd 9526 rc = lpfc_create_wq_cq(phba,
657add4e 9527 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9528 qp[qidx].nvme_cq,
9529 qp[qidx].nvme_wq,
9530 &phba->sli4_hba.hdwq[qidx].nvme_cq_map,
9531 qidx, LPFC_NVME);
9532 if (rc) {
9533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9534 "6123 Failed to setup fastpath "
9535 "NVME WQ/CQ (%d), rc = 0x%x\n",
9536 qidx, (uint32_t)rc);
9537 goto out_destroy;
9538 }
9539 }
9540 }
9541
cdb42bec 9542 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9543 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9544 cpup = &phba->sli4_hba.cpu_map[cpu];
9545
9546 /* Create the CQ/WQ corresponding to the Hardware Queue */
cdb42bec 9547 rc = lpfc_create_wq_cq(phba,
657add4e 9548 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9549 qp[qidx].fcp_cq,
9550 qp[qidx].fcp_wq,
9551 &phba->sli4_hba.hdwq[qidx].fcp_cq_map,
9552 qidx, LPFC_FCP);
9553 if (rc) {
67d12733 9554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9555 "0535 Failed to setup fastpath "
9556 "FCP WQ/CQ (%d), rc = 0x%x\n",
9557 qidx, (uint32_t)rc);
cdb42bec 9558 goto out_destroy;
895427bd 9559 }
67d12733 9560 }
895427bd 9561
da0436e9 9562 /*
895427bd 9563 * Set up Slow Path Complete Queues (CQs)
9564 */
9565
895427bd 9566 /* Set up slow-path MBOX CQ/MQ */
da0436e9 9567
895427bd 9568 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
da0436e9 9569 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9570 "0528 %s not allocated\n",
9571 phba->sli4_hba.mbx_cq ?
d1f525aa 9572 "Mailbox WQ" : "Mailbox CQ");
1b51197d 9573 rc = -ENOMEM;
895427bd 9574 goto out_destroy;
da0436e9 9575 }
da0436e9 9576
cdb42bec 9577 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9578 phba->sli4_hba.mbx_cq,
9579 phba->sli4_hba.mbx_wq,
9580 NULL, 0, LPFC_MBOX);
9581 if (rc) {
9582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9583 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
9584 (uint32_t)rc);
9585 goto out_destroy;
da0436e9 9586 }
9587 if (phba->nvmet_support) {
9588 if (!phba->sli4_hba.nvmet_cqset) {
9589 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9590 "3165 Fast-path NVME CQ Set "
9591 "array not allocated\n");
9592 rc = -ENOMEM;
9593 goto out_destroy;
9594 }
9595 if (phba->cfg_nvmet_mrq > 1) {
9596 rc = lpfc_cq_create_set(phba,
9597 phba->sli4_hba.nvmet_cqset,
cdb42bec 9598 qp,
9599 LPFC_WCQ, LPFC_NVMET);
9600 if (rc) {
9601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9602 "3164 Failed setup of NVME CQ "
9603 "Set, rc = 0x%x\n",
9604 (uint32_t)rc);
9605 goto out_destroy;
9606 }
9607 } else {
9608 /* Set up NVMET Receive Complete Queue */
9609 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
cdb42bec 9610 qp[0].hba_eq,
9611 LPFC_WCQ, LPFC_NVMET);
9612 if (rc) {
9613 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9614 "6089 Failed setup NVMET CQ: "
9615 "rc = 0x%x\n", (uint32_t)rc);
9616 goto out_destroy;
9617 }
9618 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9619
9620 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9621 "6090 NVMET CQ setup: cq-id=%d, "
9622 "parent eq-id=%d\n",
9623 phba->sli4_hba.nvmet_cqset[0]->queue_id,
cdb42bec 9624 qp[0].hba_eq->queue_id);
9625 }
9626 }
da0436e9 9627
9628 /* Set up slow-path ELS WQ/CQ */
9629 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
da0436e9 9630 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9631 "0530 ELS %s not allocated\n",
9632 phba->sli4_hba.els_cq ? "WQ" : "CQ");
1b51197d 9633 rc = -ENOMEM;
895427bd 9634 goto out_destroy;
da0436e9 9635 }
9636 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9637 phba->sli4_hba.els_cq,
9638 phba->sli4_hba.els_wq,
9639 NULL, 0, LPFC_ELS);
9640 if (rc) {
9641 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9642 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9643 (uint32_t)rc);
895427bd 9644 goto out_destroy;
9645 }
9646 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9647 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9648 phba->sli4_hba.els_wq->queue_id,
9649 phba->sli4_hba.els_cq->queue_id);
9650
cdb42bec 9651 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9652 /* Set up NVME LS Complete Queue */
9653 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
9654 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9655 "6091 LS %s not allocated\n",
9656 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9657 rc = -ENOMEM;
9658 goto out_destroy;
9659 }
9660 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9661 phba->sli4_hba.nvmels_cq,
9662 phba->sli4_hba.nvmels_wq,
9663 NULL, 0, LPFC_NVME_LS);
9664 if (rc) {
9665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9666 "0526 Failed setup of NVVME LS WQ/CQ: "
9667 "rc = 0x%x\n", (uint32_t)rc);
9668 goto out_destroy;
9669 }
9670
9671 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9672 "6096 ELS WQ setup: wq-id=%d, "
9673 "parent cq-id=%d\n",
9674 phba->sli4_hba.nvmels_wq->queue_id,
9675 phba->sli4_hba.nvmels_cq->queue_id);
9676 }
9677
9678 /*
9679 * Create NVMET Receive Queue (RQ)
9680 */
9681 if (phba->nvmet_support) {
9682 if ((!phba->sli4_hba.nvmet_cqset) ||
9683 (!phba->sli4_hba.nvmet_mrq_hdr) ||
9684 (!phba->sli4_hba.nvmet_mrq_data)) {
9685 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9686 "6130 MRQ CQ Queues not "
9687 "allocated\n");
9688 rc = -ENOMEM;
9689 goto out_destroy;
9690 }
9691 if (phba->cfg_nvmet_mrq > 1) {
9692 rc = lpfc_mrq_create(phba,
9693 phba->sli4_hba.nvmet_mrq_hdr,
9694 phba->sli4_hba.nvmet_mrq_data,
9695 phba->sli4_hba.nvmet_cqset,
9696 LPFC_NVMET);
9697 if (rc) {
9698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9699 "6098 Failed setup of NVMET "
9700 "MRQ: rc = 0x%x\n",
9701 (uint32_t)rc);
9702 goto out_destroy;
9703 }
9704
9705 } else {
9706 rc = lpfc_rq_create(phba,
9707 phba->sli4_hba.nvmet_mrq_hdr[0],
9708 phba->sli4_hba.nvmet_mrq_data[0],
9709 phba->sli4_hba.nvmet_cqset[0],
9710 LPFC_NVMET);
9711 if (rc) {
9712 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9713 "6057 Failed setup of NVMET "
9714 "Receive Queue: rc = 0x%x\n",
9715 (uint32_t)rc);
9716 goto out_destroy;
9717 }
9718
9719 lpfc_printf_log(
9720 phba, KERN_INFO, LOG_INIT,
9721 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9722 "dat-rq-id=%d parent cq-id=%d\n",
9723 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9724 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9725 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9726
9727 }
9728 }
9729
9730 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9731 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9732 "0540 Receive Queue not allocated\n");
1b51197d 9733 rc = -ENOMEM;
895427bd 9734 goto out_destroy;
da0436e9 9735 }
73d91e50 9736
da0436e9 9737 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
4d9ab994 9738 phba->sli4_hba.els_cq, LPFC_USOL);
9739 if (rc) {
9740 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9741 "0541 Failed setup of Receive Queue: "
a2fc4aef 9742 "rc = 0x%x\n", (uint32_t)rc);
895427bd 9743 goto out_destroy;
da0436e9 9744 }
73d91e50 9745
9746 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9747 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9748 "parent cq-id=%d\n",
9749 phba->sli4_hba.hdr_rq->queue_id,
9750 phba->sli4_hba.dat_rq->queue_id,
4d9ab994 9751 phba->sli4_hba.els_cq->queue_id);
1ba981fd 9752
9753 if (phba->cfg_fcp_imax)
9754 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
9755 else
9756 usdelay = 0;
9757
6a828b0f 9758 for (qidx = 0; qidx < phba->cfg_irq_chann;
cdb42bec 9759 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
0cf07f84 9760 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
cb733e35 9761 usdelay);
43140ca6 9762
9763 if (phba->sli4_hba.cq_max) {
9764 kfree(phba->sli4_hba.cq_lookup);
9765 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
9766 sizeof(struct lpfc_queue *), GFP_KERNEL);
9767 if (!phba->sli4_hba.cq_lookup) {
1ba981fd 9768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9769 "0549 Failed setup of CQ Lookup table: "
9770 "size 0x%x\n", phba->sli4_hba.cq_max);
fad28e3d 9771 rc = -ENOMEM;
895427bd 9772 goto out_destroy;
1ba981fd 9773 }
6a828b0f 9774 lpfc_setup_cq_lookup(phba);
1ba981fd 9775 }
9776 return 0;
9777
9778out_destroy:
9779 lpfc_sli4_queue_unset(phba);
9780out_error:
9781 return rc;
9782}
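/*
 * Illustrative sketch, not part of the upstream driver: the typical
 * pairing of lpfc_sli4_queue_setup()/lpfc_sli4_queue_unset() during
 * port bring-up. Setup creates the EQs first and then the CQ/WQ pairs
 * that hang off them, and it already unwinds partial state through its
 * out_destroy label, so a caller only needs the pattern below. The
 * function name and "xxxx" log id are hypothetical placeholders.
 */
static int lpfc_example_sli4_queue_bringup(struct lpfc_hba *phba)
{
	int rc;

	rc = lpfc_sli4_queue_setup(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"xxxx Example: queue setup failed, rc = 0x%x\n",
				(uint32_t)rc);
		return rc;
	}

	/* ... enable interrupts, start the port, etc.  Any later failure
	 * is mirrored with lpfc_sli4_queue_unset(phba).
	 */
	return 0;
}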
9783
9784/**
9785 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
9786 * @phba: pointer to lpfc hba data structure.
9787 *
9788 * This routine is invoked to unset all the SLI4 queues that were set up
9789 * for the FCoE HBA operation. The routine returns nothing; queues that
9790 * were never allocated are simply skipped.
9795 **/
9796void
9797lpfc_sli4_queue_unset(struct lpfc_hba *phba)
9798{
cdb42bec 9799 struct lpfc_sli4_hdw_queue *qp;
657add4e 9800 struct lpfc_queue *eq;
895427bd 9801 int qidx;
9802
9803 /* Unset mailbox command work queue */
9804 if (phba->sli4_hba.mbx_wq)
9805 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
9806
9807 /* Unset NVME LS work queue */
9808 if (phba->sli4_hba.nvmels_wq)
9809 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
9810
da0436e9 9811 /* Unset ELS work queue */
019c0d66 9812 if (phba->sli4_hba.els_wq)
9813 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
9814
da0436e9 9815 /* Unset unsolicited receive queue */
9816 if (phba->sli4_hba.hdr_rq)
9817 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
9818 phba->sli4_hba.dat_rq);
9819
da0436e9 9820 /* Unset mailbox command complete queue */
9821 if (phba->sli4_hba.mbx_cq)
9822 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
9823
da0436e9 9824 /* Unset ELS complete queue */
9825 if (phba->sli4_hba.els_cq)
9826 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
9827
9828 /* Unset NVME LS complete queue */
9829 if (phba->sli4_hba.nvmels_cq)
9830 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
9831
9832 if (phba->nvmet_support) {
9833 /* Unset NVMET MRQ queue */
9834 if (phba->sli4_hba.nvmet_mrq_hdr) {
9835 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9836 lpfc_rq_destroy(
9837 phba,
9838 phba->sli4_hba.nvmet_mrq_hdr[qidx],
9839 phba->sli4_hba.nvmet_mrq_data[qidx]);
bcb24f65 9840 }
2d7dbc4c 9841
9842 /* Unset NVMET CQ Set complete queue */
9843 if (phba->sli4_hba.nvmet_cqset) {
9844 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9845 lpfc_cq_destroy(
9846 phba, phba->sli4_hba.nvmet_cqset[qidx]);
9847 }
9848 }
9849
9850 /* Unset fast-path SLI4 queues */
9851 if (phba->sli4_hba.hdwq) {
657add4e 9852 /* Loop thru all Hardware Queues */
cdb42bec 9853 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
657add4e 9854 /* Destroy the CQ/WQ corresponding to Hardware Queue */
9855 qp = &phba->sli4_hba.hdwq[qidx];
9856 lpfc_wq_destroy(phba, qp->fcp_wq);
9857 lpfc_wq_destroy(phba, qp->nvme_wq);
9858 lpfc_cq_destroy(phba, qp->fcp_cq);
9859 lpfc_cq_destroy(phba, qp->nvme_cq);
9860 }
9861 /* Loop thru all IRQ vectors */
9862 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9863 /* Destroy the EQ corresponding to the IRQ vector */
9864 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9865 lpfc_eq_destroy(phba, eq);
9866 }
9867 }
895427bd 9868
9869 kfree(phba->sli4_hba.cq_lookup);
9870 phba->sli4_hba.cq_lookup = NULL;
9871 phba->sli4_hba.cq_max = 0;
9872}
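/*
 * Readability note (derived from the code above): teardown runs
 * children-first - work and receive queues before their parent CQs,
 * and all CQs before the EQs - mirroring the create order enforced in
 * lpfc_sli4_queue_setup().
 */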
9873
9874/**
9875 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
9876 * @phba: pointer to lpfc hba data structure.
9877 *
9878 * This routine is invoked to allocate and set up a pool of completion queue
9879 * events. The body of the completion queue event is a completion queue
9880 * entry (CQE). For now, this pool is used by the interrupt service routine to queue
9881 * the following HBA completion queue events for the worker thread to process:
9882 * - Mailbox asynchronous events
9883 * - Receive queue completion unsolicited events
9884 * Later, this can be used for all the slow-path events.
9885 *
9886 * Return codes
af901ca1 9887 * 0 - successful
25985edc 9888 * -ENOMEM - No available memory
9889 **/
9890static int
9891lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
9892{
9893 struct lpfc_cq_event *cq_event;
9894 int i;
9895
9896 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
9897 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
9898 if (!cq_event)
9899 goto out_pool_create_fail;
9900 list_add_tail(&cq_event->list,
9901 &phba->sli4_hba.sp_cqe_event_pool);
9902 }
9903 return 0;
9904
9905out_pool_create_fail:
9906 lpfc_sli4_cq_event_pool_destroy(phba);
9907 return -ENOMEM;
9908}
9909
9910/**
9911 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
9912 * @phba: pointer to lpfc hba data structure.
9913 *
9914 * This routine is invoked to free the pool of completion queue events at
9915 * driver unload time. Note that it is the responsibility of the driver
9916 * cleanup routine to free all the outstanding completion-queue events
9917 * allocated from this pool back into the pool before invoking this routine
9918 * to destroy the pool.
9919 **/
9920static void
9921lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
9922{
9923 struct lpfc_cq_event *cq_event, *next_cq_event;
9924
9925 list_for_each_entry_safe(cq_event, next_cq_event,
9926 &phba->sli4_hba.sp_cqe_event_pool, list) {
9927 list_del(&cq_event->list);
9928 kfree(cq_event);
9929 }
9930}
9931
9932/**
9933 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9934 * @phba: pointer to lpfc hba data structure.
9935 *
9936 * This routine is the lock free version of the API invoked to allocate a
9937 * completion-queue event from the free pool.
9938 *
9939 * Return: Pointer to the newly allocated completion-queue event if successful
9940 * NULL otherwise.
9941 **/
9942struct lpfc_cq_event *
9943__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9944{
9945 struct lpfc_cq_event *cq_event = NULL;
9946
9947 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
9948 struct lpfc_cq_event, list);
9949 return cq_event;
9950}
9951
9952/**
9953 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9954 * @phba: pointer to lpfc hba data structure.
9955 *
9956 * This routine is the lock version of the API invoked to allocate a
9957 * completion-queue event from the free pool.
9958 *
9959 * Return: Pointer to the newly allocated completion-queue event if successful
9960 * NULL otherwise.
9961 **/
9962struct lpfc_cq_event *
9963lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9964{
9965 struct lpfc_cq_event *cq_event;
9966 unsigned long iflags;
9967
9968 spin_lock_irqsave(&phba->hbalock, iflags);
9969 cq_event = __lpfc_sli4_cq_event_alloc(phba);
9970 spin_unlock_irqrestore(&phba->hbalock, iflags);
9971 return cq_event;
9972}
9973
9974/**
9975 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9976 * @phba: pointer to lpfc hba data structure.
9977 * @cq_event: pointer to the completion queue event to be freed.
9978 *
9979 * This routine is the lock free version of the API invoked to release a
9980 * completion-queue event back into the free pool.
9981 **/
9982void
9983__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9984 struct lpfc_cq_event *cq_event)
9985{
9986 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
9987}
9988
9989/**
9990 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9991 * @phba: pointer to lpfc hba data structure.
9992 * @cq_event: pointer to the completion queue event to be freed.
9993 *
9994 * This routine is the lock version of the API invoked to release a
9995 * completion-queue event back into the free pool.
9996 **/
9997void
9998lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9999 struct lpfc_cq_event *cq_event)
10000{
10001 unsigned long iflags;
10002 spin_lock_irqsave(&phba->hbalock, iflags);
10003 __lpfc_sli4_cq_event_release(phba, cq_event);
10004 spin_unlock_irqrestore(&phba->hbalock, iflags);
10005}
10006
10007/**
10008 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
10009 * @phba: pointer to lpfc hba data structure.
10010 *
10011 * This routine frees all the pending completion-queue events back into
10012 * the free pool for device reset.
10013 **/
10014static void
10015lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
10016{
10017 LIST_HEAD(cqelist);
10018 struct lpfc_cq_event *cqe;
10019 unsigned long iflags;
10020
10021 /* Retrieve all the pending WCQEs from pending WCQE lists */
10022 spin_lock_irqsave(&phba->hbalock, iflags);
10023 /* Pending FCP XRI abort events */
10024 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10025 &cqelist);
10026 /* Pending ELS XRI abort events */
10027 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10028 &cqelist);
10029 /* Pending async events */
10030 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10031 &cqelist);
10032 spin_unlock_irqrestore(&phba->hbalock, iflags);
10033
10034 while (!list_empty(&cqelist)) {
10035 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
10036 lpfc_sli4_cq_event_release(phba, cqe);
10037 }
10038}
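/*
 * Illustrative sketch, not part of the upstream driver: a round trip
 * through the pool API above from process context. Code that already
 * holds phba->hbalock would use the lock-free
 * __lpfc_sli4_cq_event_alloc()/__lpfc_sli4_cq_event_release() pair
 * instead. The function name is a hypothetical placeholder.
 */
static void lpfc_example_cq_event_roundtrip(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	cq_event = lpfc_sli4_cq_event_alloc(phba);	/* takes hbalock */
	if (!cq_event)
		return;					/* pool exhausted */

	/* ... copy the CQE into the event and queue it on one of the
	 * sp_*_work_queue lists for the worker thread to process ...
	 */

	lpfc_sli4_cq_event_release(phba, cq_event);	/* back to the pool */
}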
10039
10040/**
10041 * lpfc_pci_function_reset - Reset pci function.
10042 * @phba: pointer to lpfc hba data structure.
10043 *
10044 * This routine is invoked to request a PCI function reset. It destroys
10045 * all resources assigned to the PCI function that originates this request.
10046 *
10047 * Return codes
af901ca1 10048 * 0 - successful
25985edc 10049 * -ENOMEM - No available memory
d439d286 10050 * -ENXIO - The mailbox failed to complete successfully.
10051 **/
10052int
10053lpfc_pci_function_reset(struct lpfc_hba *phba)
10054{
10055 LPFC_MBOXQ_t *mboxq;
2fcee4bf 10056 uint32_t rc = 0, if_type;
da0436e9 10057 uint32_t shdr_status, shdr_add_status;
10058 uint32_t rdy_chk;
10059 uint32_t port_reset = 0;
da0436e9 10060 union lpfc_sli4_cfg_shdr *shdr;
2fcee4bf 10061 struct lpfc_register reg_data;
2b81f942 10062 uint16_t devid;
da0436e9 10063
10064 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10065 switch (if_type) {
10066 case LPFC_SLI_INTF_IF_TYPE_0:
10067 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10068 GFP_KERNEL);
10069 if (!mboxq) {
10070 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10071 "0494 Unable to allocate memory for "
10072 "issuing SLI_FUNCTION_RESET mailbox "
10073 "command\n");
10074 return -ENOMEM;
10075 }
da0436e9 10076
10077 /* Setup PCI function reset mailbox-ioctl command */
10078 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10079 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
10080 LPFC_SLI4_MBX_EMBED);
10081 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10082 shdr = (union lpfc_sli4_cfg_shdr *)
10083 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10084 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10085 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
10086 &shdr->response);
10087 if (rc != MBX_TIMEOUT)
10088 mempool_free(mboxq, phba->mbox_mem_pool);
10089 if (shdr_status || shdr_add_status || rc) {
10090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10091 "0495 SLI_FUNCTION_RESET mailbox "
10092 "failed with status x%x add_status x%x,"
10093 " mbx status x%x\n",
10094 shdr_status, shdr_add_status, rc);
10095 rc = -ENXIO;
10096 }
10097 break;
10098 case LPFC_SLI_INTF_IF_TYPE_2:
27d6ac0a 10099 case LPFC_SLI_INTF_IF_TYPE_6:
10100wait:
10101 /*
10102 * Poll the Port Status Register and wait for RDY for
10103 * up to 30 seconds. If the port doesn't respond, treat
10104 * it as an error.
10105 */
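 /* 1500 polls x 20 msec sleep = up to 30 seconds before giving up */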
77d093fb 10106 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
10107 if (lpfc_readl(phba->sli4_hba.u.if_type2.
10108 STATUSregaddr, &reg_data.word0)) {
10109 rc = -ENODEV;
10110 goto out;
10111 }
10112 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
10113 break;
10114 msleep(20);
10115 }
10116
10117 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
10118 phba->work_status[0] = readl(
10119 phba->sli4_hba.u.if_type2.ERR1regaddr);
10120 phba->work_status[1] = readl(
10121 phba->sli4_hba.u.if_type2.ERR2regaddr);
10122 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10123 "2890 Port not ready, port status reg "
10124 "0x%x error 1=0x%x, error 2=0x%x\n",
10125 reg_data.word0,
10126 phba->work_status[0],
10127 phba->work_status[1]);
10128 rc = -ENODEV;
10129 goto out;
10130 }
10131
10132 if (!port_reset) {
10133 /*
10134 * Reset the port now
10135 */
10136 reg_data.word0 = 0;
10137 bf_set(lpfc_sliport_ctrl_end, &reg_data,
10138 LPFC_SLIPORT_LITTLE_ENDIAN);
10139 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
10140 LPFC_SLIPORT_INIT_PORT);
10141 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
10142 CTRLregaddr);
8fcb8acd 10143 /* flush */
10144 pci_read_config_word(phba->pcidev,
10145 PCI_DEVICE_ID, &devid);
2fcee4bf 10146
10147 port_reset = 1;
10148 msleep(20);
10149 goto wait;
10150 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
10151 rc = -ENODEV;
10152 goto out;
10153 }
10154 break;
2f6fa2c9 10155
10156 case LPFC_SLI_INTF_IF_TYPE_1:
10157 default:
10158 break;
da0436e9 10159 }
2fcee4bf 10160
73d91e50 10161out:
2fcee4bf 10162 /* Catch the not-ready port failure after a port reset. */
2f6fa2c9 10163 if (rc) {
10164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10165 "3317 HBA not functional: IP Reset Failed "
2f6fa2c9 10166 "try: echo fw_reset > board_mode\n");
2fcee4bf 10167 rc = -ENODEV;
229adb0e 10168 }
2fcee4bf 10169
10170 return rc;
10171}
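/*
 * Illustrative sketch, not part of the upstream driver: how the reset
 * above is typically consumed early in SLI4 bring-up. A non-zero
 * return (-ENOMEM, -ENXIO or -ENODEV) means the port never reached
 * RDY, and the "3317" message above already points the administrator
 * at the fw_reset recovery knob. The function name is hypothetical.
 */
static int lpfc_example_reset_before_init(struct lpfc_hba *phba)
{
	int rc;

	/* Destroys all resources currently owned by this PCI function */
	rc = lpfc_pci_function_reset(phba);
	if (rc)
		return rc;

	/* ... safe to set up the bootstrap mailbox and queues now ... */
	return 0;
}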
10172
10173/**
10174 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
10175 * @phba: pointer to lpfc hba data structure.
10176 *
10177 * This routine is invoked to set up the PCI device memory space for device
10178 * with SLI-4 interface spec.
10179 *
10180 * Return codes
af901ca1 10181 * 0 - successful
10182 * other values - error
10183 **/
10184static int
10185lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10186{
f30e1bfd 10187 struct pci_dev *pdev = phba->pcidev;
da0436e9 10188 unsigned long bar0map_len, bar1map_len, bar2map_len;
3a487ff7 10189 int error;
2fcee4bf 10190 uint32_t if_type;
da0436e9 10191
f30e1bfd 10192 if (!pdev)
56de8357 10193 return -ENODEV;
10194
10195 /* Set the device DMA mask size */
10196 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10197 if (error)
10198 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10199 if (error)
f30e1bfd 10200 return error;
da0436e9 10201
10202 /*
10203 * The BARs and register set definitions and offset locations are
10204 * dependent on the if_type.
10205 */
10206 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10207 &phba->sli4_hba.sli_intf.word0)) {
3a487ff7 10208 return -ENODEV;
10209 }
10210
10211 /* There is no SLI3 failback for SLI4 devices. */
10212 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
10213 LPFC_SLI_INTF_VALID) {
10214 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10215 "2894 SLI_INTF reg contents invalid "
10216 "sli_intf reg 0x%x\n",
10217 phba->sli4_hba.sli_intf.word0);
3a487ff7 10218 return -ENODEV;
10219 }
10220
10221 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10222 /*
10223 * Get the bus address of SLI4 device Bar regions and the
10224 * number of bytes required by each mapping. The mapping of the
10225 * particular PCI BAR regions is dependent on the type of
10226 * SLI4 device.
da0436e9 10227 */
10228 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
10229 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
10230 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
10231
10232 /*
10233 * Map SLI4 PCI Config Space Register base to a kernel virtual
10234 * addr
10235 */
10236 phba->sli4_hba.conf_regs_memmap_p =
10237 ioremap(phba->pci_bar0_map, bar0map_len);
10238 if (!phba->sli4_hba.conf_regs_memmap_p) {
10239 dev_printk(KERN_ERR, &pdev->dev,
10240 "ioremap failed for SLI4 PCI config "
10241 "registers.\n");
3a487ff7 10242 return -ENODEV;
2fcee4bf 10243 }
f5ca6f2e 10244 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10245 /* Set up BAR0 PCI config space register memory map */
10246 lpfc_sli4_bar0_register_memmap(phba, if_type);
10247 } else {
10248 phba->pci_bar0_map = pci_resource_start(pdev, 1);
10249 bar0map_len = pci_resource_len(pdev, 1);
27d6ac0a 10250 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10251 dev_printk(KERN_ERR, &pdev->dev,
10252 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
3a487ff7 10253 return -ENODEV;
10254 }
10255 phba->sli4_hba.conf_regs_memmap_p =
da0436e9 10256 ioremap(phba->pci_bar0_map, bar0map_len);
10257 if (!phba->sli4_hba.conf_regs_memmap_p) {
10258 dev_printk(KERN_ERR, &pdev->dev,
10259 "ioremap failed for SLI4 PCI config "
10260 "registers.\n");
3a487ff7 10261 return -ENODEV;
10262 }
10263 lpfc_sli4_bar0_register_memmap(phba, if_type);
10264 }
10265
10266 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10267 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
10268 /*
10269 * Map SLI4 if type 0 HBA Control Register base to a
10270 * kernel virtual address and setup the registers.
10271 */
10272 phba->pci_bar1_map = pci_resource_start(pdev,
10273 PCI_64BIT_BAR2);
10274 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10275 phba->sli4_hba.ctrl_regs_memmap_p =
10276 ioremap(phba->pci_bar1_map,
10277 bar1map_len);
10278 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
10279 dev_err(&pdev->dev,
10280 "ioremap failed for SLI4 HBA "
10281 "control registers.\n");
10282 error = -ENOMEM;
10283 goto out_iounmap_conf;
10284 }
10285 phba->pci_bar2_memmap_p =
10286 phba->sli4_hba.ctrl_regs_memmap_p;
27d6ac0a 10287 lpfc_sli4_bar1_register_memmap(phba, if_type);
10288 } else {
10289 error = -ENOMEM;
10290 goto out_iounmap_conf;
10291 }
10292 }
10293
10294 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
10295 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
10296 /*
10297 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
10298 * virtual address and setup the registers.
10299 */
10300 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
10301 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
10302 phba->sli4_hba.drbl_regs_memmap_p =
10303 ioremap(phba->pci_bar1_map, bar1map_len);
10304 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10305 dev_err(&pdev->dev,
10306 "ioremap failed for SLI4 HBA doorbell registers.\n");
3a487ff7 10307 error = -ENOMEM;
10308 goto out_iounmap_conf;
10309 }
10310 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
10311 lpfc_sli4_bar1_register_memmap(phba, if_type);
10312 }
10313
10314 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
10315 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10316 /*
10317 * Map SLI4 if type 0 HBA Doorbell Register base to
10318 * a kernel virtual address and setup the registers.
10319 */
10320 phba->pci_bar2_map = pci_resource_start(pdev,
10321 PCI_64BIT_BAR4);
10322 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10323 phba->sli4_hba.drbl_regs_memmap_p =
10324 ioremap(phba->pci_bar2_map,
10325 bar2map_len);
10326 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10327 dev_err(&pdev->dev,
10328 "ioremap failed for SLI4 HBA"
10329 " doorbell registers.\n");
10330 error = -ENOMEM;
10331 goto out_iounmap_ctrl;
10332 }
10333 phba->pci_bar4_memmap_p =
10334 phba->sli4_hba.drbl_regs_memmap_p;
10335 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
10336 if (error)
10337 goto out_iounmap_all;
10338 } else {
10339 error = -ENOMEM;
2fcee4bf 10340 goto out_iounmap_all;
e4b9794e 10341 }
10342 }
10343
10344 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
10345 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
10346 /*
10347 * Map SLI4 if type 6 HBA DPP Register base to a kernel
10348 * virtual address and setup the registers.
10349 */
10350 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
10351 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
10352 phba->sli4_hba.dpp_regs_memmap_p =
10353 ioremap(phba->pci_bar2_map, bar2map_len);
10354 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10355 dev_err(&pdev->dev,
10356 "ioremap failed for SLI4 HBA dpp registers.\n");
3a487ff7 10357 error = -ENOMEM;
10358 goto out_iounmap_ctrl;
10359 }
10360 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
10361 }
10362
b71413dd 10363 /* Set up the EQ/CQ register handling functions now */
10364 switch (if_type) {
10365 case LPFC_SLI_INTF_IF_TYPE_0:
10366 case LPFC_SLI_INTF_IF_TYPE_2:
b71413dd 10367 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
10368 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10369 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
10370 break;
10371 case LPFC_SLI_INTF_IF_TYPE_6:
10372 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
10373 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10374 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
10375 break;
10376 default:
10377 break;
10378 }
10379
10380 return 0;
10381
10382out_iounmap_all:
10383 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10384out_iounmap_ctrl:
10385 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10386out_iounmap_conf:
10387 iounmap(phba->sli4_hba.conf_regs_memmap_p);
3a487ff7 10388
10389 return error;
10390}
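/*
 * Readability note, summarizing the mappings above: every if_type maps
 * the SLI4 config registers from BAR0. if_type 0 additionally maps
 * control registers from BAR2 and doorbells from BAR4; if_type 6 maps
 * doorbells from BAR2 and, when present, the DPP region from BAR4;
 * if_type 2 needs only the BAR0 config space.
 */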
10391
10392/**
10393 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
10394 * @phba: pointer to lpfc hba data structure.
10395 *
10396 * This routine is invoked to unset the PCI device memory space for device
10397 * with SLI-4 interface spec.
10398 **/
10399static void
10400lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
10401{
10402 uint32_t if_type;
10403 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
da0436e9 10404
10405 switch (if_type) {
10406 case LPFC_SLI_INTF_IF_TYPE_0:
10407 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10408 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10409 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10410 break;
10411 case LPFC_SLI_INTF_IF_TYPE_2:
10412 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10413 break;
10414 case LPFC_SLI_INTF_IF_TYPE_6:
10415 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10416 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10417 break;
10418 case LPFC_SLI_INTF_IF_TYPE_1:
10419 default:
10420 dev_printk(KERN_ERR, &phba->pcidev->dev,
10421 "FATAL - unsupported SLI4 interface type - %d\n",
10422 if_type);
10423 break;
10424 }
10425}
10426
10427/**
10428 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
10429 * @phba: pointer to lpfc hba data structure.
10430 *
10431 * This routine is invoked to enable the MSI-X interrupt vectors to device
45ffac19 10432 * with SLI-3 interface specs.
10433 *
10434 * Return codes
af901ca1 10435 * 0 - successful
10436 * other values - error
10437 **/
10438static int
10439lpfc_sli_enable_msix(struct lpfc_hba *phba)
10440{
45ffac19 10441 int rc;
10442 LPFC_MBOXQ_t *pmb;
10443
10444 /* Set up MSI-X multi-message vectors */
10445 rc = pci_alloc_irq_vectors(phba->pcidev,
10446 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
10447 if (rc < 0) {
10448 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10449 "0420 PCI enable MSI-X failed (%d)\n", rc);
029165ac 10450 goto vec_fail_out;
da0436e9 10451 }
45ffac19 10452
10453 /*
10454 * Assign MSI-X vectors to interrupt handlers
10455 */
10456
10457 /* vector-0 is associated to slow-path handler */
45ffac19 10458 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
ed243d37 10459 &lpfc_sli_sp_intr_handler, 0,
10460 LPFC_SP_DRIVER_HANDLER_NAME, phba);
10461 if (rc) {
10462 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10463 "0421 MSI-X slow-path request_irq failed "
10464 "(%d)\n", rc);
10465 goto msi_fail_out;
10466 }
10467
10468 /* vector-1 is associated to fast-path handler */
45ffac19 10469 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
ed243d37 10470 &lpfc_sli_fp_intr_handler, 0,
10471 LPFC_FP_DRIVER_HANDLER_NAME, phba);
10472
10473 if (rc) {
10474 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10475 "0429 MSI-X fast-path request_irq failed "
10476 "(%d)\n", rc);
10477 goto irq_fail_out;
10478 }
10479
10480 /*
10481 * Configure HBA MSI-X attention conditions to messages
10482 */
10483 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10484
10485 if (!pmb) {
10486 rc = -ENOMEM;
10487 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10488 "0474 Unable to allocate memory for issuing "
10489 "MBOX_CONFIG_MSI command\n");
10490 goto mem_fail_out;
10491 }
10492 rc = lpfc_config_msi(phba, pmb);
10493 if (rc)
10494 goto mbx_fail_out;
10495 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10496 if (rc != MBX_SUCCESS) {
10497 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
10498 "0351 Config MSI mailbox command failed, "
10499 "mbxCmd x%x, mbxStatus x%x\n",
10500 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
10501 goto mbx_fail_out;
10502 }
10503
10504 /* Free memory allocated for mailbox command */
10505 mempool_free(pmb, phba->mbox_mem_pool);
10506 return rc;
10507
10508mbx_fail_out:
10509 /* Free memory allocated for mailbox command */
10510 mempool_free(pmb, phba->mbox_mem_pool);
10511
10512mem_fail_out:
10513 /* free the irq already requested */
45ffac19 10514 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
10515
10516irq_fail_out:
10517 /* free the irq already requested */
45ffac19 10518 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
10519
10520msi_fail_out:
10521 /* Unconfigure MSI-X capability structure */
45ffac19 10522 pci_free_irq_vectors(phba->pcidev);
10523
10524vec_fail_out:
10525 return rc;
10526}
10527
10528/**
10529 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
10530 * @phba: pointer to lpfc hba data structure.
10531 *
10532 * This routine is invoked to enable the MSI interrupt mode to device with
10533 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
10534 * enable the MSI vector. The device driver is responsible for calling the
10535 * request_irq() to register the MSI vector with an interrupt handler, which
10536 * is done in this function.
10537 *
10538 * Return codes
af901ca1 10539 * 0 - successful
10540 * other values - error
10541 */
10542static int
10543lpfc_sli_enable_msi(struct lpfc_hba *phba)
10544{
10545 int rc;
10546
10547 rc = pci_enable_msi(phba->pcidev);
10548 if (!rc)
10549 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10550 "0462 PCI enable MSI mode success.\n");
10551 else {
10552 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10553 "0471 PCI enable MSI mode failed (%d)\n", rc);
10554 return rc;
10555 }
10556
10557 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
ed243d37 10558 0, LPFC_DRIVER_NAME, phba);
10559 if (rc) {
10560 pci_disable_msi(phba->pcidev);
10561 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10562 "0478 MSI request_irq failed (%d)\n", rc);
10563 }
10564 return rc;
10565}
10566
10567/**
10568 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
10569 * @phba: pointer to lpfc hba data structure.
10570 *
10571 * This routine is invoked to enable device interrupt and associate driver's
10572 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
10573 * spec. Depending on the interrupt mode configured in the driver, the driver
10574 * will try to fall back from the configured interrupt mode to an interrupt
10575 * mode which is supported by the platform, kernel, and device in the order
10576 * of:
10577 * MSI-X -> MSI -> IRQ.
10578 *
10579 * Return codes
af901ca1 10580 * 0 - successful
10581 * other values - error
10582 **/
10583static uint32_t
10584lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10585{
10586 uint32_t intr_mode = LPFC_INTR_ERROR;
10587 int retval;
10588
10589 if (cfg_mode == 2) {
10590 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
10591 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
10592 if (!retval) {
10593 /* Now, try to enable MSI-X interrupt mode */
10594 retval = lpfc_sli_enable_msix(phba);
10595 if (!retval) {
10596 /* Indicate initialization to MSI-X mode */
10597 phba->intr_type = MSIX;
10598 intr_mode = 2;
10599 }
10600 }
10601 }
10602
10603 /* Fallback to MSI if MSI-X initialization failed */
10604 if (cfg_mode >= 1 && phba->intr_type == NONE) {
10605 retval = lpfc_sli_enable_msi(phba);
10606 if (!retval) {
10607 /* Indicate initialization to MSI mode */
10608 phba->intr_type = MSI;
10609 intr_mode = 1;
10610 }
10611 }
10612
10613 /* Fallback to INTx if both MSI-X/MSI initialization failed */
10614 if (phba->intr_type == NONE) {
10615 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
10616 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10617 if (!retval) {
10618 /* Indicate initialization to INTx mode */
10619 phba->intr_type = INTx;
10620 intr_mode = 0;
10621 }
10622 }
10623 return intr_mode;
10624}
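/*
 * Illustrative sketch, not part of the upstream driver: how a SLI-3
 * probe path consumes the fallback chain above. cfg_mode 2 requests
 * MSI-X and degrades to MSI and then INTx; the mode that actually
 * worked is recorded so a later restart can start from it. The
 * function name and "xxxx" log id are hypothetical placeholders.
 */
static int lpfc_example_sli3_intr_bringup(struct lpfc_hba *phba)
{
	uint32_t intr_mode;

	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"xxxx Example: failed to enable interrupt\n");
		return -ENODEV;
	}
	phba->intr_mode = intr_mode;
	return 0;
}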
10625
10626/**
10627 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
10628 * @phba: pointer to lpfc hba data structure.
10629 *
10630 * This routine is invoked to disable device interrupt and disassociate the
10631 * driver's interrupt handler(s) from interrupt vector(s) to device with
10632 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
10633 * release the interrupt vector(s) for the message signaled interrupt.
10634 **/
10635static void
10636lpfc_sli_disable_intr(struct lpfc_hba *phba)
10637{
10638 int nr_irqs, i;
10639
da0436e9 10640 if (phba->intr_type == MSIX)
10641 nr_irqs = LPFC_MSIX_VECTORS;
10642 else
10643 nr_irqs = 1;
10644
10645 for (i = 0; i < nr_irqs; i++)
10646 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10647 pci_free_irq_vectors(phba->pcidev);
10648
10649 /* Reset interrupt management states */
10650 phba->intr_type = NONE;
10651 phba->sli.slistat.sli_intr = 0;
10652}
10653
6a828b0f 10654/**
657add4e 10655 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
10656 * @phba: pointer to lpfc hba data structure.
10657 * @id: EQ vector index or Hardware Queue index
10658 * @match: LPFC_FIND_BY_EQ = match by EQ
10659 * LPFC_FIND_BY_HDWQ = match by Hardware Queue
657add4e 10660 * Return the CPU that matches the selection criteria
10661 */
10662static uint16_t
10663lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10664{
10665 struct lpfc_vector_map_info *cpup;
10666 int cpu;
10667
657add4e 10668 /* Loop through all CPUs */
10669 for_each_present_cpu(cpu) {
10670 cpup = &phba->sli4_hba.cpu_map[cpu];
10671
10672 /* If we are matching by EQ, there may be multiple CPUs
10673 * using the same vector, so select the one with
10674 * LPFC_CPU_FIRST_IRQ set.
10675 */
6a828b0f 10676 if ((match == LPFC_FIND_BY_EQ) &&
657add4e 10677 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10678 (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
10679 (cpup->eq == id))
10680 return cpu;
10681
10682 /* If matching by HDWQ, select the first CPU that matches */
10683 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10684 return cpu;
10685 }
10686 return 0;
10687}
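/*
 * Readability note: lpfc_sli4_queue_setup() above calls this with
 * LPFC_FIND_BY_HDWQ when creating the per-hardware-queue CQ/WQ pairs;
 * LPFC_FIND_BY_EQ serves callers elsewhere in the driver that must map
 * an EQ/vector index back to its LPFC_CPU_FIRST_IRQ CPU.
 */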
10688
10689#ifdef CONFIG_X86
10690/**
10691 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
10692 * @phba: pointer to lpfc hba data structure.
10693 * @cpu: CPU map index
10694 * @phys_id: CPU package physical id
10695 * @core_id: CPU core id
10696 */
10697static int
10698lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10699 uint16_t phys_id, uint16_t core_id)
10700{
10701 struct lpfc_vector_map_info *cpup;
10702 int idx;
10703
10704 for_each_present_cpu(idx) {
10705 cpup = &phba->sli4_hba.cpu_map[idx];
10706 /* Does the cpup match the one we are looking for */
10707 if ((cpup->phys_id == phys_id) &&
10708 (cpup->core_id == core_id) &&
222e9239 10709 (cpu != idx))
6a828b0f 10710 return 1;
10711 }
10712 return 0;
10713}
10714#endif
10715
7bb03bbf 10716/**
895427bd 10717 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
7bb03bbf 10718 * @phba: pointer to lpfc hba data structure.
10719 * @vectors: number of msix vectors allocated.
10720 *
10721 * The routine will figure out the CPU affinity assignment for every
6a828b0f 10722 * MSI-X vector allocated for the HBA.
10723 * In addition, the CPU to IO channel mapping will be calculated
10724 * and the phba->sli4_hba.cpu_map array will reflect this.
7bb03bbf 10725 */
10726static void
10727lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
7bb03bbf 10728{
3ad348d9 10729 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
10730 int max_phys_id, min_phys_id;
10731 int max_core_id, min_core_id;
7bb03bbf 10732 struct lpfc_vector_map_info *cpup;
d9954a2d 10733 struct lpfc_vector_map_info *new_cpup;
75508a8b 10734 const struct cpumask *maskp;
10735#ifdef CONFIG_X86
10736 struct cpuinfo_x86 *cpuinfo;
10737#endif
10738
10739 /* Init cpu_map array */
10740 for_each_possible_cpu(cpu) {
10741 cpup = &phba->sli4_hba.cpu_map[cpu];
10742 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
10743 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
10744 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
10745 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
10746 cpup->irq = LPFC_VECTOR_MAP_EMPTY;
10747 cpup->flag = 0;
10748 }
7bb03bbf 10749
6a828b0f 10750 max_phys_id = 0;
d9954a2d 10751 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
6a828b0f 10752 max_core_id = 0;
d9954a2d 10753 min_core_id = LPFC_VECTOR_MAP_EMPTY;
10754
10755 /* Update CPU map with physical id and core id of each CPU */
10756 for_each_present_cpu(cpu) {
10757 cpup = &phba->sli4_hba.cpu_map[cpu];
10758#ifdef CONFIG_X86
10759 cpuinfo = &cpu_data(cpu);
10760 cpup->phys_id = cpuinfo->phys_proc_id;
10761 cpup->core_id = cpuinfo->cpu_core_id;
10762 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
10763 cpup->flag |= LPFC_CPU_MAP_HYPER;
10764#else
10765 /* No distinction between CPUs for other platforms */
10766 cpup->phys_id = 0;
6a828b0f 10767 cpup->core_id = cpu;
7bb03bbf 10768#endif
6a828b0f 10769
b3295c2a 10770 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10771 "3328 CPU %d physid %d coreid %d flag x%x\n",
10772 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
10773
10774 if (cpup->phys_id > max_phys_id)
10775 max_phys_id = cpup->phys_id;
10776 if (cpup->phys_id < min_phys_id)
10777 min_phys_id = cpup->phys_id;
10778
10779 if (cpup->core_id > max_core_id)
10780 max_core_id = cpup->core_id;
10781 if (cpup->core_id < min_core_id)
10782 min_core_id = cpup->core_id;
7bb03bbf 10783 }
7bb03bbf 10784
10785 for_each_possible_cpu(i) {
10786 struct lpfc_eq_intr_info *eqi =
10787 per_cpu_ptr(phba->sli4_hba.eq_info, i);
10788
10789 INIT_LIST_HEAD(&eqi->list);
10790 eqi->icnt = 0;
10791 }
10792
10793 /* This loop sets up all CPUs that are affinitized with an
10794 * irq vector assigned to the driver. All affinitized CPUs
657add4e 10795 * will get a link to that vector's IRQ and EQ.
d9954a2d 10796 */
75508a8b 10797 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
657add4e 10798 /* Get a CPU mask for all CPUs affinitized to this vector */
10799 maskp = pci_irq_get_affinity(phba->pcidev, idx);
10800 if (!maskp)
10801 continue;
10802
10803 i = 0;
10804 /* Loop through all CPUs associated with vector idx */
75508a8b 10805 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
657add4e 10806 /* Set the EQ index and IRQ for that vector */
75508a8b 10807 cpup = &phba->sli4_hba.cpu_map[cpu];
6a828b0f 10808 cpup->eq = idx;
6a828b0f
JS
10809 cpup->irq = pci_irq_vector(phba->pcidev, idx);
10810
10811 /* If this is the first CPU that's assigned to this
10812 * vector, set LPFC_CPU_FIRST_IRQ.
10813 */
10814 if (!i)
10815 cpup->flag |= LPFC_CPU_FIRST_IRQ;
10816 i++;
10817
10818 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10819 "3336 Set Affinity: CPU %d "
10820 "irq %d eq %d flag x%x\n",
10821 cpu, cpup->irq, cpup->eq, cpup->flag);
6a828b0f 10822 }
b3295c2a 10823 }
10824
10825 /* After looking at each irq vector assigned to this pcidev, it's
10826 * possible to see that not ALL CPUs have been accounted for.
10827 * Next we will set any unassigned (unaffinitized) cpu map
10828 * entries to an IRQ on the same phys_id.
10829 */
10830 first_cpu = cpumask_first(cpu_present_mask);
10831 start_cpu = first_cpu;
10832
10833 for_each_present_cpu(cpu) {
10834 cpup = &phba->sli4_hba.cpu_map[cpu];
10835
10836 /* Is this CPU entry unassigned */
10837 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10838 /* Mark CPU as IRQ not assigned by the kernel */
10839 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10840
657add4e 10841 /* If so, find a new_cpup that's on the SAME
10842 * phys_id as cpup. start_cpu will start where we
10843 * left off so all unassigned entries don't get assigned
10844 * the IRQ of the first entry.
10845 */
10846 new_cpu = start_cpu;
10847 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10848 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10849 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10850 (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
10851 (new_cpup->phys_id == cpup->phys_id))
10852 goto found_same;
10853 new_cpu = cpumask_next(
10854 new_cpu, cpu_present_mask);
10855 if (new_cpu == nr_cpumask_bits)
10856 new_cpu = first_cpu;
10857 }
10858 /* At this point, we leave the CPU as unassigned */
10859 continue;
10860found_same:
10861 /* We found a matching phys_id, so copy the IRQ info */
10862 cpup->eq = new_cpup->eq;
10863 cpup->irq = new_cpup->irq;
10864
10865 /* Bump start_cpu to the next slot to minimize the
10866 * chance of having multiple unassigned CPU entries
10867 * selecting the same IRQ.
10868 */
10869 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10870 if (start_cpu == nr_cpumask_bits)
10871 start_cpu = first_cpu;
10872
657add4e 10873 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
d9954a2d 10874 "3337 Set Affinity: CPU %d "
657add4e 10875 "irq %d from id %d same "
d9954a2d 10876 "phys_id (%d)\n",
657add4e 10877 cpu, cpup->irq, new_cpu, cpup->phys_id);
10878 }
10879 }
10880
10881 /* Set any unassigned cpu map entries to an IRQ on any phys_id */
10882 start_cpu = first_cpu;
10883
10884 for_each_present_cpu(cpu) {
10885 cpup = &phba->sli4_hba.cpu_map[cpu];
10886
10887 /* Is this entry unassigned */
10888 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
10889 /* Mark it as IRQ not assigned by the kernel */
10890 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
10891
657add4e 10892 /* If so, find a new_cpup that's on ANY phys_id
10893 * as the cpup. start_cpu will start where we
10894 * left off so all unassigned entries don't get
10895 * assigned the IRQ of the first entry.
10896 */
10897 new_cpu = start_cpu;
10898 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10899 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10900 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
10901 (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
10902 goto found_any;
10903 new_cpu = cpumask_next(
10904 new_cpu, cpu_present_mask);
10905 if (new_cpu == nr_cpumask_bits)
10906 new_cpu = first_cpu;
10907 }
10908 /* We should never leave an entry unassigned */
10909 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10910 "3339 Set Affinity: CPU %d "
10911 "irq %d UNASSIGNED\n",
10912 cpu, cpup->irq);
10913 continue;
10914found_any:
10915 /* We found an available entry, copy the IRQ info */
10916 cpup->eq = new_cpup->eq;
10917 cpup->irq = new_cpup->irq;
10918
10919 /* Bump start_cpu to the next slot to minimize the
10920 * chance of having multiple unassigned CPU entries
10921 * selecting the same IRQ.
10922 */
10923 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
10924 if (start_cpu == nr_cpumask_bits)
10925 start_cpu = first_cpu;
10926
657add4e 10927 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
d9954a2d 10928 "3338 Set Affinity: CPU %d "
10929 "irq %d from id %d (%d/%d)\n",
10930 cpu, cpup->irq, new_cpu,
10931 new_cpup->phys_id, new_cpup->core_id);
10932 }
10933 }
657add4e 10934
10935 /* Assign hdwq indices that are unique across all cpus in the map
10936 * that are also FIRST_CPUs.
10937 */
10938 idx = 0;
10939 for_each_present_cpu(cpu) {
10940 cpup = &phba->sli4_hba.cpu_map[cpu];
10941
10942 /* Only FIRST IRQs get a hdwq index assignment. */
10943 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10944 continue;
10945
10946 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
10947 cpup->hdwq = idx;
10948 idx++;
10949 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10950 "3333 Set Affinity: CPU %d (phys %d core %d): "
10951 "hdwq %d eq %d irq %d flg x%x\n",
10952 cpu, cpup->phys_id, cpup->core_id,
10953 cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
10954 }
10955 /* Finally we need to associate a hdwq with each cpu_map entry
10956 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
10957 * hardware queues than CPUs. For that case we will just round-robin
10958 * the available hardware queues as they get assigned to CPUs.
10959 * The next_idx is the idx from the FIRST_CPU loop above to account
10960 * for irq_chann < hdwq. The idx is used for round-robin assignments
10961 * and needs to start at 0.
657add4e 10962 */
3ad348d9 10963 next_idx = idx;
657add4e 10964 start_cpu = 0;
3ad348d9 10965 idx = 0;
10966 for_each_present_cpu(cpu) {
10967 cpup = &phba->sli4_hba.cpu_map[cpu];
657add4e 10968
10969 /* FIRST cpus are already mapped. */
10970 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10971 continue;
10972
10973 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
10974 * of the unassigned cpus to the next idx so that all
10975 * hdw queues are fully utilized.
10976 */
10977 if (next_idx < phba->cfg_hdw_queue) {
10978 cpup->hdwq = next_idx;
10979 next_idx++;
10980 continue;
10981 }
10982
10983 /* Not a First CPU and all hdw_queues are used. Reuse a
10984 * Hardware Queue for another CPU, so be smart about it
10985 * and pick one that has its IRQ/EQ mapped to the same phys_id
10986 * (CPU package) and core_id.
10987 */
10988 new_cpu = start_cpu;
10989 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
10990 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
10991 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
10992 new_cpup->phys_id == cpup->phys_id &&
10993 new_cpup->core_id == cpup->core_id) {
10994 goto found_hdwq;
657add4e 10995 }
10996 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
10997 if (new_cpu == nr_cpumask_bits)
10998 new_cpu = first_cpu;
10999 }
657add4e 11000
11001 /* If we can't match both phys_id and core_id,
11002 * settle for just a phys_id match.
11003 */
11004 new_cpu = start_cpu;
11005 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11006 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11007 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11008 new_cpup->phys_id == cpup->phys_id)
11009 goto found_hdwq;
11010
11011 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11012 if (new_cpu == nr_cpumask_bits)
11013 new_cpu = first_cpu;
657add4e 11014 }
11015
11016 /* Otherwise just round robin on cfg_hdw_queue */
11017 cpup->hdwq = idx % phba->cfg_hdw_queue;
11018 idx++;
11019 goto logit;
11020 found_hdwq:
11021 /* We found an available entry, copy the hdwq info */
11022 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11023 if (start_cpu == nr_cpumask_bits)
11024 start_cpu = first_cpu;
11025 cpup->hdwq = new_cpup->hdwq;
11026 logit:
11027 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11028 "3335 Set Affinity: CPU %d (phys %d core %d): "
11029 "hdwq %d eq %d irq %d flg x%x\n",
11030 cpu, cpup->phys_id, cpup->core_id,
11031 cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
11032 }
11033
11034 /* The cpu_map array will be used later during initialization
11035 * when EQ / CQ / WQs are allocated and configured.
11036 */
b3295c2a 11037 return;
7bb03bbf 11038}
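/*
 * Readability note, summarizing the passes above:
 *   1) record phys_id/core_id for every present CPU (and flag
 *      hyperthreads on x86);
 *   2) link each CPU to the EQ/IRQ it is affinitized with, marking the
 *      first CPU per vector LPFC_CPU_FIRST_IRQ;
 *   3) give any CPU the kernel left unaffinitized an EQ/IRQ from the
 *      same phys_id, then from any phys_id;
 *   4) hand out hdwq indices - one per FIRST_IRQ CPU, with remaining
 *      CPUs sharing hdwqs, preferring the same phys_id and core_id.
 */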
7bb03bbf 11039
11040/**
11041 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
11042 * @phba: pointer to lpfc hba data structure.
11043 *
11044 * This routine is invoked to enable the MSI-X interrupt vectors to device
45ffac19 11045 * with SLI-4 interface spec.
11046 *
11047 * Return codes
af901ca1 11048 * 0 - successful
11049 * other values - error
11050 **/
11051static int
11052lpfc_sli4_enable_msix(struct lpfc_hba *phba)
11053{
75baf696 11054 int vectors, rc, index;
b83d005e 11055 char *name;
11056
11057 /* Set up MSI-X multi-message vectors */
6a828b0f 11058 vectors = phba->cfg_irq_chann;
45ffac19 11059
f358dd0c 11060 rc = pci_alloc_irq_vectors(phba->pcidev,
75508a8b 11061 1,
f358dd0c 11062 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
4f871e1b 11063 if (rc < 0) {
11064 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11065 "0484 PCI enable MSI-X failed (%d)\n", rc);
029165ac 11066 goto vec_fail_out;
da0436e9 11067 }
4f871e1b 11068 vectors = rc;
75baf696 11069
7bb03bbf 11070 /* Assign MSI-X vectors to interrupt handlers */
67d12733 11071 for (index = 0; index < vectors; index++) {
11072 name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
11073 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
11074 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
4305f183 11075 LPFC_DRIVER_HANDLER_NAME"%d", index);
da0436e9 11076
11077 phba->sli4_hba.hba_eq_hdl[index].idx = index;
11078 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
11079 rc = request_irq(pci_irq_vector(phba->pcidev, index),
11080 &lpfc_sli4_hba_intr_handler, 0,
11081 name,
11082 &phba->sli4_hba.hba_eq_hdl[index]);
11083 if (rc) {
11084 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11085 "0486 MSI-X fast-path (%d) "
11086 "request_irq failed (%d)\n", index, rc);
11087 goto cfg_fail_out;
11088 }
11089 }
11090
6a828b0f 11091 if (vectors != phba->cfg_irq_chann) {
11092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11093 "3238 Reducing IO channels to match number of "
11094 "MSI-X vectors, requested %d got %d\n",
11095 phba->cfg_irq_chann, vectors);
11096 if (phba->cfg_irq_chann > vectors)
11097 phba->cfg_irq_chann = vectors;
982ab128 11098 if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
cdb42bec 11099 phba->cfg_nvmet_mrq = vectors;
82c3e9ba 11100 }
7bb03bbf 11101
11102 return rc;
11103
11104cfg_fail_out:
11105 /* free the irq already requested */
11106 for (--index; index >= 0; index--)
11107 free_irq(pci_irq_vector(phba->pcidev, index),
11108 &phba->sli4_hba.hba_eq_hdl[index]);
da0436e9 11109
da0436e9 11110 /* Unconfigure MSI-X capability structure */
45ffac19 11111 pci_free_irq_vectors(phba->pcidev);
11112
11113vec_fail_out:
11114 return rc;
11115}
11116
11117/**
11118 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
11119 * @phba: pointer to lpfc hba data structure.
11120 *
11121 * This routine is invoked to enable the MSI interrupt mode to device with
11122 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
11123 * to enable the MSI vector. The device driver is responsible for calling
11124 * the request_irq() to register the MSI vector with an interrupt handler,
11125 * which is done in this function.
11126 *
11127 * Return codes
af901ca1 11128 * 0 - successful
11129 * other values - error
11130 **/
11131static int
11132lpfc_sli4_enable_msi(struct lpfc_hba *phba)
11133{
11134 int rc, index;
11135
11136 rc = pci_enable_msi(phba->pcidev);
11137 if (!rc)
11138 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11139 "0487 PCI enable MSI mode success.\n");
11140 else {
11141 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11142 "0488 PCI enable MSI mode failed (%d)\n", rc);
11143 return rc;
11144 }
11145
11146 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
ed243d37 11147 0, LPFC_DRIVER_NAME, phba);
11148 if (rc) {
11149 pci_disable_msi(phba->pcidev);
11150 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11151 "0490 MSI request_irq failed (%d)\n", rc);
75baf696 11152 return rc;
11153 }
11154
6a828b0f 11155 for (index = 0; index < phba->cfg_irq_chann; index++) {
11156 phba->sli4_hba.hba_eq_hdl[index].idx = index;
11157 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
11158 }
11159
75baf696 11160 return 0;
11161}
11162
11163/**
11164 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
11165 * @phba: pointer to lpfc hba data structure.
11166 *
11167 * This routine is invoked to enable device interrupt and associate driver's
11168 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
11169 * interface spec. Depending on the interrupt mode configured in the driver,
11170 * the driver will try to fall back from the configured interrupt mode to an
11171 * interrupt mode which is supported by the platform, kernel, and device in
11172 * the order of:
11173 * MSI-X -> MSI -> IRQ.
11174 *
11175 * Return codes
af901ca1 11176 * 0 - successful
da0436e9
JS
11177 * other values - error
11178 **/
11179static uint32_t
11180lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11181{
11182 uint32_t intr_mode = LPFC_INTR_ERROR;
895427bd 11183 int retval, idx;
da0436e9
JS
11184
11185 if (cfg_mode == 2) {
11186 /* Preparation before conf_msi mbox cmd */
11187 retval = 0;
11188 if (!retval) {
11189 /* Now, try to enable MSI-X interrupt mode */
11190 retval = lpfc_sli4_enable_msix(phba);
11191 if (!retval) {
11192 /* Indicate initialization to MSI-X mode */
11193 phba->intr_type = MSIX;
11194 intr_mode = 2;
11195 }
11196 }
11197 }
11198
11199 /* Fallback to MSI if MSI-X initialization failed */
11200 if (cfg_mode >= 1 && phba->intr_type == NONE) {
11201 retval = lpfc_sli4_enable_msi(phba);
11202 if (!retval) {
11203 /* Indicate initialization to MSI mode */
11204 phba->intr_type = MSI;
11205 intr_mode = 1;
11206 }
11207 }
11208
11209 /* Fallback to INTx if both MSI-X/MSI initalization failed */
11210 if (phba->intr_type == NONE) {
11211 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
11212 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
11213 if (!retval) {
895427bd
JS
11214 struct lpfc_hba_eq_hdl *eqhdl;
11215
da0436e9
JS
11216 /* Indicate initialization to INTx mode */
11217 phba->intr_type = INTx;
11218 intr_mode = 0;
895427bd 11219
6a828b0f 11220 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
895427bd
JS
11221 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
11222 eqhdl->idx = idx;
11223 eqhdl->phba = phba;
1ba981fd 11224 }
da0436e9
JS
11225 }
11226 }
11227 return intr_mode;
11228}
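
/*
 * Caller-side sketch (it mirrors lpfc_pci_probe_one_s4 below): the value
 * returned by lpfc_sli4_enable_intr() is the interrupt mode that actually
 * stuck, not an errno:
 *
 *	intr_mode = lpfc_sli4_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		return -ENODEV;		// no mode could be enabled
 *	phba->intr_mode = intr_mode;	// 2 = MSI-X, 1 = MSI, 0 = INTx
 */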

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX) {
		int index;

		/* Free up MSI-X multi-message vectors */
		for (index = 0; index < phba->cfg_irq_chann; index++) {
			irq_set_affinity_hint(
				pci_irq_vector(phba->pcidev, index),
				NULL);
			free_irq(pci_irq_vector(phba->pcidev, index),
				 &phba->sli4_hba.hba_eq_hdl[index]);
		}
	} else {
		free_irq(phba->pcidev->irq, phba);
	}

	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of device's XRIs exchange busy. It will check the XRI exchange busy
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it will check the XRI exchange busy on outstanding FCP and ELS
 * I/Os every 30 seconds, log an error message, and wait forever. Only when
 * all XRI exchange busy complete will the driver unload proceed with
 * invoking the function reset ioctl mailbox command to the CNA and
 * the rest of the driver unload resource release.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	int idx, ccnt, fcnt;
	int wait_time = 0;
	int io_xri_cmpl = 1;
	int nvmet_xri_cmpl = 1;
	int fcp_xri_cmpl = 1;
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	/* Driver just aborted IOs during the hba_unset process. Pause
	 * here to give the HBA time to complete the IO and get entries
	 * into the abts lists.
	 */
	msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);

	/* Wait for NVME pending IO to flush back to transport. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_nvme_wait_for_io_drain(phba);

	ccnt = 0;
	fcnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];
		fcp_xri_cmpl = list_empty(
			&qp->lpfc_abts_scsi_buf_list);
		if (!fcp_xri_cmpl) /* if list is NOT empty */
			fcnt++;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			io_xri_cmpl = list_empty(
				&qp->lpfc_abts_nvme_buf_list);
			if (!io_xri_cmpl) /* if list is NOT empty */
				ccnt++;
		}
	}
	if (ccnt)
		io_xri_cmpl = 0;
	if (fcnt)
		fcp_xri_cmpl = 0;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		nvmet_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	}

	while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl ||
	       !nvmet_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			if (!nvmet_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6424 NVMET XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!io_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6100 NVME XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!fcp_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2877 FCP XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}

		ccnt = 0;
		fcnt = 0;
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			qp = &phba->sli4_hba.hdwq[idx];
			fcp_xri_cmpl = list_empty(
				&qp->lpfc_abts_scsi_buf_list);
			if (!fcp_xri_cmpl) /* if list is NOT empty */
				fcnt++;
			if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
				io_xri_cmpl = list_empty(
					&qp->lpfc_abts_nvme_buf_list);
				if (!io_xri_cmpl) /* if list is NOT empty */
					ccnt++;
			}
		}
		if (ccnt)
			io_xri_cmpl = 0;
		if (fcnt)
			fcp_xri_cmpl = 0;

		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			nvmet_xri_cmpl = list_empty(
				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		}
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	}
}
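
/*
 * Timing sketch for the wait loop above, assuming the documented values
 * behind the macros (LPFC_XRI_EXCH_BUSY_WAIT_T1 = 10 ms,
 * LPFC_XRI_EXCH_BUSY_WAIT_TMO = 10 s, LPFC_XRI_EXCH_BUSY_WAIT_T2 = 30 s):
 *
 *	0 .. 10 s : re-scan the abort lists every 10 ms
 *	after 10 s: log which lists are still busy, then re-scan every
 *	            30 s indefinitely until all XRIs have completed
 */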

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues the PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls the lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	if (phba->pport)
		phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from being posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, trying to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Disable FW logging to host memory */
	lpfc_ras_stop_fwlog(phba);

	/* Unset the queues shared with the hardware then release all
	 * allocated resources.
	 */
	lpfc_sli4_queue_unset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Free RAS DMA memory */
	if (phba->ras_fwlog.ras_enabled)
		lpfc_sli4_ras_dma_free(phba);

	/* Stop the SLI4 device port */
	if (phba->pport)
		phba->pport->work_port_events = 0;
}
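
/*
 * The teardown order above matters: mailbox traffic is quiesced first,
 * then outstanding iocbs are aborted and busy XRIs drained, and only
 * after interrupts, the worker thread, and the queues are gone is the
 * PCI function reset issued.
 */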

/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_pc_sli4_params(mboxq);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return rc;
}
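
/*
 * The mailbox-issue pattern used above recurs throughout the SLI4
 * initialization path; a minimal sketch, assuming mboxq has already been
 * built:
 *
 *	if (!phba->sli4_hba.intr_enable)
 *		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	else
 *		rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
 *					      lpfc_mbox_tmo_val(phba, mboxq));
 */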

/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	bool exp_wqcq_pages = true;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings. The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;
	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
	sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

	/* Check for firmware nvme support */
	rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
	      bf_get(cfg_xib, mbx_sli4_parameters));

	if (rc) {
		/* Save this to indicate the Firmware supports NVME */
		sli4_params->nvme = 1;

		/* Firmware NVME support, check driver FC4 NVME support */
		if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
					"6133 Disabling NVME support: "
					"FC4 type not supported: x%x\n",
					phba->cfg_enable_fc4_type);
			goto fcponly;
		}
	} else {
		/* No firmware NVME support, check driver FC4 NVME support */
		sli4_params->nvme = 0;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
					"6101 Disabling NVME support: Not "
					"supported by firmware (%d %d) x%x\n",
					bf_get(cfg_nvme, mbx_sli4_parameters),
					bf_get(cfg_xib, mbx_sli4_parameters),
					phba->cfg_enable_fc4_type);
fcponly:
			phba->nvme_support = 0;
			phba->nvmet_support = 0;
			phba->cfg_nvmet_mrq = 0;
			phba->cfg_nvme_seg_cnt = 0;

			/* If no FC4 type support, move to just SCSI support */
			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
				return -ENODEV;
			phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
		}
	}

	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
	 * accommodate 512K and 1M IOs in a single nvme buf and supply
	 * enough NVME LS iocb buffers for larger connectivity counts.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
		phba->cfg_iocb_cnt = 5;
	}

	/* Only embed PBDE for if_type 6; PBDE support requires xib be set */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
		phba->cfg_enable_pbde = 0;

	/*
	 * To support the Suppress Response feature we must satisfy 3
	 * conditions:
	 * lpfc_suppress_rsp module parameter must be set (default).
	 * In the SLI4-Parameters Descriptor:
	 * Extended Inline Buffers (XIB) must be supported.
	 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
	 * (double negative).
	 */
	if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
	    !(bf_get(cfg_nosr, mbx_sli4_parameters)))
		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
	else
		phba->cfg_suppress_rsp = 0;

	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
	 * to use this option, 128-byte WQEs must be used.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
			"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
			bf_get(cfg_xib, mbx_sli4_parameters),
			phba->cfg_enable_pbde,
			phba->fcp_embed_io, phba->nvme_support,
			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) &&
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
	     LPFC_SLI_INTF_FAMILY_LNCR_A0))
		exp_wqcq_pages = false;

	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
	    exp_wqcq_pages &&
	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
		phba->enab_exp_wqcq_pages = 1;
	else
		phba->enab_exp_wqcq_pages = 0;
	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	return 0;
}
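
/*
 * Decision summary for NVME support as computed above: the firmware must
 * report both cfg_nvme and cfg_xib, and the driver's cfg_enable_fc4_type
 * must include LPFC_ENABLE_NVME; otherwise the port falls back to FCP
 * only (or the routine fails with -ENODEV if FCP is not enabled either).
 */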

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device-
 * specific information of the device and driver to see if the driver states
 * that it can support this kind of device. If the match is successful, the
 * driver core invokes this routine. If this routine determines it can claim
 * the HBA, it does all the initialization that it needs to do to handle the
 * HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */

	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);
		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}
		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);
		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
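
/*
 * The error labels above unwind in exact reverse order of the setup
 * sequence, so each label releases only what was successfully acquired
 * before the failing step.
 */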

/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
 * is removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver will
 * fully reinitialize its device during the resume() method call; the driver
 * will set the device to PCI_D3hot state in PCI config space instead of
 * setting it according to the @msg provided by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) to device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during the resume() method call;
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * the device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
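
/*
 * State-to-action summary for the handler above:
 *
 *	pci_channel_io_normal       -> abort FCP rings,      CAN_RECOVER
 *	pci_channel_io_frozen       -> block I/O, disable    NEED_RESET
 *	                               interrupt and device
 *	pci_channel_io_perm_failure -> block and flush I/O,  DISCONNECT
 *	unknown                     -> treated like frozen,  NEED_RESET
 */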

/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after the PCI bus has
 * been reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device and
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * the device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);
}

/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT IOCBs to reserve
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}
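
/*
 * Reservation tiers implemented above (SLI4 only; SLI3 reserves none):
 *
 *	max_xri <= 100  -> 10		max_xri <= 1024 -> 100
 *	max_xri <= 256  -> 25		max_xri <= 1536 -> 150
 *	max_xri <= 512  -> 50		max_xri <= 2048 -> 200
 *					otherwise       -> 250
 */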

/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT + NVMET IOCBs to reserve
 **/
int
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

	if (phba->nvmet_support)
		max_xri += LPFC_NVMET_BUF_POST;
	return max_xri;
}


static void
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
	uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
	const struct firmware *fw)
{
	if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
	    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
	     magic_number != MAGIC_NUMER_G6) ||
	    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
	     magic_number != MAGIC_NUMER_G7))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3030 This firmware version is not supported on "
			"this HBA model. Device:%x Magic:%x Type:%x "
			"ID:%x Size %d %zd\n",
			phba->pcidev->device, magic_number, ftype, fid,
			fsize, fw->size);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3022 FW Download failed. Device:%x Magic:%x Type:%x "
			"ID:%x Size %d %zd\n",
			phba->pcidev->device, magic_number, ftype, fid,
			fsize, fw->size);
}


/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to lpfc hba data structure.
 *
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc) {
				lpfc_log_write_firmware_error(phba, offset,
					magic_number, ftype, fid, fsize, fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3024 Firmware update done: %d.\n", rc);
	return;
}
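
/*
 * Download layout sketch: the image is streamed through a ring of up to
 * LPFC_MBX_WR_CONFIG_MAX_BDE page-sized DMA buffers; each lpfc_wr_object()
 * call advances offset by however much the port accepted, so the outer
 * loop naturally resumes mid-image until fw->size is reached.
 */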

/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: INT_FW_UPGRADE or RUN_FW_UPGRADE.
 *
 * This routine is called to perform a Linux generic firmware upgrade on a
 * device that supports this feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	uint8_t file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					      file_name, &phba->pcidev->dev,
					      GFP_KERNEL, (void *)phba,
					      lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
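
/*
 * Usage sketch: the image is looked up by model name, e.g. a hypothetical
 * "LPe32000.grp" for a ModelName of "LPe32000". INT_FW_UPGRADE requests an
 * asynchronous download with lpfc_write_firmware() as the completion
 * callback; RUN_FW_UPGRADE blocks in request_firmware() and writes the
 * image synchronously.
 */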
12563
3772a991 12564/**
da0436e9 12565 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
3772a991
JS
12566 * @pdev: pointer to PCI device
12567 * @pid: pointer to PCI device identifier
12568 *
da0436e9
JS
12569 * This routine is called from the kernel's PCI subsystem to device with
12570 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
3772a991 12571 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
da0436e9
JS
12572 * information of the device and driver to see if the driver state that it
12573 * can support this kind of device. If the match is successful, the driver
12574 * core invokes this routine. If this routine determines it can claim the HBA,
12575 * it does all the initialization that it needs to do to handle the HBA
12576 * properly.
3772a991
JS
12577 *
12578 * Return code
12579 * 0 - driver can claim the device
12580 * negative value - driver can not claim the device
12581 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba   *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host  *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	phba->pport = NULL;
	lpfc_stop_port(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_unset_driver_resource;
	}
	/* Default to single EQ for non-MSI-X */
	if (phba->intr_type != MSIX) {
		phba->cfg_irq_chann = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
	}
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_disable_intr;
	}
	vport = phba->pport;
	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Configure sysfs attributes */
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* NVME support in FW earlier in the driver load corrects the
	 * FC4 type making a check for nvme_support unnecessary.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Create NVME binding with nvme_fc_transport. This
			 * ensures the vport is initialized. If the localport
			 * create fails, it should not unload the driver to
			 * support field issues.
			 */
			error = lpfc_nvme_create_localport(vport);
			if (error) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6004 NVME registration "
						"failed, error x%x\n",
						error);
			}
		}
	}

	/* Check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	/* Enable RAS FW log support */
	lpfc_sli4_ras_setup(phba);

	return 0;

out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
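
/*
 * Illustrative sketch (stub stages, not lpfc APIs) of the staged-init /
 * goto-unwind idiom used by the probe routine above: each error label
 * undoes exactly the stages that had completed before the failure, in
 * reverse order.
 */
static int sketch_stage_a_setup(void) { return 0; }	/* stub */
static int sketch_stage_b_setup(void) { return 0; }	/* stub */
static void sketch_stage_a_undo(void) { }		/* stub */

static int sketch_probe(void)
{
	int error;

	error = sketch_stage_a_setup();
	if (error)
		goto out;		/* nothing to unwind yet */

	error = sketch_stage_b_setup();
	if (error)
		goto out_undo_a;	/* unwind stage a only */

	return 0;			/* all stages up: claim the device */

out_undo_a:
	sketch_stage_a_undo();
out:
	return error;
}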

/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem for a device with
 * the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from the PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform ndlp cleanup on the physical port. The nvme and nvmet
	 * localports are destroyed afterwards to clean up all transport
	 * memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	/* De-allocate multi-XRI pools */
	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_io_free(phba);
	lpfc_free_iocb_list(phba);
	lpfc_sli4_hba_unset(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);
}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with the SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements for a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call. The driver therefore sets the device to PCI_D3hot state in
 * PCI config space instead of setting it according to the @msg provided by
 * the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with the SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that the driver
 * implements only the minimum PM requirements for a power-aware driver: all
 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
 * method are treated as SUSPEND, and the driver fully reinitializes its
 * device during the resume() method call. The device is therefore set to
 * PCI_D0 directly in PCI config space before restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
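
/*
 * Minimal sketch (hypothetical my_* names, not lpfc code) of the
 * restore-then-resave pattern used in the resume path above:
 * pci_restore_state() clears the device's saved_state flag, so the
 * state must be saved again or a later suspend cycle would have
 * nothing to restore.
 */
static int my_dev_reinit(struct pci_dev *pdev)
{
	return 0;	/* hypothetical stand-in for driver bring-up */
}

static int my_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* Re-save immediately; pci_restore_state() cleared saved_state */
	pci_save_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);
	return my_dev_reinit(pdev);
}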

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recovery
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recovery.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* Flush the outstanding NVME IOs if fc4 type enabled. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_sli_flush_nvme_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for the PCI slot being
 * permanently disabled. It blocks the SCSI transport layer traffic and
 * flushes the FCP pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);

	/* Flush the outstanding NVME IOs if fc4 type enabled. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		lpfc_sli_flush_nvme_rings(phba);
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with the SLI-4 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}

/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with the SLI-4 interface spec. It is called after the PCI bus has
 * been reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device and
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with the SLI-4 interface spec. It is called when kernel error
 * recovery tells the lpfc driver that it is ok to resume normal PCI
 * operation after PCI bus error recovery. After this call, traffic can
 * start to flow from this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at PCI device-specific information of the device and driver to see
 * whether the driver can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver cannot claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
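
/*
 * Sketch (hypothetical my_* names and register offset, not lpfc's
 * definitions) of the read-then-dispatch shape above: a single PCI
 * config-space dword, readable before any BARs are mapped, selects
 * which generation-specific probe path runs.
 */
#define MY_SLI_INTF_OFF		0x58	/* hypothetical offset */

static int my_probe_sli4(struct pci_dev *pdev) { return 0; }	/* stub */
static int my_probe_sli3(struct pci_dev *pdev) { return 0; }	/* stub */

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	u32 intf;

	if (pci_read_config_dword(pdev, MY_SLI_INTF_OFF, &intf))
		return -ENODEV;

	/* Hypothetical encoding: low nibble carries the SLI revision */
	return ((intf & 0xf) == 4) ? my_probe_sli4(pdev) :
				     my_probe_sli3(pdev);
}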

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}

/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the Flash Optimized Fabric (cfg_fof) flag is set. Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		if (phba->device_data_mem_pool)
			mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}
}

/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if RAS is supported by the adapter and, if so,
 * whether the configured PCI function and firmware-log buffer size allow
 * RAS firmware logging to be enabled.
 **/
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
	switch (phba->pcidev->device) {
	case PCI_DEVICE_ID_LANCER_G6_FC:
	case PCI_DEVICE_ID_LANCER_G7_FC:
		phba->ras_fwlog.ras_hwsupport = true;
		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
		    phba->cfg_ras_fwlog_buffsize)
			phba->ras_fwlog.ras_enabled = true;
		else
			phba->ras_fwlog.ras_enabled = false;
		break;
	default:
		phba->ras_fwlog.ras_hwsupport = false;
	}
}

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};
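
/*
 * Minimal sketch (hypothetical my_* names) of how the three callbacks
 * above plug into PCI error recovery: the core calls .error_detected
 * first, then .slot_reset if a reset was requested, then .resume once
 * traffic may restart.
 */
static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
					  pci_channel_state_t state)
{
	return (state == pci_channel_io_perm_failure) ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device_mem(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void my_resume(struct pci_dev *pdev)
{
	/* restart I/O to the device here */
}

static const struct pci_error_handlers my_err_handler = {
	.error_detected	= my_error_detected,
	.slot_reset	= my_slot_reset,
	.resume		= my_resume,
};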

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
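
/*
 * Userspace sketch (illustrative only): misc_register() on the structure
 * above creates /dev/lpfcmgmt with a dynamically assigned minor; with
 * empty fops the node mainly serves as a handle that management tools
 * can open.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/lpfcmgmt", O_RDWR);
 *
 *		if (fd < 0) {
 *			perror("open /dev/lpfcmgmt");
 *			return 1;
 *		}
 *		puts("lpfcmgmt node present");
 *		close(fd);
 *		return 0;
 *	}
 */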

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - FC attach transport failed
 * 	all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		return -ENOMEM;
	}
	lpfc_nvme_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_present_cpu = num_present_cpus();

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);