/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/sched/clock.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
static bool lpfc_pldv_detect;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
static void lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba);
static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
static void lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
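			/* The swab is done only once (guarded by init_key) so
			 * the ASCII key reaches the adapter as big-endian
			 * 32-bit words regardless of host endianness; on a
			 * big-endian CPU cpu_to_be32() is a no-op.
			 */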
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
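
	/* Each DUMP_MEMORY pass returns at most one mailbox payload's worth
	 * of VPD; the loop advances 'offset' until the adapter reports no
	 * more words or the DMP_VPD_SIZE buffer is full, then the collected
	 * image is parsed as a whole.
	 */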
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's asynchronous event
 * configuration mailbox command to the device. If the mailbox command
 * returns successfully, it sets the internal async event support flag to 1;
 * otherwise, it sets the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command that retrieves
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	dist = dist_char[prg->dist];
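
	/* For example, ver 3, rev 2, lev 1 with dist 'a' and num 7 renders
	 * as "3.21a7"; a dist of 3 with num 0 drops the trailing
	 * "<dist><num>" suffix in the branch below.
	 */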
	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename and fc_portname
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
static void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));
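
	/* The copy runs both ways: an unset driver node name is seeded from
	 * the login service parameters, while an administratively set name
	 * overwrites the service parameters that are sent on the wire.
	 */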

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name))) {
		vport->vport_flag |= FAWWPN_PARAM_CHG;

		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    vport->port_type == LPFC_PHYSICAL_PORT &&
		    phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
			if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
				phba->sli4_hba.fawwpn_flag &=
						~LPFC_FAWWPN_FABRIC;
			lpfc_printf_log(phba, KERN_INFO,
					LOG_SLI | LOG_DISCOVERY | LOG_ELS,
					"2701 FA-PWWN change WWPN from %llx to "
					"%llx: vflag x%x fawwpn_flag x%x\n",
					wwn_to_u64(vport->fc_portname.u.wwn),
					wwn_to_u64
					   (vport->fc_sparam.portName.u.wwn),
					vport->vport_flag,
					phba->sli4_hba.fawwpn_flag);
			memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			       sizeof(struct lpfc_name));
		}
	}

	if (vport->fc_portname.u.wwn[0] == 0)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
		return -EIO;
	}

	mp = pmb->ctx_buf;

	/* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
	 * longer needed. Prevent unintended ctx_buf access as the mbox is
	 * reused.
	 */
	memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;
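
		/* Each of the six IEEE bytes expands to two lowercase hex
		 * characters: nibble values 0-9 become '0'-'9' (0x30 + j)
		 * and 10-15 become 'a'-'f' (0x61 + j - 10), yielding a
		 * 12-character serial string.
		 */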
		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t)0x30 + (uint8_t)j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t)0x61 + (uint8_t)(j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t)0x30 + (uint8_t)j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t)0x61 + (uint8_t)(j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
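	/* fc_ratov holds R_A_TOV in seconds, so the ELS ring timeout below
	 * is two R_A_TOV intervals, scaled to milliseconds for mod_timer().
	 */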
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_sli4_refresh_params - update driver copy of params.
 * @phba: Pointer to HBA context object.
 *
 * This is called to refresh driver copy of dynamic fields from the
 * common_get_sli4_parameters descriptor.
 **/
int
lpfc_sli4_refresh_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;
	int length, rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return rc;
	}
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	phba->sli4_hba.pc_sli4_params.mi_cap =
		bf_get(cfg_mi_ver, mbx_sli4_parameters);

	/* Are we forcing MI off via module parameter? */
	if (phba->cfg_enable_mi)
		phba->sli4_hba.pc_sli4_params.mi_ver =
			bf_get(cfg_mi_ver, mbx_sli4_parameters);
	else
		phba->sli4_hba.pc_sli4_params.mi_ver = 0;

	phba->sli4_hba.pc_sli4_params.cmf =
			bf_get(cfg_cmf, mbx_sli4_parameters);
	phba->sli4_hba.pc_sli4_params.pls =
			bf_get(cfg_pvl, mbx_sli4_parameters);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
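		/* A command that returned MBX_BUSY in MBX_NOWAIT mode is
		 * still queued and will be released by its completion
		 * handler; any other outcome must be freed here.
		 */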
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
			    &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
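		/* The spliced buffers now belong to the put list, so fold
		 * both aborted-I/O counters into put_io_bufs and zero them.
		 */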
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !test_bit(FC_UNLOADING, &phba->pport->load_flag))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
}

/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * @work: pointer to the work structure embedded in lpfc_hba.
 *
 * This routine tracks per-eq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *eq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		eq = hdwq->hba_eq;

		/* Skip if we've already handled this eq's primary CPU */
		if (eq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
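		/* The final argument mirrors cpufreq's io_is_busy: passing 1
		 * keeps iowait out of the idle total, so time spent waiting
		 * on I/O counts as busy time for this heuristic.
		 */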
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;
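
		/* idle_percent now holds the idle share of the interval:
		 * busy% = 100 * busy_time / diff_wall, idle% = 100 - busy%,
		 * e.g. 200ms busy in a 1s window -> 20% busy, 80% idle.
		 */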
		if (idle_percent < 15)
			eq->poll_mode = LPFC_QUEUE_WORK;
		else
			eq->poll_mode = LPFC_THREADED_IRQ;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax ||
	    test_bit(FC_UNLOADING, &phba->pport->load_flag))
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu,
			    sizeof(*ena_delay), GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
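			/* One LPFC_EQ_DELAY_STEP of coalescing delay per
			 * ~1024 interrupts counted in the last window; the
			 * check below caps it at LPFC_MAX_AUTO_EQ_DELAY.
			 */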
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If a HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
static int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue a MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
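	/* With lpfc_enable_hba_heartbeat set, the timeout handler issues the
	 * heartbeat itself, so there is nothing to force here; otherwise set
	 * HBA_HBEAT_TMO so the next timer pop sends one MBX_HEARTBEAT.
	 */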
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if (phba->link_state == LPFC_HBA_ERROR ||
	    test_bit(FC_UNLOADING, &phba->pport->load_flag) ||
	    test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
		return;

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
			       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			       jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still outstanding: "
					"last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						- phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
			    (list_empty(&psli->mboxq))) {
				retval = lpfc_issue_hb_mbox(phba);
				if (retval) {
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
					goto out;
				}
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
						  phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
						"2857 Last completion time not "
						"updated in %d ms\n",
						jiffies_to_msecs(jiffies
							- phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Check to see if we want to force a MBX_HEARTBEAT */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state == LPFC_HBA_ERROR &&
	    test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers erratt. That could cause the I/Os
	 * dropped by the firmware. Error out the iocbs (I/O) on the txcmplq
	 * and let the SCSI layer retry them after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears
	 * the host status register.
	 */
	if (!phba->work_hs && !test_bit(FC_UNLOADING, &phba->pport->load_flag))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *)&board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error out the iocbs (I/O) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(event_data),
					  (char *)&event_data,
					  SCSI_NL_VID_TYPE_PCI |
					  PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;
	LPFC_MBOXQ_t *mboxq;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are no wait, the HBA has been reset and is not
	 * functional, thus we should clear
	 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		if (phba->sli.mbox_active) {
			mboxq = phba->sli.mbox_active;
			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
			__lpfc_mbox_cmpl_put(phba, mboxq);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			phba->sli.mbox_active = NULL;
		}
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
1984 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1985 * @phba: pointer to lpfc hba data structure.
1987 * This routine is invoked to handle the SLI4 HBA hardware error attention
1991 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1993 struct lpfc_vport *vport = phba->pport;
1994 uint32_t event_data;
1995 struct Scsi_Host *shost;
1997 struct lpfc_register portstat_reg = {0};
1998 uint32_t reg_err1, reg_err2;
1999 uint32_t uerrlo_reg, uemasklo_reg;
2000 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
2001 bool en_rn_msg = true;
2002 struct temp_event temp_event_data;
2003 struct lpfc_register portsmphr_reg;
2006 /* If the pci channel is offline, ignore possible errors, since
2007 * we cannot communicate with the pci card anyway.
2009 if (pci_channel_offline(phba->pcidev)) {
2010 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2011 "3166 pci channel is offline\n");
2012 lpfc_sli_flush_io_rings(phba);
2016 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
2017 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
2019 case LPFC_SLI_INTF_IF_TYPE_0:
2020 pci_rd_rc1 = lpfc_readl(
2021 phba->sli4_hba.u.if_type0.UERRLOregaddr,
2023 pci_rd_rc2 = lpfc_readl(
2024 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
2026 /* consider PCI bus read error as pci_channel_offline */
2027 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
2029 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
2030 lpfc_sli4_offline_eratt(phba);
2033 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2034 "7623 Checking UE recoverable");
2036 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
2037 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2038 &portsmphr_reg.word0))
2041 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
2043 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2044 LPFC_PORT_SEM_UE_RECOVERABLE)
2046 /* Sleep for 1 second before checking the SEMAPHORE */
2050 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2051 "4827 smphr_port_status x%x : Waited %dSec",
2052 smphr_port_status, i);
2054 /* Recoverable UE, reset the HBA device */
2055 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2056 LPFC_PORT_SEM_UE_RECOVERABLE) {
2057 for (i = 0; i < 20; i++) {
2059 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2060 &portsmphr_reg.word0) &&
2061 (LPFC_POST_STAGE_PORT_READY ==
2062 bf_get(lpfc_port_smphr_port_status,
2064 rc = lpfc_sli4_port_sta_fn_reset(phba,
2065 LPFC_MBX_NO_WAIT, en_rn_msg);
2068 lpfc_printf_log(phba, KERN_ERR,
2070 "4215 Failed to recover UE");
2075 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2076 "7624 Firmware not ready: Failing UE recovery,"
2077 " waited %dSec", i);
2078 phba->link_state = LPFC_HBA_ERROR;
2081 case LPFC_SLI_INTF_IF_TYPE_2:
2082 case LPFC_SLI_INTF_IF_TYPE_6:
2083 pci_rd_rc1 = lpfc_readl(
2084 phba->sli4_hba.u.if_type2.STATUSregaddr,
2085 &portstat_reg.word0);
2086 /* consider PCI bus read error as pci_channel_offline */
2087 if (pci_rd_rc1 == -EIO) {
2088 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2089 "3151 PCI bus read access failure: x%x\n",
2090 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2091 lpfc_sli4_offline_eratt(phba);
2094 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2095 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2096 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2097 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2098 "2889 Port Overtemperature event, "
2099 "taking port offline Data: x%x x%x\n",
2100 reg_err1, reg_err2);
2102 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2103 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2104 temp_event_data.event_code = LPFC_CRIT_TEMP;
2105 temp_event_data.data = 0xFFFFFFFF;
2107 shost = lpfc_shost_from_vport(phba->pport);
2108 fc_host_post_vendor_event(shost, fc_get_event_number(),
2109 sizeof(temp_event_data),
2110 (char *)&temp_event_data,
2111 SCSI_NL_VID_TYPE_PCI
2112 | PCI_VENDOR_ID_EMULEX);
2114 spin_lock_irq(&phba->hbalock);
2115 phba->over_temp_state = HBA_OVER_TEMP;
2116 spin_unlock_irq(&phba->hbalock);
2117 lpfc_sli4_offline_eratt(phba);
2120 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2121 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2122 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2123 "3143 Port Down: Firmware Update "
2126 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2127 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2128 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2129 "3144 Port Down: Debug Dump\n");
2130 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2131 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2132 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2133 "3145 Port Down: Provisioning\n");
2135 /* If resets are disabled then leave the HBA alone and return */
2136 if (!phba->cfg_enable_hba_reset)
2139 /* Check port status register for function reset */
2140 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2143 /* don't report event on forced debug dump */
2144 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2145 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2150 /* fall through for not able to recover */
2151 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2152 "3152 Unrecoverable error\n");
2153 lpfc_sli4_offline_eratt(phba);
2155 case LPFC_SLI_INTF_IF_TYPE_1:
2159 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2160 "3123 Report dump event to upper layer\n");
2161 /* Send an internal error event to mgmt application */
2162 lpfc_board_errevt_to_mgmt(phba);
2164 event_data = FC_REG_DUMP_EVENT;
2165 shost = lpfc_shost_from_vport(vport);
2166 fc_host_post_vendor_event(shost, fc_get_event_number(),
2167 sizeof(event_data), (char *) &event_data,
2168 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
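/*
 * Note: fc_host_post_vendor_event() delivers this dump event over the
 * FC transport's netlink channel, tagged with the Emulex PCI vendor ID
 * so that user space management applications can match it.
 */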
2172 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2173 * @phba: pointer to lpfc HBA data structure.
2175 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2176 * routine, invoked through the API jump table pointer in the lpfc_hba struct.
2180 * Any other value - error.
2183 lpfc_handle_eratt(struct lpfc_hba *phba)
2185 (*phba->lpfc_handle_eratt)(phba);
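/*
 * Note: phba->lpfc_handle_eratt points at the per-SLI-revision handler
 * (the SLI3 and SLI4 handlers above); the pointer is installed when the
 * driver sets up its SLI-revision API jump table, so this wrapper
 * dispatches without re-checking sli_rev.
 */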
2189 * lpfc_handle_latt - The HBA link event handler
2190 * @phba: pointer to lpfc hba data structure.
2192 * This routine is invoked from the worker thread to handle a HBA host
2193 * attention link event. SLI3 only.
2196 lpfc_handle_latt(struct lpfc_hba *phba)
2198 struct lpfc_vport *vport = phba->pport;
2199 struct lpfc_sli *psli = &phba->sli;
2201 volatile uint32_t control;
2204 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2207 goto lpfc_handle_latt_err_exit;
2210 rc = lpfc_mbox_rsrc_prep(phba, pmb);
2213 mempool_free(pmb, phba->mbox_mem_pool);
2214 goto lpfc_handle_latt_err_exit;
2217 /* Cleanup any outstanding ELS commands */
2218 lpfc_els_flush_all_cmd(phba);
2219 psli->slistat.link_event++;
2220 lpfc_read_topology(phba, pmb, pmb->ctx_buf);
2221 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2223 /* Block ELS IOCBs until we have processed this mbox command */
2224 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2225 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
2226 if (rc == MBX_NOT_FINISHED) {
2228 goto lpfc_handle_latt_free_mbuf;
2231 /* Clear Link Attention in HA REG */
2232 spin_lock_irq(&phba->hbalock);
2233 writel(HA_LATT, phba->HAregaddr);
2234 readl(phba->HAregaddr); /* flush */
2235 spin_unlock_irq(&phba->hbalock);
2239 lpfc_handle_latt_free_mbuf:
2240 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2241 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2242 lpfc_handle_latt_err_exit:
2243 /* Enable Link attention interrupts */
2244 spin_lock_irq(&phba->hbalock);
2245 psli->sli_flag |= LPFC_PROCESS_LA;
2246 control = readl(phba->HCregaddr);
2247 control |= HC_LAINT_ENA;
2248 writel(control, phba->HCregaddr);
2249 readl(phba->HCregaddr); /* flush */
2251 /* Clear Link Attention in HA REG */
2252 writel(HA_LATT, phba->HAregaddr);
2253 readl(phba->HAregaddr); /* flush */
2254 spin_unlock_irq(&phba->hbalock);
2255 lpfc_linkdown(phba);
2256 phba->link_state = LPFC_HBA_ERROR;
2258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2259 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2265 lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
2269 while (length > 0) {
2270 /* Look for Serial Number */
2271 if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) {
2278 phba->SerialNumber[j++] = vpd[(*pindex)++];
2282 phba->SerialNumber[j] = 0;
2284 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) {
2285 phba->vpd_flag |= VPD_MODEL_DESC;
2292 phba->ModelDesc[j++] = vpd[(*pindex)++];
2296 phba->ModelDesc[j] = 0;
2298 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) {
2299 phba->vpd_flag |= VPD_MODEL_NAME;
2306 phba->ModelName[j++] = vpd[(*pindex)++];
2310 phba->ModelName[j] = 0;
2312 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) {
2313 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2320 phba->ProgramType[j++] = vpd[(*pindex)++];
2324 phba->ProgramType[j] = 0;
2326 } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) {
2327 phba->vpd_flag |= VPD_PORT;
2334 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2335 (phba->sli4_hba.pport_name_sta ==
2336 LPFC_SLI4_PPNAME_GET)) {
2340 phba->Port[j++] = vpd[(*pindex)++];
2344 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2345 (phba->sli4_hba.pport_name_sta ==
2346 LPFC_SLI4_PPNAME_NON))
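/*
 * Note: the records matched above follow the PCI VPD read-only keyword
 * layout: a two-character keyword ("SN", "V1".."V4"), a one-byte length,
 * and then the value bytes. For example, a hypothetical record of
 * 'S' 'N' 0x04 'A' 'B' 'C' 'D' would leave "ABCD" in phba->SerialNumber.
 */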
2360 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2361 * @phba: pointer to lpfc hba data structure.
2362 * @vpd: pointer to the vital product data.
2363 * @len: length of the vital product data in bytes.
2365 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2366 * an array of characters. In this routine, the ModelName, ProgramType,
2367 * ModelDesc, and related fields of the phba data structure are populated.
2370 * 0 - pointer to the VPD passed in is NULL
2374 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2376 uint8_t lenlo, lenhi;
2386 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2387 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2388 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2390 while (!finished && (index < (len - 4))) {
2391 switch (vpd[index]) {
2399 i = ((((unsigned short)lenhi) << 8) + lenlo);
2408 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2409 if (Length > len - index)
2410 Length = len - index;
2412 lpfc_fill_vpd(phba, vpd, Length, &index);
2428 * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
2429 * @phba: pointer to lpfc hba data structure.
2430 * @mdp: pointer to the data structure to hold the derived model name.
2431 * @descp: pointer to the data structure to hold the derived description.
2433 * This routine retrieves the HBA's description based on its registered PCI device
2434 * ID. The @descp passed into this function points to an array of 256 chars. It
2435 * shall be returned with the model name, maximum speed, and the host bus type.
2436 * The @mdp passed into this function points to an array of 80 chars. When the
2437 * function returns, the @mdp will be filled with the model name.
2440 lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2442 uint16_t sub_dev_id = phba->pcidev->subsystem_device;
2443 char *model = "<Unknown>";
2446 switch (sub_dev_id) {
2447 case PCI_DEVICE_ID_CLRY_161E:
2450 case PCI_DEVICE_ID_CLRY_162E:
2453 case PCI_DEVICE_ID_CLRY_164E:
2456 case PCI_DEVICE_ID_CLRY_161P:
2459 case PCI_DEVICE_ID_CLRY_162P:
2462 case PCI_DEVICE_ID_CLRY_164P:
2465 case PCI_DEVICE_ID_CLRY_321E:
2468 case PCI_DEVICE_ID_CLRY_322E:
2471 case PCI_DEVICE_ID_CLRY_324E:
2474 case PCI_DEVICE_ID_CLRY_321P:
2477 case PCI_DEVICE_ID_CLRY_322P:
2480 case PCI_DEVICE_ID_CLRY_324P:
2483 case PCI_DEVICE_ID_TLFC_2XX2:
2487 case PCI_DEVICE_ID_TLFC_3162:
2491 case PCI_DEVICE_ID_TLFC_3322:
2500 if (mdp && mdp[0] == '\0')
2501 snprintf(mdp, 79, "%s", model);
2503 if (descp && descp[0] == '\0')
2504 snprintf(descp, 255,
2505 "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
2506 (tbolt) ? "ThunderLink FC " : "Celerity FC-",
2512 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2513 * @phba: pointer to lpfc hba data structure.
2514 * @mdp: pointer to the data structure to hold the derived model name.
2515 * @descp: pointer to the data structure to hold the derived description.
2517 * This routine retrieves the HBA's description based on its registered PCI device
2518 * ID. The @descp passed into this function points to an array of 256 chars. It
2519 * shall be returned with the model name, maximum speed, and the host bus type.
2520 * The @mdp passed into this function points to an array of 80 chars. When the
2521 * function returns, the @mdp will be filled with the model name.
2524 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2527 uint16_t dev_id = phba->pcidev->device;
2530 int oneConnect = 0; /* default is not a oneConnect */
2535 } m = {"<Unknown>", "", ""};
2537 if (mdp && mdp[0] != '\0'
2538 && descp && descp[0] != '\0')
2541 if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
2542 lpfc_get_atto_model_desc(phba, mdp, descp);
2546 if (phba->lmt & LMT_64Gb)
2548 else if (phba->lmt & LMT_32Gb)
2550 else if (phba->lmt & LMT_16Gb)
2552 else if (phba->lmt & LMT_10Gb)
2554 else if (phba->lmt & LMT_8Gb)
2556 else if (phba->lmt & LMT_4Gb)
2558 else if (phba->lmt & LMT_2Gb)
2560 else if (phba->lmt & LMT_1Gb)
2568 case PCI_DEVICE_ID_FIREFLY:
2569 m = (typeof(m)){"LP6000", "PCI",
2570 "Obsolete, Unsupported Fibre Channel Adapter"};
2572 case PCI_DEVICE_ID_SUPERFLY:
2573 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2574 m = (typeof(m)){"LP7000", "PCI", ""};
2576 m = (typeof(m)){"LP7000E", "PCI", ""};
2577 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2579 case PCI_DEVICE_ID_DRAGONFLY:
2580 m = (typeof(m)){"LP8000", "PCI",
2581 "Obsolete, Unsupported Fibre Channel Adapter"};
2583 case PCI_DEVICE_ID_CENTAUR:
2584 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2585 m = (typeof(m)){"LP9002", "PCI", ""};
2587 m = (typeof(m)){"LP9000", "PCI", ""};
2588 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2590 case PCI_DEVICE_ID_RFLY:
2591 m = (typeof(m)){"LP952", "PCI",
2592 "Obsolete, Unsupported Fibre Channel Adapter"};
2594 case PCI_DEVICE_ID_PEGASUS:
2595 m = (typeof(m)){"LP9802", "PCI-X",
2596 "Obsolete, Unsupported Fibre Channel Adapter"};
2598 case PCI_DEVICE_ID_THOR:
2599 m = (typeof(m)){"LP10000", "PCI-X",
2600 "Obsolete, Unsupported Fibre Channel Adapter"};
2602 case PCI_DEVICE_ID_VIPER:
2603 m = (typeof(m)){"LPX1000", "PCI-X",
2604 "Obsolete, Unsupported Fibre Channel Adapter"};
2606 case PCI_DEVICE_ID_PFLY:
2607 m = (typeof(m)){"LP982", "PCI-X",
2608 "Obsolete, Unsupported Fibre Channel Adapter"};
2610 case PCI_DEVICE_ID_TFLY:
2611 m = (typeof(m)){"LP1050", "PCI-X",
2612 "Obsolete, Unsupported Fibre Channel Adapter"};
2614 case PCI_DEVICE_ID_HELIOS:
2615 m = (typeof(m)){"LP11000", "PCI-X2",
2616 "Obsolete, Unsupported Fibre Channel Adapter"};
2618 case PCI_DEVICE_ID_HELIOS_SCSP:
2619 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2620 "Obsolete, Unsupported Fibre Channel Adapter"};
2622 case PCI_DEVICE_ID_HELIOS_DCSP:
2623 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2624 "Obsolete, Unsupported Fibre Channel Adapter"};
2626 case PCI_DEVICE_ID_NEPTUNE:
2627 m = (typeof(m)){"LPe1000", "PCIe",
2628 "Obsolete, Unsupported Fibre Channel Adapter"};
2630 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2631 m = (typeof(m)){"LPe1000-SP", "PCIe",
2632 "Obsolete, Unsupported Fibre Channel Adapter"};
2634 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2635 m = (typeof(m)){"LPe1002-SP", "PCIe",
2636 "Obsolete, Unsupported Fibre Channel Adapter"};
2638 case PCI_DEVICE_ID_BMID:
2639 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2641 case PCI_DEVICE_ID_BSMB:
2642 m = (typeof(m)){"LP111", "PCI-X2",
2643 "Obsolete, Unsupported Fibre Channel Adapter"};
2645 case PCI_DEVICE_ID_ZEPHYR:
2646 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2648 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2649 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2651 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2652 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2655 case PCI_DEVICE_ID_ZMID:
2656 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2658 case PCI_DEVICE_ID_ZSMB:
2659 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2661 case PCI_DEVICE_ID_LP101:
2662 m = (typeof(m)){"LP101", "PCI-X",
2663 "Obsolete, Unsupported Fibre Channel Adapter"};
2665 case PCI_DEVICE_ID_LP10000S:
2666 m = (typeof(m)){"LP10000-S", "PCI",
2667 "Obsolete, Unsupported Fibre Channel Adapter"};
2669 case PCI_DEVICE_ID_LP11000S:
2670 m = (typeof(m)){"LP11000-S", "PCI-X2",
2671 "Obsolete, Unsupported Fibre Channel Adapter"};
2673 case PCI_DEVICE_ID_LPE11000S:
2674 m = (typeof(m)){"LPe11000-S", "PCIe",
2675 "Obsolete, Unsupported Fibre Channel Adapter"};
2677 case PCI_DEVICE_ID_SAT:
2678 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2680 case PCI_DEVICE_ID_SAT_MID:
2681 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2683 case PCI_DEVICE_ID_SAT_SMB:
2684 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2686 case PCI_DEVICE_ID_SAT_DCSP:
2687 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2689 case PCI_DEVICE_ID_SAT_SCSP:
2690 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2692 case PCI_DEVICE_ID_SAT_S:
2693 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2695 case PCI_DEVICE_ID_PROTEUS_VF:
2696 m = (typeof(m)){"LPev12000", "PCIe IOV",
2697 "Obsolete, Unsupported Fibre Channel Adapter"};
2699 case PCI_DEVICE_ID_PROTEUS_PF:
2700 m = (typeof(m)){"LPev12000", "PCIe IOV",
2701 "Obsolete, Unsupported Fibre Channel Adapter"};
2703 case PCI_DEVICE_ID_PROTEUS_S:
2704 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2705 "Obsolete, Unsupported Fibre Channel Adapter"};
2707 case PCI_DEVICE_ID_TIGERSHARK:
2709 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2711 case PCI_DEVICE_ID_TOMCAT:
2713 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2715 case PCI_DEVICE_ID_FALCON:
2716 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2717 "EmulexSecure Fibre"};
2719 case PCI_DEVICE_ID_BALIUS:
2720 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2721 "Obsolete, Unsupported Fibre Channel Adapter"};
2723 case PCI_DEVICE_ID_LANCER_FC:
2724 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2726 case PCI_DEVICE_ID_LANCER_FC_VF:
2727 m = (typeof(m)){"LPe16000", "PCIe",
2728 "Obsolete, Unsupported Fibre Channel Adapter"};
2730 case PCI_DEVICE_ID_LANCER_FCOE:
2732 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2734 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2736 m = (typeof(m)){"OCe15100", "PCIe",
2737 "Obsolete, Unsupported FCoE"};
2739 case PCI_DEVICE_ID_LANCER_G6_FC:
2740 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2742 case PCI_DEVICE_ID_LANCER_G7_FC:
2743 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2745 case PCI_DEVICE_ID_LANCER_G7P_FC:
2746 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2748 case PCI_DEVICE_ID_SKYHAWK:
2749 case PCI_DEVICE_ID_SKYHAWK_VF:
2751 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2754 m = (typeof(m)){"Unknown", "", ""};
2758 if (mdp && mdp[0] == '\0')
2759 snprintf(mdp, 79, "%s", m.name);
2761 * OneConnect HBAs require special processing; they are all initiators
2762 * and we put the port number on the end.
2764 if (descp && descp[0] == '\0') {
2766 snprintf(descp, 255,
2767 "Emulex OneConnect %s, %s Initiator %s",
2770 else if (max_speed == 0)
2771 snprintf(descp, 255,
2773 m.name, m.bus, m.function);
2775 snprintf(descp, 255,
2776 "Emulex %s %d%s %s %s",
2777 m.name, max_speed, (GE) ? "GE" : "Gb",
2783 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
2784 * @phba: pointer to lpfc hba data structure.
2785 * @pring: pointer to a IOCB ring.
2786 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2788 * This routine posts a given number of IOCBs with the associated DMA buffer
2789 * descriptors specified by the cnt argument to the given IOCB ring.
2792 * The number of IOCBs NOT able to be posted to the IOCB ring.
2795 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2798 struct lpfc_iocbq *iocb;
2799 struct lpfc_dmabuf *mp1, *mp2;
2801 cnt += pring->missbufcnt;
2803 /* While there are buffers to post */
2805 /* Allocate buffer for command iocb */
2806 iocb = lpfc_sli_get_iocbq(phba);
2808 pring->missbufcnt = cnt;
2813 /* 2 buffers can be posted per command */
2814 /* Allocate buffer to post */
2815 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2817 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2818 if (!mp1 || !mp1->virt) {
2820 lpfc_sli_release_iocbq(phba, iocb);
2821 pring->missbufcnt = cnt;
2825 INIT_LIST_HEAD(&mp1->list);
2826 /* Allocate buffer to post */
2828 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2830 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2832 if (!mp2 || !mp2->virt) {
2834 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2836 lpfc_sli_release_iocbq(phba, iocb);
2837 pring->missbufcnt = cnt;
2841 INIT_LIST_HEAD(&mp2->list);
2846 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2847 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2848 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2849 icmd->ulpBdeCount = 1;
2852 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2853 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2854 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2856 icmd->ulpBdeCount = 2;
2859 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2862 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2864 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2868 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2872 lpfc_sli_release_iocbq(phba, iocb);
2873 pring->missbufcnt = cnt;
2876 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2878 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2880 pring->missbufcnt = 0;
2885 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2886 * @phba: pointer to lpfc hba data structure.
2888 * This routine posts initial receive IOCB buffers to the ELS ring. The
2889 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2890 * set to 64 IOCBs. SLI3 only.
2893 * 0 - success (currently always success)
2896 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2898 struct lpfc_sli *psli = &phba->sli;
2900 /* Ring 0, ELS / CT buffers */
2901 lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2902 /* Ring 2 - FCP no buffers needed */
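/*
 * Note: as the "2 buffers can be posted per command" comment in
 * lpfc_sli3_post_buffer() indicates, each QUE_RING_BUF64_CN iocb can
 * carry two buffers (mp1/mp2), so the LPFC_BUF_RING0 allotment above is
 * posted roughly two buffers per iocb.
 */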
2907 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
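/*
 * S(N, V) rotates the 32-bit value V left by N bits, e.g.
 * S(5, 0x80000001) == 0x00000030. It is used by the SHA-1 style hash
 * iteration below.
 */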
2910 * lpfc_sha_init - Set up initial array of hash table entries
2911 * @HashResultPointer: pointer to an array used as the hash table.
2913 * This routine sets up the initial values in the array of hash table entries
2917 lpfc_sha_init(uint32_t * HashResultPointer)
2919 HashResultPointer[0] = 0x67452301;
2920 HashResultPointer[1] = 0xEFCDAB89;
2921 HashResultPointer[2] = 0x98BADCFE;
2922 HashResultPointer[3] = 0x10325476;
2923 HashResultPointer[4] = 0xC3D2E1F0;
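/*
 * The values above are the standard SHA-1 initial chaining values
 * (H0..H4 from FIPS 180-1).
 */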
2927 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2928 * @HashResultPointer: pointer to an initial/result hash table.
2929 * @HashWorkingPointer: pointer to a working hash table.
2931 * This routine iterates an initial hash table pointed to by @HashResultPointer
2932 * with the values from the working hash table pointed to by @HashWorkingPointer.
2933 * The results are put back into the initial hash table, returned through
2934 * the @HashResultPointer as the result hash table.
2937 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2941 uint32_t A, B, C, D, E;
2944 HashWorkingPointer[t] =
2946 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2948 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2949 } while (++t <= 79);
2951 A = HashResultPointer[0];
2952 B = HashResultPointer[1];
2953 C = HashResultPointer[2];
2954 D = HashResultPointer[3];
2955 E = HashResultPointer[4];
2959 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2960 } else if (t < 40) {
2961 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2962 } else if (t < 60) {
2963 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2965 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2967 TEMP += S(5, A) + E + HashWorkingPointer[t];
2973 } while (++t <= 79);
2975 HashResultPointer[0] += A;
2976 HashResultPointer[1] += B;
2977 HashResultPointer[2] += C;
2978 HashResultPointer[3] += D;
2979 HashResultPointer[4] += E;
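/*
 * Note: the round functions and constants above (0x5A827999, 0x6ED9EBA1,
 * 0x8F1BBCDC, 0xCA62C1D6) and the 80-round schedule match the SHA-1
 * compression function; the driver uses it here only to derive the
 * challenge-key hash in lpfc_hba_init() below.
 */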
2984 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2985 * @RandomChallenge: pointer to the entry of host challenge random number array.
2986 * @HashWorking: pointer to the entry of the working hash array.
2988 * This routine calculates the working hash array referred by @HashWorking
2989 * from the challenge random numbers associated with the host, referred by
2990 * @RandomChallenge. The result is put into the entry of the working hash
2991 * array and returned by reference through @HashWorking.
2994 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2996 *HashWorking = (*RandomChallenge ^ *HashWorking);
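/*
 * i.e. *HashWorking ^= *RandomChallenge for the single entry passed in;
 * lpfc_hba_init() below applies this to the first 7 entries of the
 * working array.
 */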
3000 * lpfc_hba_init - Perform special handling for LC HBA initialization
3001 * @phba: pointer to lpfc hba data structure.
3002 * @hbainit: pointer to an array of unsigned 32-bit integers.
3004 * This routine performs the special handling for LC HBA initialization.
3007 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
3010 uint32_t *HashWorking;
3011 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
3013 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
3017 HashWorking[0] = HashWorking[78] = *pwwnn++;
3018 HashWorking[1] = HashWorking[79] = *pwwnn;
3020 for (t = 0; t < 7; t++)
3021 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
3023 lpfc_sha_init(hbainit);
3024 lpfc_sha_iterate(hbainit, HashWorking);
3029 * lpfc_cleanup - Performs vport cleanups before deleting a vport
3030 * @vport: pointer to a virtual N_Port data structure.
3032 * This routine performs the necessary cleanups before deleting the @vport.
3033 * It invokes the discovery state machine to perform necessary state
3034 * transitions and to release the ndlps associated with the @vport. Note,
3035 * the physical port is treated as @vport 0.
3038 lpfc_cleanup(struct lpfc_vport *vport)
3040 struct lpfc_hba *phba = vport->phba;
3041 struct lpfc_nodelist *ndlp, *next_ndlp;
3044 if (phba->link_state > LPFC_LINK_DOWN)
3045 lpfc_port_link_failure(vport);
3047 /* Clean up VMID resources */
3048 if (lpfc_is_vmid_enabled(phba))
3049 lpfc_vmid_vport_cleanup(vport);
3051 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3052 if (vport->port_type != LPFC_PHYSICAL_PORT &&
3053 ndlp->nlp_DID == Fabric_DID) {
3054 /* Just free up ndlp with Fabric_DID for vports */
3059 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
3060 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3065 /* Fabric Ports not in UNMAPPED state are cleaned up in the
3068 if (ndlp->nlp_type & NLP_FABRIC &&
3069 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
3070 lpfc_disc_state_machine(vport, ndlp, NULL,
3071 NLP_EVT_DEVICE_RECOVERY);
3073 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
3074 lpfc_disc_state_machine(vport, ndlp, NULL,
3078 /* This is a special case flush to return all
3079 * IOs before entering this loop. There are
3080 * two points in the code where a flush is
3081 * avoided if the FC_UNLOADING flag is set.
3082 * One is in the multipool destroy
3083 * (this prevents a crash) and the other is
3084 * in the nvme abort handler (also prevents
3085 * a crash). Both of these exceptions are
3086 * cases where the slot is still accessible.
3087 * The flush here is only when the pci slot
3090 if (test_bit(FC_UNLOADING, &vport->load_flag) &&
3091 pci_channel_offline(phba->pcidev))
3092 lpfc_sli_flush_io_rings(vport->phba);
3094 /* At this point, ALL ndlp's should be gone
3095 * because of the previous NLP_EVT_DEVICE_RM.
3096 * Let's wait for this to happen, if needed.
3098 while (!list_empty(&vport->fc_nodes)) {
3100 lpfc_printf_vlog(vport, KERN_ERR,
3102 "0233 Nodelist not empty\n");
3103 list_for_each_entry_safe(ndlp, next_ndlp,
3104 &vport->fc_nodes, nlp_listp) {
3105 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
3107 "0282 did:x%x ndlp:x%px "
3108 "refcnt:%d xflags x%x nflag x%x\n",
3109 ndlp->nlp_DID, (void *)ndlp,
3110 kref_read(&ndlp->kref),
3111 ndlp->fc4_xpt_flags,
3117 /* Wait for any activity on ndlps to settle */
3120 lpfc_cleanup_vports_rrqs(vport, NULL);
3124 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
3125 * @vport: pointer to a virtual N_Port data structure.
3127 * This routine stops all the timers associated with a @vport. This function
3128 * is invoked before disabling or deleting a @vport. Note that the physical
3129 * port is treated as @vport 0.
3132 lpfc_stop_vport_timers(struct lpfc_vport *vport)
3134 del_timer_sync(&vport->els_tmofunc);
3135 del_timer_sync(&vport->delayed_disc_tmo);
3136 lpfc_can_disctmo(vport);
3141 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3142 * @phba: pointer to lpfc hba data structure.
3144 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
3145 * caller of this routine should already hold the host lock.
3148 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3150 /* Clear pending FCF rediscovery wait flag */
3151 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3153 /* Now, try to stop the timer */
3154 del_timer(&phba->fcf.redisc_wait);
3158 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3159 * @phba: pointer to lpfc hba data structure.
3161 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3162 * checks whether the FCF rediscovery wait timer is pending with the host
3163 * lock held before proceeding with disabling the timer and clearing the
3164 * wait timer pending flag.
3167 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3169 spin_lock_irq(&phba->hbalock);
3170 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3171 /* FCF rediscovery timer already fired or stopped */
3172 spin_unlock_irq(&phba->hbalock);
3175 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3176 /* Clear failover in progress flags */
3177 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3178 spin_unlock_irq(&phba->hbalock);
3182 * lpfc_cmf_stop - Stop CMF processing
3183 * @phba: pointer to lpfc hba data structure.
3185 * This is called when the link goes down or if CMF mode is turned OFF.
3186 * It is also called when going offline or unloading, just before the
3187 * congestion info buffer is unregistered.
3190 lpfc_cmf_stop(struct lpfc_hba *phba)
3193 struct lpfc_cgn_stat *cgs;
3195 /* We only do something if CMF is enabled */
3196 if (!phba->sli4_hba.pc_sli4_params.cmf)
3199 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3200 "6221 Stop CMF / Cancel Timer\n");
3202 /* Cancel the CMF timer */
3203 hrtimer_cancel(&phba->cmf_stats_timer);
3204 hrtimer_cancel(&phba->cmf_timer);
3206 /* Zero CMF counters */
3207 atomic_set(&phba->cmf_busy, 0);
3208 for_each_present_cpu(cpu) {
3209 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3210 atomic64_set(&cgs->total_bytes, 0);
3211 atomic64_set(&cgs->rcv_bytes, 0);
3212 atomic_set(&cgs->rx_io_cnt, 0);
3213 atomic64_set(&cgs->rx_latency, 0);
3215 atomic_set(&phba->cmf_bw_wait, 0);
3217 /* Resume any blocked IO - Queue unblock on workqueue */
3218 queue_work(phba->wq, &phba->unblock_request_work);
3221 static inline uint64_t
3222 lpfc_get_max_line_rate(struct lpfc_hba *phba)
3224 uint64_t rate = lpfc_sli_port_speed_get(phba);
3226 return ((((unsigned long)rate) * 1024 * 1024) / 10);
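/*
 * The speed returned by lpfc_sli_port_speed_get() is expressed in
 * megabits per second; multiplying by 1024 * 1024 gives bits per second
 * and dividing by 10 (approximately 10 encoded bits per byte on the
 * wire) gives a maximum line rate in bytes per second.
 */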
3230 lpfc_cmf_signal_init(struct lpfc_hba *phba)
3232 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3233 "6223 Signal CMF init\n");
3235 /* Use the new fc_linkspeed to recalculate */
3236 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3237 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3238 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3239 phba->cmf_interval_rate, 1000);
3240 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3242 /* This is a signal to firmware to sync up CMF BW with link speed */
3243 lpfc_issue_cmf_sync_wqe(phba, 0, 0);
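/*
 * Illustrative sizing (hypothetical numbers): a 32G link reported as
 * 32000 Mb/s gives a cmf_max_line_rate of roughly 3.3 GB/s, so the
 * per-interval byte budget computed above is
 * cmf_max_line_rate * LPFC_CMF_INTERVAL / 1000 bytes.
 */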
3247 * lpfc_cmf_start - Start CMF processing
3248 * @phba: pointer to lpfc hba data structure.
3250 * This is called when the link comes up or if CMF mode is changed
3251 * from OFF to Monitor or Managed.
3254 lpfc_cmf_start(struct lpfc_hba *phba)
3256 struct lpfc_cgn_stat *cgs;
3259 /* We only do something if CMF is enabled */
3260 if (!phba->sli4_hba.pc_sli4_params.cmf ||
3261 phba->cmf_active_mode == LPFC_CFG_OFF)
3264 /* Reinitialize congestion buffer info */
3265 lpfc_init_congestion_buf(phba);
3267 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3268 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3269 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3270 atomic_set(&phba->cgn_sync_warn_cnt, 0);
3272 atomic_set(&phba->cmf_busy, 0);
3273 for_each_present_cpu(cpu) {
3274 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3275 atomic64_set(&cgs->total_bytes, 0);
3276 atomic64_set(&cgs->rcv_bytes, 0);
3277 atomic_set(&cgs->rx_io_cnt, 0);
3278 atomic64_set(&cgs->rx_latency, 0);
3280 phba->cmf_latency.tv_sec = 0;
3281 phba->cmf_latency.tv_nsec = 0;
3283 lpfc_cmf_signal_init(phba);
3285 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3286 "6222 Start CMF / Timer\n");
3288 phba->cmf_timer_cnt = 0;
3289 hrtimer_start(&phba->cmf_timer,
3290 ktime_set(0, LPFC_CMF_INTERVAL * NSEC_PER_MSEC),
3292 hrtimer_start(&phba->cmf_stats_timer,
3293 ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC),
3295 /* Setup for latency check in IO cmpl routines */
3296 ktime_get_real_ts64(&phba->cmf_latency);
3298 atomic_set(&phba->cmf_bw_wait, 0);
3299 atomic_set(&phba->cmf_stop_io, 0);
3303 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3304 * @phba: pointer to lpfc hba data structure.
3306 * This routine stops all the timers associated with a HBA. This function is
3307 * invoked before either putting a HBA offline or unloading the driver.
3310 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3313 lpfc_stop_vport_timers(phba->pport);
3314 cancel_delayed_work_sync(&phba->eq_delay_work);
3315 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3316 del_timer_sync(&phba->sli.mbox_tmo);
3317 del_timer_sync(&phba->fabric_block_timer);
3318 del_timer_sync(&phba->eratt_poll);
3319 del_timer_sync(&phba->hb_tmofunc);
3320 if (phba->sli_rev == LPFC_SLI_REV4) {
3321 del_timer_sync(&phba->rrq_tmr);
3322 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3324 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3326 switch (phba->pci_dev_grp) {
3327 case LPFC_PCI_DEV_LP:
3328 /* Stop any LightPulse device specific driver timers */
3329 del_timer_sync(&phba->fcp_poll_timer);
3331 case LPFC_PCI_DEV_OC:
3332 /* Stop any OneConnect device specific driver timers */
3333 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3336 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3337 "0297 Invalid device group (x%x)\n",
3345 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3346 * @phba: pointer to lpfc hba data structure.
3347 * @mbx_action: flag for mailbox no wait action.
3349 * This routine marks a HBA's management interface as blocked. Once the HBA's
3350 * management interface is marked as blocked, all the user space access to
3351 * the HBA, whether they are from sysfs interface or libdfc interface will
3352 * all be blocked. The HBA is set to block the management interface when the
3353 * driver prepares the HBA interface for online or offline.
3356 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3358 unsigned long iflag;
3359 uint8_t actcmd = MBX_HEARTBEAT;
3360 unsigned long timeout;
3362 spin_lock_irqsave(&phba->hbalock, iflag);
3363 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3364 spin_unlock_irqrestore(&phba->hbalock, iflag);
3365 if (mbx_action == LPFC_MBX_NO_WAIT)
3367 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3368 spin_lock_irqsave(&phba->hbalock, iflag);
3369 if (phba->sli.mbox_active) {
3370 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3371 /* Determine how long we might wait for the active mailbox
3372 * command to be gracefully completed by firmware.
3374 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3375 phba->sli.mbox_active) * 1000) + jiffies;
3377 spin_unlock_irqrestore(&phba->hbalock, iflag);
3379 /* Wait for the outstanding mailbox command to complete */
3380 while (phba->sli.mbox_active) {
3381 /* Check active mailbox complete status every 2ms */
3383 if (time_after(jiffies, timeout)) {
3384 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3385 "2813 Mgmt IO is Blocked %x "
3386 "- mbox cmd %x still active\n",
3387 phba->sli.sli_flag, actcmd);
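/*
 * Note: when no mailbox command is active the wait above is bounded by
 * LPFC_MBOX_TMO seconds; otherwise the bound comes from
 * lpfc_mbox_tmo_val() for the command still in flight.
 */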
3394 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3395 * @phba: pointer to lpfc hba data structure.
3397 * Allocate RPIs for all active remote nodes. This is needed whenever
3398 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3399 * is to fix up the temporary rpi assignments.
3402 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3404 struct lpfc_nodelist *ndlp, *next_ndlp;
3405 struct lpfc_vport **vports;
3408 if (phba->sli_rev != LPFC_SLI_REV4)
3411 vports = lpfc_create_vport_work_array(phba);
3415 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3416 if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
3419 list_for_each_entry_safe(ndlp, next_ndlp,
3420 &vports[i]->fc_nodes,
3422 rpi = lpfc_sli4_alloc_rpi(phba);
3423 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3424 /* TODO print log? */
3427 ndlp->nlp_rpi = rpi;
3428 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3429 LOG_NODE | LOG_DISCOVERY,
3430 "0009 Assign RPI x%x to ndlp x%px "
3431 "DID:x%06x flg:x%x\n",
3432 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3436 lpfc_destroy_vport_work_array(phba, vports);
3440 * lpfc_create_expedite_pool - create expedite pool
3441 * @phba: pointer to lpfc hba data structure.
3443 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3444 * to expedite pool. Mark them as expedite.
3446 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3448 struct lpfc_sli4_hdw_queue *qp;
3449 struct lpfc_io_buf *lpfc_ncmd;
3450 struct lpfc_io_buf *lpfc_ncmd_next;
3451 struct lpfc_epd_pool *epd_pool;
3452 unsigned long iflag;
3454 epd_pool = &phba->epd_pool;
3455 qp = &phba->sli4_hba.hdwq[0];
3457 spin_lock_init(&epd_pool->lock);
3458 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3459 spin_lock(&epd_pool->lock);
3460 INIT_LIST_HEAD(&epd_pool->list);
3461 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3462 &qp->lpfc_io_buf_list_put, list) {
3463 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3464 lpfc_ncmd->expedite = true;
3467 if (epd_pool->count >= XRI_BATCH)
3470 spin_unlock(&epd_pool->lock);
3471 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
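/*
 * Note: at most XRI_BATCH buffers are moved (the count check above), so
 * the expedite pool is a small reserve of XRIs taken from HWQ 0 and
 * pre-marked for expedite requests.
 */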
3475 * lpfc_destroy_expedite_pool - destroy expedite pool
3476 * @phba: pointer to lpfc hba data structure.
3478 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3479 * of HWQ 0 and clears the expedite mark.
3481 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3483 struct lpfc_sli4_hdw_queue *qp;
3484 struct lpfc_io_buf *lpfc_ncmd;
3485 struct lpfc_io_buf *lpfc_ncmd_next;
3486 struct lpfc_epd_pool *epd_pool;
3487 unsigned long iflag;
3489 epd_pool = &phba->epd_pool;
3490 qp = &phba->sli4_hba.hdwq[0];
3492 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3493 spin_lock(&epd_pool->lock);
3494 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3495 &epd_pool->list, list) {
3496 list_move_tail(&lpfc_ncmd->list,
3497 &qp->lpfc_io_buf_list_put);
3498 lpfc_ncmd->expedite = false;
3502 spin_unlock(&epd_pool->lock);
3503 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3507 * lpfc_create_multixri_pools - create multi-XRI pools
3508 * @phba: pointer to lpfc hba data structure.
3510 * This routine initializes the public and private pools per HWQ, then moves
3511 * XRIs from lpfc_io_buf_list_put to the public pool. The high and low
3512 * watermarks are also set.
3514 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3519 struct lpfc_io_buf *lpfc_ncmd;
3520 struct lpfc_io_buf *lpfc_ncmd_next;
3521 unsigned long iflag;
3522 struct lpfc_sli4_hdw_queue *qp;
3523 struct lpfc_multixri_pool *multixri_pool;
3524 struct lpfc_pbl_pool *pbl_pool;
3525 struct lpfc_pvt_pool *pvt_pool;
3527 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3528 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3529 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3530 phba->sli4_hba.io_xri_cnt);
3532 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3533 lpfc_create_expedite_pool(phba);
3535 hwq_count = phba->cfg_hdw_queue;
3536 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3538 for (i = 0; i < hwq_count; i++) {
3539 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3541 if (!multixri_pool) {
3542 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3543 "1238 Failed to allocate memory for "
3546 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3547 lpfc_destroy_expedite_pool(phba);
3551 qp = &phba->sli4_hba.hdwq[j];
3552 kfree(qp->p_multixri_pool);
3555 phba->cfg_xri_rebalancing = 0;
3559 qp = &phba->sli4_hba.hdwq[i];
3560 qp->p_multixri_pool = multixri_pool;
3562 multixri_pool->xri_limit = count_per_hwq;
3563 multixri_pool->rrb_next_hwqid = i;
3565 /* Deal with public free xri pool */
3566 pbl_pool = &multixri_pool->pbl_pool;
3567 spin_lock_init(&pbl_pool->lock);
3568 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3569 spin_lock(&pbl_pool->lock);
3570 INIT_LIST_HEAD(&pbl_pool->list);
3571 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3572 &qp->lpfc_io_buf_list_put, list) {
3573 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3577 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3578 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3579 pbl_pool->count, i);
3580 spin_unlock(&pbl_pool->lock);
3581 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3583 /* Deal with private free xri pool */
3584 pvt_pool = &multixri_pool->pvt_pool;
3585 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3586 pvt_pool->low_watermark = XRI_BATCH;
3587 spin_lock_init(&pvt_pool->lock);
3588 spin_lock_irqsave(&pvt_pool->lock, iflag);
3589 INIT_LIST_HEAD(&pvt_pool->list);
3590 pvt_pool->count = 0;
3591 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
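/*
 * Resulting per-HWQ topology (illustrative, hypothetical numbers): with
 * io_xri_cnt = 2048 and cfg_hdw_queue = 8, each HWQ gets an xri_limit of
 * 256, a private pool high watermark of 128 (xri_limit / 2), and a low
 * watermark of XRI_BATCH.
 */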
3596 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3597 * @phba: pointer to lpfc hba data structure.
3599 * This routine returns XRIs from the public/private pools to lpfc_io_buf_list_put.
3601 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3605 struct lpfc_io_buf *lpfc_ncmd;
3606 struct lpfc_io_buf *lpfc_ncmd_next;
3607 unsigned long iflag;
3608 struct lpfc_sli4_hdw_queue *qp;
3609 struct lpfc_multixri_pool *multixri_pool;
3610 struct lpfc_pbl_pool *pbl_pool;
3611 struct lpfc_pvt_pool *pvt_pool;
3613 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3614 lpfc_destroy_expedite_pool(phba);
3616 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
3617 lpfc_sli_flush_io_rings(phba);
3619 hwq_count = phba->cfg_hdw_queue;
3621 for (i = 0; i < hwq_count; i++) {
3622 qp = &phba->sli4_hba.hdwq[i];
3623 multixri_pool = qp->p_multixri_pool;
3627 qp->p_multixri_pool = NULL;
3629 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3631 /* Deal with public free xri pool */
3632 pbl_pool = &multixri_pool->pbl_pool;
3633 spin_lock(&pbl_pool->lock);
3635 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3636 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3637 pbl_pool->count, i);
3639 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3640 &pbl_pool->list, list) {
3641 list_move_tail(&lpfc_ncmd->list,
3642 &qp->lpfc_io_buf_list_put);
3647 INIT_LIST_HEAD(&pbl_pool->list);
3648 pbl_pool->count = 0;
3650 spin_unlock(&pbl_pool->lock);
3652 /* Deal with private free xri pool */
3653 pvt_pool = &multixri_pool->pvt_pool;
3654 spin_lock(&pvt_pool->lock);
3656 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3657 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3658 pvt_pool->count, i);
3660 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3661 &pvt_pool->list, list) {
3662 list_move_tail(&lpfc_ncmd->list,
3663 &qp->lpfc_io_buf_list_put);
3668 INIT_LIST_HEAD(&pvt_pool->list);
3669 pvt_pool->count = 0;
3671 spin_unlock(&pvt_pool->lock);
3672 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3674 kfree(multixri_pool);
3679 * lpfc_online - Initialize and bring a HBA online
3680 * @phba: pointer to lpfc hba data structure.
3682 * This routine initializes the HBA and brings a HBA online. During this
3683 * process, the management interface is blocked to prevent user space access
3684 * to the HBA from interfering with the driver initialization.
3691 lpfc_online(struct lpfc_hba *phba)
3693 struct lpfc_vport *vport;
3694 struct lpfc_vport **vports;
3696 bool vpis_cleared = false;
3700 vport = phba->pport;
3702 if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
3705 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3706 "0458 Bring Adapter online\n");
3708 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3710 if (phba->sli_rev == LPFC_SLI_REV4) {
3711 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3712 lpfc_unblock_mgmt_io(phba);
3715 spin_lock_irq(&phba->hbalock);
3716 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3717 vpis_cleared = true;
3718 spin_unlock_irq(&phba->hbalock);
3720 /* Reestablish the local initiator port.
3721 * The offline process destroyed the previous lport.
3723 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3724 !phba->nvmet_support) {
3725 error = lpfc_nvme_create_localport(phba->pport);
3727 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3728 "6132 NVME restore reg failed "
3729 "on nvmei error x%x\n", error);
3732 lpfc_sli_queue_init(phba);
3733 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3734 lpfc_unblock_mgmt_io(phba);
3739 vports = lpfc_create_vport_work_array(phba);
3740 if (vports != NULL) {
3741 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3742 clear_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag);
3743 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3744 set_bit(FC_VPORT_NEEDS_REG_VPI,
3745 &vports[i]->fc_flag);
3746 if (phba->sli_rev == LPFC_SLI_REV4) {
3747 set_bit(FC_VPORT_NEEDS_INIT_VPI,
3748 &vports[i]->fc_flag);
3749 if ((vpis_cleared) &&
3750 (vports[i]->port_type !=
3751 LPFC_PHYSICAL_PORT))
3756 lpfc_destroy_vport_work_array(phba, vports);
3758 if (phba->cfg_xri_rebalancing)
3759 lpfc_create_multixri_pools(phba);
3761 lpfc_cpuhp_add(phba);
3763 lpfc_unblock_mgmt_io(phba);
3768 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3769 * @phba: pointer to lpfc hba data structure.
3771 * This routine marks a HBA's management interface as not blocked. Once the
3772 * HBA's management interface is marked as not blocked, all user space
3773 * access to the HBA, whether from the sysfs interface or the libdfc
3774 * interface, will be allowed. The HBA is set to block the management interface
3775 * when the driver prepares the HBA interface for online or offline and then
3776 * set to unblock the management interface afterwards.
3779 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3781 unsigned long iflag;
3783 spin_lock_irqsave(&phba->hbalock, iflag);
3784 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3785 spin_unlock_irqrestore(&phba->hbalock, iflag);
3789 * lpfc_offline_prep - Prepare a HBA to be brought offline
3790 * @phba: pointer to lpfc hba data structure.
3791 * @mbx_action: flag for mailbox shutdown action.
3793 * This routine is invoked to prepare a HBA to be brought offline. It performs
3794 * unregistration login to all the nodes on all vports and flushes the mailbox
3795 * queue to make it ready to be brought offline.
3798 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3800 struct lpfc_vport *vport = phba->pport;
3801 struct lpfc_nodelist *ndlp, *next_ndlp;
3802 struct lpfc_vport **vports;
3803 struct Scsi_Host *shost;
3808 if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
3811 lpfc_block_mgmt_io(phba, mbx_action);
3813 lpfc_linkdown(phba);
3815 offline = pci_channel_offline(phba->pcidev);
3816 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
3818 /* Issue an unreg_login to all nodes on all vports */
3819 vports = lpfc_create_vport_work_array(phba);
3820 if (vports != NULL) {
3821 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3822 if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
3824 shost = lpfc_shost_from_vport(vports[i]);
3825 spin_lock_irq(shost->host_lock);
3826 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3827 spin_unlock_irq(shost->host_lock);
3828 set_bit(FC_VPORT_NEEDS_REG_VPI, &vports[i]->fc_flag);
3829 clear_bit(FC_VFI_REGISTERED, &vports[i]->fc_flag);
3831 list_for_each_entry_safe(ndlp, next_ndlp,
3832 &vports[i]->fc_nodes,
3835 spin_lock_irq(&ndlp->lock);
3836 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3837 spin_unlock_irq(&ndlp->lock);
3839 if (offline || hba_pci_err) {
3840 spin_lock_irq(&ndlp->lock);
3841 ndlp->nlp_flag &= ~(NLP_UNREG_INP |
3842 NLP_RPI_REGISTERED);
3843 spin_unlock_irq(&ndlp->lock);
3844 if (phba->sli_rev == LPFC_SLI_REV4)
3845 lpfc_sli_rpi_release(vports[i],
3848 lpfc_unreg_rpi(vports[i], ndlp);
3851 * Whenever an SLI4 port goes offline, free the
3852 * RPI. Get a new RPI when the adapter port
3853 * comes back online.
3855 if (phba->sli_rev == LPFC_SLI_REV4) {
3856 lpfc_printf_vlog(vports[i], KERN_INFO,
3857 LOG_NODE | LOG_DISCOVERY,
3858 "0011 Free RPI x%x on "
3859 "ndlp: x%px did x%x\n",
3860 ndlp->nlp_rpi, ndlp,
3862 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3863 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3866 if (ndlp->nlp_type & NLP_FABRIC) {
3867 lpfc_disc_state_machine(vports[i], ndlp,
3868 NULL, NLP_EVT_DEVICE_RECOVERY);
3870 /* Don't remove the node unless the node
3871 * has been unregistered with the
3872 * transport, and we're not in recovery
3873 * before dev_loss_tmo triggered.
3874 * Otherwise, let dev_loss take care of
3877 if (!(ndlp->save_flags &
3878 NLP_IN_RECOV_POST_DEV_LOSS) &&
3879 !(ndlp->fc4_xpt_flags &
3880 (NVME_XPT_REGD | SCSI_XPT_REGD)))
3881 lpfc_disc_state_machine
3889 lpfc_destroy_vport_work_array(phba, vports);
3891 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3894 flush_workqueue(phba->wq);
3898 * lpfc_offline - Bring a HBA offline
3899 * @phba: pointer to lpfc hba data structure.
3901 * This routine actually brings a HBA offline. It stops all the timers
3902 * associated with the HBA, brings down the SLI layer, and eventually
3903 * marks the HBA as in offline state for the upper layer protocol.
3906 lpfc_offline(struct lpfc_hba *phba)
3908 struct Scsi_Host *shost;
3909 struct lpfc_vport **vports;
3912 if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
3915 /* stop port and all timers associated with this hba */
3916 lpfc_stop_port(phba);
3918 /* Tear down the local and target port registrations. The
3919 * nvme transports need to clean up.
3921 lpfc_nvmet_destroy_targetport(phba);
3922 lpfc_nvme_destroy_localport(phba->pport);
3924 vports = lpfc_create_vport_work_array(phba);
3926 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3927 lpfc_stop_vport_timers(vports[i]);
3928 lpfc_destroy_vport_work_array(phba, vports);
3929 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3930 "0460 Bring Adapter offline\n");
3931 /* Bring down the SLI Layer and clean up. The HBA is offline
3933 lpfc_sli_hba_down(phba);
3934 spin_lock_irq(&phba->hbalock);
3936 spin_unlock_irq(&phba->hbalock);
3937 vports = lpfc_create_vport_work_array(phba);
3939 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3940 shost = lpfc_shost_from_vport(vports[i]);
3941 spin_lock_irq(shost->host_lock);
3942 vports[i]->work_port_events = 0;
3943 spin_unlock_irq(shost->host_lock);
3944 set_bit(FC_OFFLINE_MODE, &vports[i]->fc_flag);
3946 lpfc_destroy_vport_work_array(phba, vports);
3947 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
3950 if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
3951 __lpfc_cpuhp_remove(phba);
3953 if (phba->cfg_xri_rebalancing)
3954 lpfc_destroy_multixri_pools(phba);
3958 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3959 * @phba: pointer to lpfc hba data structure.
3961 * This routine frees all the SCSI buffers and IOCBs from the driver
3962 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
3963 * the internal resources before the device is removed from the system.
3966 lpfc_scsi_free(struct lpfc_hba *phba)
3968 struct lpfc_io_buf *sb, *sb_next;
3970 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3973 spin_lock_irq(&phba->hbalock);
3975 /* Release all the lpfc_scsi_bufs maintained by this host. */
3977 spin_lock(&phba->scsi_buf_list_put_lock);
3978 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3980 list_del(&sb->list);
3981 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3984 phba->total_scsi_bufs--;
3986 spin_unlock(&phba->scsi_buf_list_put_lock);
3988 spin_lock(&phba->scsi_buf_list_get_lock);
3989 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3991 list_del(&sb->list);
3992 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3995 phba->total_scsi_bufs--;
3997 spin_unlock(&phba->scsi_buf_list_get_lock);
3998 spin_unlock_irq(&phba->hbalock);
4002 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
4003 * @phba: pointer to lpfc hba data structure.
4005 * This routine frees all the IO buffers and IOCBs from the driver
4006 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
4007 * the internal resources before the device is removed from the system.
4010 lpfc_io_free(struct lpfc_hba *phba)
4012 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
4013 struct lpfc_sli4_hdw_queue *qp;
4016 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4017 qp = &phba->sli4_hba.hdwq[idx];
4018 /* Release all the lpfc_nvme_bufs maintained by this host. */
4019 spin_lock(&qp->io_buf_list_put_lock);
4020 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4021 &qp->lpfc_io_buf_list_put,
4023 list_del(&lpfc_ncmd->list);
4025 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4026 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4027 if (phba->cfg_xpsgl && !phba->nvmet_support)
4028 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4029 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4031 qp->total_io_bufs--;
4033 spin_unlock(&qp->io_buf_list_put_lock);
4035 spin_lock(&qp->io_buf_list_get_lock);
4036 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4037 &qp->lpfc_io_buf_list_get,
4039 list_del(&lpfc_ncmd->list);
4041 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4042 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4043 if (phba->cfg_xpsgl && !phba->nvmet_support)
4044 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4045 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4047 qp->total_io_bufs--;
4049 spin_unlock(&qp->io_buf_list_get_lock);
4054 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
4055 * @phba: pointer to lpfc hba data structure.
4057 * This routine first calculates the sizes of the current els and allocated
4058 * scsi sgl lists, and then goes through all sgls to update the physical
4059 * XRIs assigned due to port function reset. During port initialization, the
4060 * current els and allocated scsi sgl lists are 0s.
4063 * 0 - successful (for now, it always returns 0)
4066 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
4068 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4069 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4070 LIST_HEAD(els_sgl_list);
4074 * update on pci function's els xri-sgl list
4076 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4078 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
4079 /* els xri-sgl expanded */
4080 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
4081 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4082 "3157 ELS xri-sgl count increased from "
4083 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4085 /* allocate the additional els sgls */
4086 for (i = 0; i < xri_cnt; i++) {
4087 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4088 GFP_KERNEL);
4089 if (sglq_entry == NULL) {
4090 lpfc_printf_log(phba, KERN_ERR,
4092 "2562 Failure to allocate an "
4093 "ELS sgl entry:%d\n", i);
4097 sglq_entry->buff_type = GEN_BUFF_TYPE;
4098 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
4099 &sglq_entry->phys);
4100 if (sglq_entry->virt == NULL) {
4102 lpfc_printf_log(phba, KERN_ERR,
4104 "2563 Failure to allocate an "
4105 "ELS mbuf:%d\n", i);
4109 sglq_entry->sgl = sglq_entry->virt;
4110 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4111 sglq_entry->state = SGL_FREED;
4112 list_add_tail(&sglq_entry->list, &els_sgl_list);
4114 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4115 list_splice_init(&els_sgl_list,
4116 &phba->sli4_hba.lpfc_els_sgl_list);
4117 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4118 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
4119 /* els xri-sgl shrank */
4120 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
4121 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4122 "3158 ELS xri-sgl count decreased from "
4123 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4125 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4126 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4127 &els_sgl_list);
4128 /* release extra els sgls from list */
4129 for (i = 0; i < xri_cnt; i++) {
4130 list_remove_head(&els_sgl_list,
4131 sglq_entry, struct lpfc_sglq, list);
4133 __lpfc_mbuf_free(phba, sglq_entry->virt,
4134 sglq_entry->phys);
4135 kfree(sglq_entry);
4138 list_splice_init(&els_sgl_list,
4139 &phba->sli4_hba.lpfc_els_sgl_list);
4140 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4142 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4143 "3163 ELS xri-sgl count unchanged: %d\n",
4145 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4147 /* update xris to els sgls on the list */
4149 sglq_entry_next = NULL;
4150 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4151 &phba->sli4_hba.lpfc_els_sgl_list, list) {
4152 lxri = lpfc_sli4_next_xritag(phba);
4153 if (lxri == NO_XRI) {
4154 lpfc_printf_log(phba, KERN_ERR,
4156 "2400 Failed to allocate xri for "
4161 sglq_entry->sli4_lxritag = lxri;
4162 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4167 lpfc_free_els_sgl_list(phba);
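/* The update above covers three cases: if the post-reset ELS XRI count
 * grew, additional sglq entries are allocated and spliced in; if it
 * shrank, the surplus entries are freed; in all cases every remaining
 * sglq is stamped with a freshly allocated XRI, since the previous
 * physical XRI assignments are stale after a port function reset.
 */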
4172 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4173 * @phba: pointer to lpfc hba data structure.
4175 * This routine first calculates the sizes of the current els and allocated
4176 * scsi sgl lists, and then goes through all sgls to update the physical
4177 * XRIs assigned due to port function reset. During port initialization, the
4178 * current els and allocated scsi sgl list sizes are 0.
4181 * 0 - successful (for now, it always returns 0)
4184 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4186 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4187 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4188 uint16_t nvmet_xri_cnt;
4189 LIST_HEAD(nvmet_sgl_list);
4193 * update on pci function's nvmet xri-sgl list
4195 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4197 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4198 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4199 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4200 /* nvmet xri-sgl expanded */
4201 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4202 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4203 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4204 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4205 /* allocate the additional nvmet sgls */
4206 for (i = 0; i < xri_cnt; i++) {
4207 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4208 GFP_KERNEL);
4209 if (sglq_entry == NULL) {
4210 lpfc_printf_log(phba, KERN_ERR,
4212 "6303 Failure to allocate an "
4213 "NVMET sgl entry:%d\n", i);
4217 sglq_entry->buff_type = NVMET_BUFF_TYPE;
4218 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4219 &sglq_entry->phys);
4220 if (sglq_entry->virt == NULL) {
4222 lpfc_printf_log(phba, KERN_ERR,
4224 "6304 Failure to allocate an "
4225 "NVMET buf:%d\n", i);
4229 sglq_entry->sgl = sglq_entry->virt;
4230 memset(sglq_entry->sgl, 0,
4231 phba->cfg_sg_dma_buf_size);
4232 sglq_entry->state = SGL_FREED;
4233 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4235 spin_lock_irq(&phba->hbalock);
4236 spin_lock(&phba->sli4_hba.sgl_list_lock);
4237 list_splice_init(&nvmet_sgl_list,
4238 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4239 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4240 spin_unlock_irq(&phba->hbalock);
4241 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4242 /* nvmet xri-sgl shrunk */
4243 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4244 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4245 "6305 NVMET xri-sgl count decreased from "
4246 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4248 spin_lock_irq(&phba->hbalock);
4249 spin_lock(&phba->sli4_hba.sgl_list_lock);
4250 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4251 &nvmet_sgl_list);
4252 /* release extra nvmet sgls from list */
4253 for (i = 0; i < xri_cnt; i++) {
4254 list_remove_head(&nvmet_sgl_list,
4255 sglq_entry, struct lpfc_sglq, list);
4257 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4258 sglq_entry->phys);
4259 kfree(sglq_entry);
4262 list_splice_init(&nvmet_sgl_list,
4263 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4264 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4265 spin_unlock_irq(&phba->hbalock);
4267 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4268 "6306 NVMET xri-sgl count unchanged: %d\n",
4270 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4272 /* update xris to nvmet sgls on the list */
4274 sglq_entry_next = NULL;
4275 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4276 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4277 lxri = lpfc_sli4_next_xritag(phba);
4278 if (lxri == NO_XRI) {
4279 lpfc_printf_log(phba, KERN_ERR,
4281 "6307 Failed to allocate xri for "
4286 sglq_entry->sli4_lxritag = lxri;
4287 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4292 lpfc_free_nvmet_sgl_list(phba);
4297 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4300 struct lpfc_sli4_hdw_queue *qp;
4301 struct lpfc_io_buf *lpfc_cmd;
4302 struct lpfc_io_buf *iobufp, *prev_iobufp;
4303 int idx, cnt, xri, inserted;
4305 cnt = 0;
4306 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4307 qp = &phba->sli4_hba.hdwq[idx];
4308 spin_lock_irq(&qp->io_buf_list_get_lock);
4309 spin_lock(&qp->io_buf_list_put_lock);
4311 /* Take everything off the get and put lists */
4312 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4313 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4314 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4315 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4316 cnt += qp->get_io_bufs + qp->put_io_bufs;
4317 qp->get_io_bufs = 0;
4318 qp->put_io_bufs = 0;
4319 qp->total_io_bufs = 0;
4320 spin_unlock(&qp->io_buf_list_put_lock);
4321 spin_unlock_irq(&qp->io_buf_list_get_lock);
4325 * Take IO buffers off blist and put on cbuf sorted by XRI.
4326 * This is because POST_SGL takes a sequential range of XRIs
4327 * to post to the firmware.
4329 for (idx = 0; idx < cnt; idx++) {
4330 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4334 list_add_tail(&lpfc_cmd->list, cbuf);
4337 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4340 list_for_each_entry(iobufp, cbuf, list) {
4341 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4343 list_add(&lpfc_cmd->list,
4344 &prev_iobufp->list);
4346 list_add(&lpfc_cmd->list, cbuf);
4350 prev_iobufp = iobufp;
4353 list_add_tail(&lpfc_cmd->list, cbuf);
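/* A minimal sketch (not driver code) of the sorted insert performed
 * above, shown in isolation. The helper name is hypothetical; the real
 * loop operates on the same lpfc_io_buf / sli4_xritag fields.
 */
#if 0
static void example_sorted_insert(struct list_head *sorted,
				  struct lpfc_io_buf *new_buf)
{
	struct lpfc_io_buf *cur, *prev = NULL;
	uint16_t xri = new_buf->cur_iocbq.sli4_xritag;

	list_for_each_entry(cur, sorted, list) {
		if (xri < cur->cur_iocbq.sli4_xritag) {
			if (prev)
				/* insert after the last smaller entry */
				list_add(&new_buf->list, &prev->list);
			else
				/* smallest XRI seen; insert at head */
				list_add(&new_buf->list, sorted);
			return;
		}
		prev = cur;
	}
	/* largest XRI so far; append at the tail */
	list_add_tail(&new_buf->list, sorted);
}
#endif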
4359 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4361 struct lpfc_sli4_hdw_queue *qp;
4362 struct lpfc_io_buf *lpfc_cmd;
4363 int idx, cnt;
4364 unsigned long iflags;
4366 qp = phba->sli4_hba.hdwq;
4367 cnt = 0;
4368 while (!list_empty(cbuf)) {
4369 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4370 list_remove_head(cbuf, lpfc_cmd,
4371 struct lpfc_io_buf, list);
4375 qp = &phba->sli4_hba.hdwq[idx];
4376 lpfc_cmd->hdwq_no = idx;
4377 lpfc_cmd->hdwq = qp;
4378 lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
4379 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags);
4380 list_add_tail(&lpfc_cmd->list,
4381 &qp->lpfc_io_buf_list_put);
4382 qp->put_io_bufs++;
4383 qp->total_io_bufs++;
4384 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
4385 iflags);
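/* Replenish deals the buffers back round-robin, one per hardware
 * queue per pass over cbuf, so the per-queue put lists refill roughly
 * evenly instead of funneling every buffer to a single queue.
 */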
4392 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4393 * @phba: pointer to lpfc hba data structure.
4395 * This routine first calculates the sizes of the current els and allocated
4396 * scsi sgl lists, and then goes through all sgls to update the physical
4397 * XRIs assigned due to port function reset. During port initialization, the
4398 * current els and allocated scsi sgl list sizes are 0.
4401 * 0 - successful (for now, it always returns 0)
4404 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4406 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4407 uint16_t i, lxri, els_xri_cnt;
4408 uint16_t io_xri_cnt, io_xri_max;
4409 LIST_HEAD(io_sgl_list);
4413 * update on pci function's allocated nvme xri-sgl list
4416 /* maximum number of xris available for nvme buffers */
4417 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4418 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4419 phba->sli4_hba.io_xri_max = io_xri_max;
4421 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4422 "6074 Current allocated XRI sgl count:%d, "
4423 "maximum XRI count:%d els_xri_cnt:%d\n\n",
4424 phba->sli4_hba.io_xri_cnt,
4425 phba->sli4_hba.io_xri_max,
4428 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4430 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4431 /* max nvme xri shrunk below the allocated nvme buffers */
4432 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4433 phba->sli4_hba.io_xri_max;
4434 /* release the extra allocated nvme buffers */
4435 for (i = 0; i < io_xri_cnt; i++) {
4436 list_remove_head(&io_sgl_list, lpfc_ncmd,
4437 struct lpfc_io_buf, list);
4439 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4440 lpfc_ncmd->data,
4441 lpfc_ncmd->dma_handle);
4445 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4448 /* update xris associated with the remaining allocated nvme buffers */
4450 lpfc_ncmd_next = NULL;
4451 phba->sli4_hba.io_xri_cnt = cnt;
4452 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4453 &io_sgl_list, list) {
4454 lxri = lpfc_sli4_next_xritag(phba);
4455 if (lxri == NO_XRI) {
4456 lpfc_printf_log(phba, KERN_ERR,
4458 "6075 Failed to allocate xri for "
4463 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4464 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4466 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4475 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4476 * @phba: Pointer to lpfc hba data structure.
4477 * @num_to_alloc: The requested number of buffers to allocate.
4479 * This routine allocates nvme buffers for a device with the SLI-4 interface
4480 * spec. The nvme buffer contains all the necessary information needed to
4481 * initiate an I/O. After allocating up to @num_to_alloc IO buffers and
4482 * putting them on a list, it posts them to the port using an SGL block post.
4485 * int - number of IO buffers that were allocated and posted.
4486 * 0 = failure; fewer than num_to_alloc indicates a partial failure.
4489 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4491 struct lpfc_io_buf *lpfc_ncmd;
4492 struct lpfc_iocbq *pwqeq;
4493 uint16_t iotag, lxri = 0;
4494 int bcnt, num_posted;
4495 LIST_HEAD(prep_nblist);
4496 LIST_HEAD(post_nblist);
4497 LIST_HEAD(nvme_nblist);
4499 phba->sli4_hba.io_xri_cnt = 0;
4500 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4501 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4505 * Get memory from the pci pool to map the virt space to
4506 * pci bus space for an I/O. The DMA buffer includes the
4507 * number of SGEs necessary to support the sg_tablesize.
4509 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4510 GFP_KERNEL,
4511 &lpfc_ncmd->dma_handle);
4512 if (!lpfc_ncmd->data) {
4517 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4518 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4521 * 4K Page alignment is CRITICAL to BlockGuard, double check
4522 * to be sure.
4523 */
4524 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4525 (((unsigned long)(lpfc_ncmd->data) &
4526 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4527 lpfc_printf_log(phba, KERN_ERR,
4529 "3369 Memory alignment err: "
4531 (unsigned long)lpfc_ncmd->data);
4532 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4534 lpfc_ncmd->dma_handle);
4540 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4542 lxri = lpfc_sli4_next_xritag(phba);
4543 if (lxri == NO_XRI) {
4544 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4545 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4549 pwqeq = &lpfc_ncmd->cur_iocbq;
4551 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4552 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4553 if (iotag == 0) {
4554 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4555 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4557 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4558 "6121 Failed to allocate IOTAG for"
4559 " XRI:0x%x\n", lxri);
4560 lpfc_sli4_free_xri(phba, lxri);
4563 pwqeq->sli4_lxritag = lxri;
4564 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4566 /* Initialize local short-hand pointers. */
4567 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4568 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4569 lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
4570 spin_lock_init(&lpfc_ncmd->buf_lock);
4572 /* add the nvme buffer to a post list */
4573 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4574 phba->sli4_hba.io_xri_cnt++;
4576 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4577 "6114 Allocate %d out of %d requested new NVME "
4578 "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
4579 sizeof(*lpfc_ncmd));
4582 /* post the list of nvme buffer sgls to port if available */
4583 if (!list_empty(&post_nblist))
4584 num_posted = lpfc_sli4_post_io_sgl_list(
4585 phba, &post_nblist, bcnt);
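/* The buffers are accumulated on post_nblist and handed to the port
 * in a single SGL block post; on a partial post only the buffers the
 * port accepted are counted, which is why the return value can be
 * less than num_to_alloc.
 */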
4593 lpfc_get_wwpn(struct lpfc_hba *phba)
4597 LPFC_MBOXQ_t *mboxq;
4600 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4601 GFP_KERNEL);
4602 if (!mboxq)
4603 return (uint64_t)-1;
4605 /* First get WWN of HBA instance */
4606 lpfc_read_nv(phba, mboxq);
4607 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4608 if (rc != MBX_SUCCESS) {
4609 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4610 "6019 Mailbox failed , mbxCmd x%x "
4611 "READ_NV, mbxStatus x%x\n",
4612 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4613 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4614 mempool_free(mboxq, phba->mbox_mem_pool);
4615 return (uint64_t) -1;
4618 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4619 /* wwn is WWPN of HBA instance */
4620 mempool_free(mboxq, phba->mbox_mem_pool);
4621 if (phba->sli_rev == LPFC_SLI_REV4)
4622 return be64_to_cpu(wwn);
4624 return rol64(wwn, 32);
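/* The READ_NV payload is copied into wwn unmodified; be64_to_cpu()
 * yields the canonical WWPN on SLI4 ports, while rol64(wwn, 32) swaps
 * the two 32-bit halves for older ports, presumably because pre-SLI4
 * firmware reports the name with the word order reversed.
 */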
4627 static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
4629 if (phba->sli_rev == LPFC_SLI_REV4)
4630 if (phba->cfg_xpsgl && !phba->nvmet_support)
4631 return LPFC_MAX_SG_TABLESIZE;
4633 return phba->cfg_scsi_seg_cnt;
4635 return phba->cfg_sg_seg_cnt;
4639 * lpfc_vmid_res_alloc - Allocates resources for VMID
4640 * @phba: pointer to lpfc hba data structure.
4641 * @vport: pointer to vport data structure
4643 * This routine allocates the resources needed for the VMID.
4650 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4652 /* VMID feature is supported only on SLI4 */
4653 if (phba->sli_rev == LPFC_SLI_REV3) {
4654 phba->cfg_vmid_app_header = 0;
4655 phba->cfg_vmid_priority_tagging = 0;
4658 if (lpfc_is_vmid_enabled(phba)) {
4659 vport->vmid =
4660 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4661 GFP_KERNEL);
4662 if (!vport->vmid)
4663 return -ENOMEM;
4665 rwlock_init(&vport->vmid_lock);
4667 /* Set the VMID parameters for the vport */
4668 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4669 vport->vmid_inactivity_timeout =
4670 phba->cfg_vmid_inactivity_timeout;
4671 vport->max_vmid = phba->cfg_max_vmid;
4672 vport->cur_vmid_cnt = 0;
4674 vport->vmid_priority_range = bitmap_zalloc
4675 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4677 if (!vport->vmid_priority_range) {
4682 hash_init(vport->hash_table);
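/* VMID setup above allocates the per-vport resources as a unit: the
 * vmid table (kcalloc), its rwlock, the priority-range bitmap, and
 * the hash table used for VMID lookups.
 */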
4688 * lpfc_create_port - Create an FC port
4689 * @phba: pointer to lpfc hba data structure.
4690 * @instance: a unique integer ID to this FC port.
4691 * @dev: pointer to the device data structure.
4693 * This routine creates an FC port for the upper layer protocol. The FC port
4694 * can be created on top of either a physical port or a virtual port provided
4695 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4696 * and associates the FC port created before adding the shost into the SCSI
4697 * layer.
4700 * @vport - pointer to the virtual N_Port data structure.
4701 * NULL - port create failed.
4704 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4706 struct lpfc_vport *vport;
4707 struct Scsi_Host *shost = NULL;
4708 struct scsi_host_template *template;
4712 bool use_no_reset_hba = false;
4715 if (lpfc_no_hba_reset_cnt) {
4716 if (phba->sli_rev < LPFC_SLI_REV4 &&
4717 dev == &phba->pcidev->dev) {
4718 /* Reset the port first */
4719 lpfc_sli_brdrestart(phba);
4720 rc = lpfc_sli_chipset_init(phba);
4724 wwn = lpfc_get_wwpn(phba);
4727 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4728 if (wwn == lpfc_no_hba_reset[i]) {
4729 lpfc_printf_log(phba, KERN_ERR,
4731 "6020 Setting use_no_reset port=%llx\n",
4733 use_no_reset_hba = true;
4738 /* Seed template for SCSI host registration */
4739 if (dev == &phba->pcidev->dev) {
4740 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4741 /* Seed physical port template */
4742 template = &lpfc_template;
4744 if (use_no_reset_hba)
4745 /* template is for a no reset SCSI Host */
4746 template->eh_host_reset_handler = NULL;
4748 /* Seed updated value of sg_tablesize */
4749 template->sg_tablesize = lpfc_get_sg_tablesize(phba);
4751 /* NVMET is for physical port only */
4752 template = &lpfc_template_nvme;
4755 /* Seed vport template */
4756 template = &lpfc_vport_template;
4758 /* Seed updated value of sg_tablesize */
4759 template->sg_tablesize = lpfc_get_sg_tablesize(phba);
4762 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4766 vport = (struct lpfc_vport *) shost->hostdata;
4768 set_bit(FC_LOADING, &vport->load_flag);
4769 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
4770 vport->fc_rscn_flush = 0;
4771 atomic_set(&vport->fc_plogi_cnt, 0);
4772 atomic_set(&vport->fc_adisc_cnt, 0);
4773 atomic_set(&vport->fc_reglogin_cnt, 0);
4774 atomic_set(&vport->fc_prli_cnt, 0);
4775 atomic_set(&vport->fc_unmap_cnt, 0);
4776 atomic_set(&vport->fc_map_cnt, 0);
4777 atomic_set(&vport->fc_npr_cnt, 0);
4778 atomic_set(&vport->fc_unused_cnt, 0);
4779 lpfc_get_vport_cfgparam(vport);
4781 /* Adjust value in vport */
4782 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4784 shost->unique_id = instance;
4785 shost->max_id = LPFC_MAX_TARGET;
4786 shost->max_lun = vport->cfg_max_luns;
4787 shost->this_id = -1;
4788 shost->max_cmd_len = 16;
4790 if (phba->sli_rev == LPFC_SLI_REV4) {
4791 if (!phba->cfg_fcp_mq_threshold ||
4792 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4793 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4795 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4796 phba->cfg_fcp_mq_threshold);
4798 shost->dma_boundary =
4799 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4801 /* SLI-3 has a limited number of hardware queues (3),
4802 * thus there is only one for FCP processing.
4804 shost->nr_hw_queues = 1;
4807 * Set initial can_queue value since 0 is no longer supported and
4808 * scsi_add_host will fail. This will be adjusted later based on the
4809 * max xri value determined in hba setup.
4811 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4812 if (dev != &phba->pcidev->dev) {
4813 shost->transportt = lpfc_vport_transport_template;
4814 vport->port_type = LPFC_NPIV_PORT;
4816 shost->transportt = lpfc_transport_template;
4817 vport->port_type = LPFC_PHYSICAL_PORT;
4820 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4821 "9081 CreatePort TMPLATE type %x TBLsize %d "
4823 vport->port_type, shost->sg_tablesize,
4824 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4826 /* Allocate the resources for VMID */
4827 rc = lpfc_vmid_res_alloc(phba, vport);
4832 /* Initialize all internally managed lists. */
4833 INIT_LIST_HEAD(&vport->fc_nodes);
4834 spin_lock_init(&vport->fc_nodes_list_lock);
4835 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4836 spin_lock_init(&vport->work_port_lock);
4838 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4840 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4842 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4844 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4845 lpfc_setup_bg(phba, shost);
4847 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4851 spin_lock_irq(&phba->port_list_lock);
4852 list_add_tail(&vport->listentry, &phba->port_list);
4853 spin_unlock_irq(&phba->port_list_lock);
4858 bitmap_free(vport->vmid_priority_range);
4860 scsi_host_put(shost);
4866 * destroy_port - destroy an FC port
4867 * @vport: pointer to an lpfc virtual N_Port data structure.
4869 * This routine destroys an FC port from the upper layer protocol. All the
4870 * resources associated with the port are released.
4873 destroy_port(struct lpfc_vport *vport)
4875 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4876 struct lpfc_hba *phba = vport->phba;
4878 lpfc_debugfs_terminate(vport);
4879 fc_remove_host(shost);
4880 scsi_remove_host(shost);
4882 spin_lock_irq(&phba->port_list_lock);
4883 list_del_init(&vport->listentry);
4884 spin_unlock_irq(&phba->port_list_lock);
4886 lpfc_cleanup(vport);
4891 * lpfc_get_instance - Get a unique integer ID
4893 * This routine allocates a unique integer ID from the lpfc_hba_index pool. It
4894 * uses the kernel idr facility to perform the task.
4897 * instance - a unique integer ID allocated as the new instance.
4898 * -1 - lpfc get instance failed.
4901 lpfc_get_instance(void)
4905 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4906 return ret < 0 ? -1 : ret;
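/* idr_alloc() with end == 0 places no upper bound on the id; it
 * returns the new id on success or a negative errno (-ENOMEM or
 * -ENOSPC) on failure, which this wrapper collapses to -1 for its
 * callers.
 */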
4910 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4911 * @shost: pointer to SCSI host data structure.
4912 * @time: elapsed time of the scan in jiffies.
4914 * This routine is called by the SCSI layer with a SCSI host to determine
4915 * whether the host scan is finished.
4917 * Note: there is no scan_start function as adapter initialization will have
4918 * asynchronously kicked off the link initialization.
4921 * 0 - SCSI host scan is not over yet.
4922 * 1 - SCSI host scan is over.
4924 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4926 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4927 struct lpfc_hba *phba = vport->phba;
4930 spin_lock_irq(shost->host_lock);
4932 if (test_bit(FC_UNLOADING, &vport->load_flag)) {
4936 if (time >= msecs_to_jiffies(30 * 1000)) {
4937 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4938 "0461 Scanning longer than 30 "
4939 "seconds. Continuing initialization\n");
4943 if (time >= msecs_to_jiffies(15 * 1000) &&
4944 phba->link_state <= LPFC_LINK_DOWN) {
4945 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4946 "0465 Link down longer than 15 "
4947 "seconds. Continuing initialization\n");
4952 if (vport->port_state != LPFC_VPORT_READY)
4954 if (vport->num_disc_nodes || vport->fc_prli_sent)
4956 if (!atomic_read(&vport->fc_map_cnt) &&
4957 time < msecs_to_jiffies(2 * 1000))
4959 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4965 spin_unlock_irq(shost->host_lock);
4969 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4971 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4972 struct lpfc_hba *phba = vport->phba;
4974 fc_host_supported_speeds(shost) = 0;
4976 * Avoid reporting supported link speed for FCoE as it can't be
4977 * controlled via FCoE.
4979 if (phba->hba_flag & HBA_FCOE_MODE)
4982 if (phba->lmt & LMT_256Gb)
4983 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4984 if (phba->lmt & LMT_128Gb)
4985 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4986 if (phba->lmt & LMT_64Gb)
4987 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4988 if (phba->lmt & LMT_32Gb)
4989 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4990 if (phba->lmt & LMT_16Gb)
4991 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4992 if (phba->lmt & LMT_10Gb)
4993 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4994 if (phba->lmt & LMT_8Gb)
4995 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4996 if (phba->lmt & LMT_4Gb)
4997 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4998 if (phba->lmt & LMT_2Gb)
4999 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
5000 if (phba->lmt & LMT_1Gb)
5001 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
5005 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
5006 * @shost: pointer to SCSI host data structure.
5008 * This routine initializes a given SCSI host's attributes on an FC port. The
5009 * SCSI host can be either on top of a physical port or a virtual port.
5011 void lpfc_host_attrib_init(struct Scsi_Host *shost)
5013 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5014 struct lpfc_hba *phba = vport->phba;
5016 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
5019 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
5020 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
5021 fc_host_supported_classes(shost) = FC_COS_CLASS3;
5023 memset(fc_host_supported_fc4s(shost), 0,
5024 sizeof(fc_host_supported_fc4s(shost)));
5025 fc_host_supported_fc4s(shost)[2] = 1;
5026 fc_host_supported_fc4s(shost)[7] = 1;
5028 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
5029 sizeof fc_host_symbolic_name(shost));
5031 lpfc_host_supported_speeds_set(shost);
5033 fc_host_maxframe_size(shost) =
5034 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
5035 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
5037 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
5039 /* This value is also unchanging */
5040 memset(fc_host_active_fc4s(shost), 0,
5041 sizeof(fc_host_active_fc4s(shost)));
5042 fc_host_active_fc4s(shost)[2] = 1;
5043 fc_host_active_fc4s(shost)[7] = 1;
5045 fc_host_max_npiv_vports(shost) = phba->max_vpi;
5046 clear_bit(FC_LOADING, &vport->load_flag);
5050 * lpfc_stop_port_s3 - Stop SLI3 device port
5051 * @phba: pointer to lpfc hba data structure.
5053 * This routine is invoked to stop an SLI3 device port. It stops the device
5054 * from generating interrupts and stops the device driver's timers for the
5055 * device.
5058 lpfc_stop_port_s3(struct lpfc_hba *phba)
5060 /* Clear all interrupt enable conditions */
5061 writel(0, phba->HCregaddr);
5062 readl(phba->HCregaddr); /* flush */
5063 /* Clear all pending interrupts */
5064 writel(0xffffffff, phba->HAregaddr);
5065 readl(phba->HAregaddr); /* flush */
5067 /* Reset some HBA SLI setup states */
5068 lpfc_stop_hba_timers(phba);
5069 phba->pport->work_port_events = 0;
5073 * lpfc_stop_port_s4 - Stop SLI4 device port
5074 * @phba: pointer to lpfc hba data structure.
5076 * This routine is invoked to stop an SLI4 device port. It stops the device
5077 * from generating interrupts and stops the device driver's timers for the
5078 * device.
5081 lpfc_stop_port_s4(struct lpfc_hba *phba)
5083 /* Reset some HBA SLI4 setup states */
5084 lpfc_stop_hba_timers(phba);
5086 phba->pport->work_port_events = 0;
5087 phba->sli4_hba.intr_enable = 0;
5091 * lpfc_stop_port - Wrapper function for stopping hba port
5092 * @phba: Pointer to HBA context object.
5094 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via
5095 * the API jump table function pointer in the lpfc_hba struct.
5098 lpfc_stop_port(struct lpfc_hba *phba)
5100 phba->lpfc_stop_port(phba);
5103 flush_workqueue(phba->wq);
5107 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
5108 * @phba: Pointer to hba for which this call is being executed.
5110 * This routine starts the timer waiting for the FCF rediscovery to complete.
5113 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
5115 unsigned long fcf_redisc_wait_tmo =
5116 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
5117 /* Start fcf rediscovery wait period timer */
5118 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5119 spin_lock_irq(&phba->hbalock);
5120 /* Allow action to new fcf asynchronous event */
5121 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5122 /* Mark the FCF rediscovery pending state */
5123 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5124 spin_unlock_irq(&phba->hbalock);
5128 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
5129 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5131 * This routine is invoked when the wait for FCF table rediscovery has
5132 * timed out. If new FCF record(s) have been discovered during the
5133 * wait period, a new FCF event is added to the FCoE async event
5134 * list, and the worker thread is then woken up to process it from
5135 * the worker thread context.
5138 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5140 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5142 /* Don't send FCF rediscovery event if timer cancelled */
5143 spin_lock_irq(&phba->hbalock);
5144 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5145 spin_unlock_irq(&phba->hbalock);
5148 /* Clear FCF rediscovery timer pending flag */
5149 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5150 /* FCF rediscovery event to worker thread */
5151 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5152 spin_unlock_irq(&phba->hbalock);
5153 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5154 "2776 FCF rediscover quiescent timer expired\n");
5155 /* wake up worker thread */
5156 lpfc_worker_wake_up(phba);
5160 * lpfc_vmid_poll - VMID timeout detection
5161 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5163 * This routine is invoked when there is no I/O by a VM for the specified
5164 * amount of time. When this situation is detected, the VMID has to be
5165 * deregistered from the switch and all the local resources freed. The VMID
5166 * will be reassigned to the VM once the I/O begins.
5169 lpfc_vmid_poll(struct timer_list *t)
5171 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5174 /* check if there is a need to issue QFPA */
5175 if (phba->pport->vmid_priority_tagging) {
5177 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5180 /* Is the vmid inactivity timer enabled */
5181 if (phba->pport->vmid_inactivity_timeout ||
5182 test_bit(FC_DEREGISTER_ALL_APP_ID, &phba->pport->load_flag)) {
5184 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5188 lpfc_worker_wake_up(phba);
5190 /* restart the timer for the next iteration */
5191 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5196 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5197 * @phba: pointer to lpfc hba data structure.
5198 * @acqe_link: pointer to the async link completion queue entry.
5200 * This routine is to parse the SLI4 link-attention link fault code.
5203 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5204 struct lpfc_acqe_link *acqe_link)
5206 switch (bf_get(lpfc_acqe_fc_la_att_type, acqe_link)) {
5207 case LPFC_FC_LA_TYPE_LINK_DOWN:
5208 case LPFC_FC_LA_TYPE_TRUNKING_EVENT:
5209 case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
5210 case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
5213 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5214 case LPFC_ASYNC_LINK_FAULT_NONE:
5215 case LPFC_ASYNC_LINK_FAULT_LOCAL:
5216 case LPFC_ASYNC_LINK_FAULT_REMOTE:
5217 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5220 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5221 "0398 Unknown link fault code: x%x\n",
5222 bf_get(lpfc_acqe_link_fault, acqe_link));
5230 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5231 * @phba: pointer to lpfc hba data structure.
5232 * @acqe_link: pointer to the async link completion queue entry.
5234 * This routine is to parse the SLI4 link attention type and translate it
5235 * into the base driver's link attention type coding.
5237 * Return: Link attention type in terms of base driver's coding.
5240 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5241 struct lpfc_acqe_link *acqe_link)
5245 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5246 case LPFC_ASYNC_LINK_STATUS_DOWN:
5247 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5248 att_type = LPFC_ATT_LINK_DOWN;
5250 case LPFC_ASYNC_LINK_STATUS_UP:
5251 /* Ignore physical link up events - wait for logical link up */
5252 att_type = LPFC_ATT_RESERVED;
5254 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5255 att_type = LPFC_ATT_LINK_UP;
5258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5259 "0399 Invalid link attention type: x%x\n",
5260 bf_get(lpfc_acqe_link_status, acqe_link));
5261 att_type = LPFC_ATT_RESERVED;
5268 * lpfc_sli_port_speed_get - Get the FC port's link speed in Mbps
5269 * @phba: pointer to lpfc hba data structure.
5271 * This routine is to get an SLI3 FC port's link speed in Mbps.
5273 * Return: link speed in terms of Mbps.
5276 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5278 uint32_t link_speed;
5280 if (!lpfc_is_link_up(phba))
5283 if (phba->sli_rev <= LPFC_SLI_REV3) {
5284 switch (phba->fc_linkspeed) {
5285 case LPFC_LINK_SPEED_1GHZ:
5288 case LPFC_LINK_SPEED_2GHZ:
5291 case LPFC_LINK_SPEED_4GHZ:
5294 case LPFC_LINK_SPEED_8GHZ:
5297 case LPFC_LINK_SPEED_10GHZ:
5300 case LPFC_LINK_SPEED_16GHZ:
5307 if (phba->sli4_hba.link_state.logical_speed)
5309 phba->sli4_hba.link_state.logical_speed;
5311 link_speed = phba->sli4_hba.link_state.speed;
5317 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5318 * @phba: pointer to lpfc hba data structure.
5319 * @evt_code: asynchronous event code.
5320 * @speed_code: asynchronous event link speed code.
5322 * This routine is to parse the given SLI4 async event link speed code into
5323 * a link speed value in Mbps.
5325 * Return: link speed in terms of Mbps.
5328 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5331 uint32_t port_speed;
5334 case LPFC_TRAILER_CODE_LINK:
5335 switch (speed_code) {
5336 case LPFC_ASYNC_LINK_SPEED_ZERO:
5339 case LPFC_ASYNC_LINK_SPEED_10MBPS:
5342 case LPFC_ASYNC_LINK_SPEED_100MBPS:
5345 case LPFC_ASYNC_LINK_SPEED_1GBPS:
5348 case LPFC_ASYNC_LINK_SPEED_10GBPS:
5351 case LPFC_ASYNC_LINK_SPEED_20GBPS:
5354 case LPFC_ASYNC_LINK_SPEED_25GBPS:
5357 case LPFC_ASYNC_LINK_SPEED_40GBPS:
5360 case LPFC_ASYNC_LINK_SPEED_100GBPS:
5361 port_speed = 100000;
5367 case LPFC_TRAILER_CODE_FC:
5368 switch (speed_code) {
5369 case LPFC_FC_LA_SPEED_UNKNOWN:
5372 case LPFC_FC_LA_SPEED_1G:
5375 case LPFC_FC_LA_SPEED_2G:
5378 case LPFC_FC_LA_SPEED_4G:
5381 case LPFC_FC_LA_SPEED_8G:
5384 case LPFC_FC_LA_SPEED_10G:
5387 case LPFC_FC_LA_SPEED_16G:
5390 case LPFC_FC_LA_SPEED_32G:
5393 case LPFC_FC_LA_SPEED_64G:
5396 case LPFC_FC_LA_SPEED_128G:
5397 port_speed = 128000;
5399 case LPFC_FC_LA_SPEED_256G:
5400 port_speed = 256000;
5413 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5414 * @phba: pointer to lpfc hba data structure.
5415 * @acqe_link: pointer to the async link completion queue entry.
5417 * This routine is to handle the SLI4 asynchronous FCoE link event.
5420 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5421 struct lpfc_acqe_link *acqe_link)
5425 struct lpfc_mbx_read_top *la;
5429 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5430 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5432 phba->fcoe_eventtag = acqe_link->event_tag;
5433 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5435 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5436 "0395 The mboxq allocation failed\n");
5440 rc = lpfc_mbox_rsrc_prep(phba, pmb);
5442 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5443 "0396 mailbox allocation failed\n");
5447 /* Cleanup any outstanding ELS commands */
5448 lpfc_els_flush_all_cmd(phba);
5450 /* Block ELS IOCBs until we have finished processing the link event */
5451 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5453 /* Update link event statistics */
5454 phba->sli.slistat.link_event++;
5456 /* Create lpfc_handle_latt mailbox command from link ACQE */
5457 lpfc_read_topology(phba, pmb, pmb->ctx_buf);
5458 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5459 pmb->vport = phba->pport;
5461 /* Keep the link status for extra SLI4 state machine reference */
5462 phba->sli4_hba.link_state.speed =
5463 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5464 bf_get(lpfc_acqe_link_speed, acqe_link));
5465 phba->sli4_hba.link_state.duplex =
5466 bf_get(lpfc_acqe_link_duplex, acqe_link);
5467 phba->sli4_hba.link_state.status =
5468 bf_get(lpfc_acqe_link_status, acqe_link);
5469 phba->sli4_hba.link_state.type =
5470 bf_get(lpfc_acqe_link_type, acqe_link);
5471 phba->sli4_hba.link_state.number =
5472 bf_get(lpfc_acqe_link_number, acqe_link);
5473 phba->sli4_hba.link_state.fault =
5474 bf_get(lpfc_acqe_link_fault, acqe_link);
5475 phba->sli4_hba.link_state.logical_speed =
5476 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5478 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5479 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5480 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5481 "Logical speed:%dMbps Fault:%d\n",
5482 phba->sli4_hba.link_state.speed,
5483 phba->sli4_hba.link_state.topology,
5484 phba->sli4_hba.link_state.status,
5485 phba->sli4_hba.link_state.type,
5486 phba->sli4_hba.link_state.number,
5487 phba->sli4_hba.link_state.logical_speed,
5488 phba->sli4_hba.link_state.fault);
5490 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5491 * topology info. Note: Optional for non FC-AL ports.
5493 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5494 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5495 if (rc == MBX_NOT_FINISHED)
5500 * For FCoE Mode: fill in all the topology information we need and call
5501 * the READ_TOPOLOGY completion routine to continue without actually
5502 * sending the READ_TOPOLOGY mailbox command to the port.
5504 /* Initialize completion status */
5506 mb->mbxStatus = MBX_SUCCESS;
5508 /* Parse port fault information field */
5509 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5511 /* Parse and translate link attention fields */
5512 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5513 la->eventTag = acqe_link->event_tag;
5514 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5515 bf_set(lpfc_mbx_read_top_link_spd, la,
5516 (bf_get(lpfc_acqe_link_speed, acqe_link)));
5518 /* Fake the following irrelevant fields */
5519 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5520 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5521 bf_set(lpfc_mbx_read_top_il, la, 0);
5522 bf_set(lpfc_mbx_read_top_pb, la, 0);
5523 bf_set(lpfc_mbx_read_top_fa, la, 0);
5524 bf_set(lpfc_mbx_read_top_mm, la, 0);
5526 /* Invoke the lpfc_handle_latt mailbox command callback function */
5527 lpfc_mbx_cmpl_read_topology(phba, pmb);
5532 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5536 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5537 * topology.
5538 * @phba: pointer to lpfc hba data structure.
5539 * @speed_code: asynchronous event link speed code.
5541 * This routine is to parse the given SLI4 async event link speed code into
5542 * the corresponding READ_TOPOLOGY link speed value.
5544 * Return: link speed in terms of Read topology.
5547 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5551 switch (speed_code) {
5552 case LPFC_FC_LA_SPEED_1G:
5553 port_speed = LPFC_LINK_SPEED_1GHZ;
5555 case LPFC_FC_LA_SPEED_2G:
5556 port_speed = LPFC_LINK_SPEED_2GHZ;
5558 case LPFC_FC_LA_SPEED_4G:
5559 port_speed = LPFC_LINK_SPEED_4GHZ;
5561 case LPFC_FC_LA_SPEED_8G:
5562 port_speed = LPFC_LINK_SPEED_8GHZ;
5564 case LPFC_FC_LA_SPEED_16G:
5565 port_speed = LPFC_LINK_SPEED_16GHZ;
5567 case LPFC_FC_LA_SPEED_32G:
5568 port_speed = LPFC_LINK_SPEED_32GHZ;
5570 case LPFC_FC_LA_SPEED_64G:
5571 port_speed = LPFC_LINK_SPEED_64GHZ;
5573 case LPFC_FC_LA_SPEED_128G:
5574 port_speed = LPFC_LINK_SPEED_128GHZ;
5576 case LPFC_FC_LA_SPEED_256G:
5577 port_speed = LPFC_LINK_SPEED_256GHZ;
5588 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5590 if (!phba->rx_monitor) {
5591 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5592 "4411 Rx Monitor Info is empty.\n");
5594 lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
5595 LPFC_MAX_RXMONITOR_DUMP);
5600 * lpfc_cgn_update_stat - Save data into congestion stats buffer
5601 * @phba: pointer to lpfc hba data structure.
5602 * @dtag: FPIN descriptor received
5604 * Increment the FPIN received counter/time when it happens.
5607 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5609 struct lpfc_cgn_info *cp;
5612 /* Make sure we have a congestion info buffer */
5615 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5617 /* Update congestion statistics */
5619 case ELS_DTAG_LNK_INTEGRITY:
5620 le32_add_cpu(&cp->link_integ_notification, 1);
5621 lpfc_cgn_update_tstamp(phba, &cp->stat_lnk);
5623 case ELS_DTAG_DELIVERY:
5624 le32_add_cpu(&cp->delivery_notification, 1);
5625 lpfc_cgn_update_tstamp(phba, &cp->stat_delivery);
5627 case ELS_DTAG_PEER_CONGEST:
5628 le32_add_cpu(&cp->cgn_peer_notification, 1);
5629 lpfc_cgn_update_tstamp(phba, &cp->stat_peer);
5631 case ELS_DTAG_CONGESTION:
5632 le32_add_cpu(&cp->cgn_notification, 1);
5633 lpfc_cgn_update_tstamp(phba, &cp->stat_fpin);
5635 if (phba->cgn_fpin_frequency &&
5636 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5637 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5638 cp->cgn_stat_npm = value;
5641 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5642 LPFC_CGN_CRC32_SEED);
5643 cp->cgn_info_crc = cpu_to_le32(value);
5647 * lpfc_cgn_update_tstamp - Update cmf timestamp
5648 * @phba: pointer to lpfc hba data structure.
5649 * @ts: structure to write the timestamp to.
5652 lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts)
5654 struct timespec64 cur_time;
5657 ktime_get_real_ts64(&cur_time);
5658 time64_to_tm(cur_time.tv_sec, 0, &tm_val);
5660 ts->month = tm_val.tm_mon + 1;
5661 ts->day = tm_val.tm_mday;
5662 ts->year = tm_val.tm_year - 100;
5663 ts->hour = tm_val.tm_hour;
5664 ts->minute = tm_val.tm_min;
5665 ts->second = tm_val.tm_sec;
5667 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5668 "2646 Updated CMF timestamp : "
5669 "%u/%u/%u %u:%u:%u\n",
5672 ts->minute, ts->second);
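/* time64_to_tm() reports tm_mon in 0-11 and tm_year as years since
 * 1900, so the +1 and -100 adjustments above store a 1-based month
 * and a years-since-2000 value that fit the packed timestamp fields.
 */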
5676 * lpfc_cmf_stats_timer - Save data into registered congestion buffer
5677 * @timer: Timer cookie to access lpfc private data
5679 * Save the congestion event data every minute.
5680 * On the hour collapse all the minute data into hour data. Every day
5681 * collapse all the hour data into daily data. Driver congestion
5682 * and fabric congestion event counters are kept separately and
5683 * saved out to the registered congestion buffer every minute.
5685 static enum hrtimer_restart
5686 lpfc_cmf_stats_timer(struct hrtimer *timer)
5688 struct lpfc_hba *phba;
5689 struct lpfc_cgn_info *cp;
5691 uint16_t value, mvalue;
5694 uint32_t dvalue, wvalue, lvalue, avalue;
5700 phba = container_of(timer, struct lpfc_hba, cmf_stats_timer);
5701 /* Make sure we have a congestion info buffer */
5703 return HRTIMER_NORESTART;
5704 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5706 phba->cgn_evt_timestamp = jiffies +
5707 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5708 phba->cgn_evt_minute++;
5710 /* We should get to this point in the routine on 1-minute intervals */
5711 lpfc_cgn_update_tstamp(phba, &cp->base_time);
5713 if (phba->cgn_fpin_frequency &&
5714 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5715 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5716 cp->cgn_stat_npm = value;
5719 /* Read and clear the latency counters for this minute */
5720 lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5721 latsum = atomic64_read(&phba->cgn_latency_evt);
5722 atomic_set(&phba->cgn_latency_evt_cnt, 0);
5723 atomic64_set(&phba->cgn_latency_evt, 0);
5725 /* We need to store MB/sec bandwidth in the congestion information.
5726 * block_cnt is the count of 512-byte blocks for the entire minute;
5727 * bps holds bytes per second before finally converting to MB/sec.
5729 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5730 phba->rx_block_cnt = 0;
5731 mvalue = bps / (1024 * 1024); /* convert to MB/sec */
5734 /* cgn parameters */
5735 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5736 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5737 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5738 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5740 /* Fill in default LUN qdepth */
5741 value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5742 cp->cgn_lunq = cpu_to_le16(value);
5744 /* Record congestion buffer info - every minute
5745 * cgn_driver_evt_cnt (Driver events)
5746 * cgn_fabric_warn_cnt (Congestion Warnings)
5747 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
5748 * cgn_fabric_alarm_cnt (Congestion Alarms)
5750 index = ++cp->cgn_index_minute;
5751 if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5752 cp->cgn_index_minute = 0;
5756 /* Get the number of driver events in this sample and reset counter */
5757 dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5758 atomic_set(&phba->cgn_driver_evt_cnt, 0);
5760 /* Get the number of warning events - FPIN and Signal for this minute */
5762 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5763 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5764 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5765 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5766 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5768 /* Get the number of alarm events - FPIN and Signal for this minute */
5770 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5771 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5772 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5773 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5775 /* Collect the driver, warning, alarm and latency counts for this
5776 * minute into the driver congestion buffer.
5778 ptr = &cp->cgn_drvr_min[index];
5779 value = (uint16_t)dvalue;
5780 *ptr = cpu_to_le16(value);
5782 ptr = &cp->cgn_warn_min[index];
5783 value = (uint16_t)wvalue;
5784 *ptr = cpu_to_le16(value);
5786 ptr = &cp->cgn_alarm_min[index];
5787 value = (uint16_t)avalue;
5788 *ptr = cpu_to_le16(value);
5790 lptr = &cp->cgn_latency_min[index];
5792 lvalue = (uint32_t)div_u64(latsum, lvalue);
5793 *lptr = cpu_to_le32(lvalue);
5798 /* Collect the bandwidth value into the driver's congestion buffer. */
5799 mptr = &cp->cgn_bw_min[index];
5800 *mptr = cpu_to_le16(mvalue);
5802 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5803 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5804 index, dvalue, wvalue, *lptr, mvalue, avalue);
5807 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5808 /* Record congestion buffer info - every hour
5809 * Collapse all minutes into an hour
5811 index = ++cp->cgn_index_hour;
5812 if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5813 cp->cgn_index_hour = 0;
5823 for (i = 0; i < LPFC_MIN_HOUR; i++) {
5824 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5825 wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5826 lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5827 mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5828 avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5830 if (lvalue) /* Avg of latency averages */
5831 lvalue /= LPFC_MIN_HOUR;
5832 if (mbps) /* Avg of Bandwidth averages */
5833 mvalue = mbps / LPFC_MIN_HOUR;
5835 lptr = &cp->cgn_drvr_hr[index];
5836 *lptr = cpu_to_le32(dvalue);
5837 lptr = &cp->cgn_warn_hr[index];
5838 *lptr = cpu_to_le32(wvalue);
5839 lptr = &cp->cgn_latency_hr[index];
5840 *lptr = cpu_to_le32(lvalue);
5841 mptr = &cp->cgn_bw_hr[index];
5842 *mptr = cpu_to_le16(mvalue);
5843 lptr = &cp->cgn_alarm_hr[index];
5844 *lptr = cpu_to_le32(avalue);
5846 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5847 "2419 Congestion Info - hour "
5848 "(%d): %d %d %d %d %d\n",
5849 index, dvalue, wvalue, lvalue, mvalue, avalue);
5853 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5854 /* Record congestion buffer info - every day
5855 * Collapse all hours into a day. Rotate days
5856 * after LPFC_MAX_CGN_DAYS.
5858 index = ++cp->cgn_index_day;
5859 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5860 cp->cgn_index_day = 0;
5870 for (i = 0; i < LPFC_HOUR_DAY; i++) {
5871 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5872 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5873 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5874 mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5875 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5877 if (lvalue) /* Avg of latency averages */
5878 lvalue /= LPFC_HOUR_DAY;
5879 if (mbps) /* Avg of Bandwidth averages */
5880 mvalue = mbps / LPFC_HOUR_DAY;
5882 lptr = &cp->cgn_drvr_day[index];
5883 *lptr = cpu_to_le32(dvalue);
5884 lptr = &cp->cgn_warn_day[index];
5885 *lptr = cpu_to_le32(wvalue);
5886 lptr = &cp->cgn_latency_day[index];
5887 *lptr = cpu_to_le32(lvalue);
5888 mptr = &cp->cgn_bw_day[index];
5889 *mptr = cpu_to_le16(mvalue);
5890 lptr = &cp->cgn_alarm_day[index];
5891 *lptr = cpu_to_le32(avalue);
5893 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5894 "2420 Congestion Info - daily (%d): "
5896 index, dvalue, wvalue, lvalue, mvalue, avalue);
5899 /* Use the frequency found in the last rcv'ed FPIN */
5900 value = phba->cgn_fpin_frequency;
5901 cp->cgn_warn_freq = cpu_to_le16(value);
5902 cp->cgn_alarm_freq = cpu_to_le16(value);
5904 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5905 LPFC_CGN_CRC32_SEED);
5906 cp->cgn_info_crc = cpu_to_le32(lvalue);
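/* Every update of the shared buffer ends by recomputing the seeded
 * CRC32 over the entire lpfc_cgn_info block, so a consumer can verify
 * that it read a consistent snapshot.
 */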
5908 hrtimer_forward_now(timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC));
5910 return HRTIMER_RESTART;
5914 * lpfc_calc_cmf_latency - latency from start of rate timer interval
5915 * @phba: The Hba for which this call is being executed.
5917 * The routine calculates the latency from the beginning of the CMF timer
5918 * interval to the current point in time. It is called from IO completion
5919 * when we exceed our bandwidth limitation for the time interval.
5922 lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5924 struct timespec64 cmpl_time;
5927 ktime_get_real_ts64(&cmpl_time);
5929 /* This routine works on a ms granularity so sec and usec are
5930 * converted accordingly.
5932 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5933 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5936 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5937 msec = (cmpl_time.tv_sec -
5938 phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5939 msec += ((cmpl_time.tv_nsec -
5940 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5942 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5944 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5945 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
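/* Worked example for the carry case above: start = 10 s + 0.9e9 ns,
 * completion = 11 s + 0.2e9 ns. Then (11 - 10 - 1) * 1000 = 0 ms,
 * plus ((1e9 - 0.9e9) + 0.2e9) / 1e6 = 300 ms, matching the actual
 * 0.3 s elapsed.
 */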
5952 * lpfc_cmf_timer - This is the timer function for one congestion
5953 * rate interval.
5954 * @timer: Pointer to the high resolution timer that expired
5956 static enum hrtimer_restart
5957 lpfc_cmf_timer(struct hrtimer *timer)
5959 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
5961 struct rx_info_entry entry;
5963 uint32_t busy, max_read;
5964 uint64_t total, rcv, lat, mbpi, extra, cnt;
5965 int timer_interval = LPFC_CMF_INTERVAL;
5967 struct lpfc_cgn_stat *cgs;
5970 /* Only restart the timer if congestion mgmt is on */
5971 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
5972 !phba->cmf_latency.tv_sec) {
5973 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5974 "6224 CMF timer exit: %d %lld\n",
5975 phba->cmf_active_mode,
5976 (uint64_t)phba->cmf_latency.tv_sec);
5977 return HRTIMER_NORESTART;
5980 /* If pport is not ready yet, just exit and wait for
5981 * the next timer cycle to hit.
5986 /* Do not block SCSI IO while in the timer routine since
5987 * total_bytes will be cleared
5989 atomic_set(&phba->cmf_stop_io, 1);
5991 /* First we need to calculate the actual ms between
5992 * the last timer interrupt and this one. We ask for
5993 * LPFC_CMF_INTERVAL, however the actual time may
5994 * vary depending on system overhead.
5996 ms = lpfc_calc_cmf_latency(phba);
5999 /* Immediately after we calculate the time since the last
6000 * timer interrupt, set the start time for the next
6003 ktime_get_real_ts64(&phba->cmf_latency);
6005 phba->cmf_link_byte_count =
6006 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
6008 /* Collect all the stats from the prior timer interval */
6013 for_each_present_cpu(cpu) {
6014 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
6015 total += atomic64_xchg(&cgs->total_bytes, 0);
6016 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
6017 lat += atomic64_xchg(&cgs->rx_latency, 0);
6018 rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
6021 /* Before we issue another CMF_SYNC_WQE, retrieve the BW
6022 * returned from the last CMF_SYNC_WQE issued, from
6023 * cmf_last_sync_bw. This will be the target BW for
6024 * this next timer interval.
6026 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
6027 phba->link_state != LPFC_LINK_DOWN &&
6028 phba->hba_flag & HBA_SETUP) {
6029 mbpi = phba->cmf_last_sync_bw;
6030 phba->cmf_last_sync_bw = 0;
6033 /* Calculate any extra bytes needed to account for the
6034 * timer accuracy. If the measured interval is less than
6035 * LPFC_CMF_INTERVAL, calculate the adjustment needed for
6036 * total to reflect a full LPFC_CMF_INTERVAL.
6038 if (ms && ms < LPFC_CMF_INTERVAL) {
6039 cnt = div_u64(total, ms); /* bytes per ms */
6040 cnt *= LPFC_CMF_INTERVAL; /* what total should be */
6041 extra = cnt - total;
6043 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
6045 /* For Monitor mode or link down we want mbpi
6046 * to be the full link speed
6048 mbpi = phba->cmf_link_byte_count;
6051 phba->cmf_timer_cnt++;
6054 /* Update congestion info buffer latency in us */
6055 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
6056 atomic64_add(lat, &phba->cgn_latency_evt);
6058 busy = atomic_xchg(&phba->cmf_busy, 0);
6059 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
6061 /* Calculate MBPI for the next timer interval */
6063 if (mbpi > phba->cmf_link_byte_count ||
6064 phba->cmf_active_mode == LPFC_CFG_MONITOR)
6065 mbpi = phba->cmf_link_byte_count;
6067 /* Change max_bytes_per_interval to what the prior
6068 * CMF_SYNC_WQE cmpl indicated.
6070 if (mbpi != phba->cmf_max_bytes_per_interval)
6071 phba->cmf_max_bytes_per_interval = mbpi;
6074 /* Save rxmonitor information for debug */
6075 if (phba->rx_monitor) {
6076 entry.total_bytes = total;
6077 entry.cmf_bytes = total + extra;
6078 entry.rcv_bytes = rcv;
6079 entry.cmf_busy = busy;
6080 entry.cmf_info = phba->cmf_active_info;
6082 entry.avg_io_latency = div_u64(lat, io_cnt);
6083 entry.avg_io_size = div_u64(rcv, io_cnt);
6085 entry.avg_io_latency = 0;
6086 entry.avg_io_size = 0;
6088 entry.max_read_cnt = max_read;
6089 entry.io_cnt = io_cnt;
6090 entry.max_bytes_per_interval = mbpi;
6091 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6092 entry.timer_utilization = phba->cmf_last_ts;
6094 entry.timer_utilization = ms;
6095 entry.timer_interval = ms;
6096 phba->cmf_last_ts = 0;
6098 lpfc_rx_monitor_record(phba->rx_monitor, &entry);
6101 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
6102 /* If Monitor mode, check if we are oversubscribed
6103 * against the full line rate.
6105 if (mbpi && total > mbpi)
6106 atomic_inc(&phba->cgn_driver_evt_cnt);
6108 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */
6110 /* Since total_bytes has already been zeroed, it's okay to unblock
6111 * after max_bytes_per_interval is set up.
6113 if (atomic_xchg(&phba->cmf_bw_wait, 0))
6114 queue_work(phba->wq, &phba->unblock_request_work);
6116 /* SCSI IO is now unblocked */
6117 atomic_set(&phba->cmf_stop_io, 0);
6120 hrtimer_forward_now(timer,
6121 ktime_set(0, timer_interval * NSEC_PER_MSEC));
6122 return HRTIMER_RESTART;
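/* A hedged sketch (not driver code) of the interval scaling used
 * above when the timer fires early: project the partial-interval byte
 * count up to a full LPFC_CMF_INTERVAL. The helper name is
 * hypothetical.
 */
#if 0
static u64 example_scale_to_interval(u64 total, u32 ms)
{
	u64 cnt;

	if (!ms || ms >= LPFC_CMF_INTERVAL)
		return 0;		/* no adjustment needed */
	cnt = div_u64(total, ms);	/* bytes per ms */
	cnt *= LPFC_CMF_INTERVAL;	/* projected full-interval total */
	return cnt - total;		/* the "extra" bytes */
}
#endif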
6125 #define trunk_link_status(__idx)\
6126 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6127 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6128 "Link up" : "Link down") : "NA"
6129 /* Did port __idx report an error? */
6130 #define trunk_port_fault(__idx)\
6131 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6132 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
6135 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6136 struct lpfc_acqe_fc_la *acqe_fc)
6138 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
6139 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6142 phba->sli4_hba.link_state.speed =
6143 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6144 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6146 phba->sli4_hba.link_state.logical_speed =
6147 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6148 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
6149 phba->fc_linkspeed =
6150 lpfc_async_link_speed_to_read_top(
6152 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6154 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6155 phba->trunk_link.link0.state =
6156 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6157 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6158 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6161 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6162 phba->trunk_link.link1.state =
6163 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6164 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6165 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6168 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6169 phba->trunk_link.link2.state =
6170 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6171 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6172 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6175 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6176 phba->trunk_link.link3.state =
6177 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6178 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6179 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
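/* Each configured trunk port follows the same pattern: bit N of
 * port_fault gates whether the reported fault code applies to link N. */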
6184 phba->trunk_link.phy_lnk_speed =
6185 phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
6187 phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;
6189 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6190 "2910 Async FC Trunking Event - Speed:%d\n"
6191 "\tLogical speed:%d "
6192 "port0: %s port1: %s port2: %s port3: %s\n",
6193 phba->sli4_hba.link_state.speed,
6194 phba->sli4_hba.link_state.logical_speed,
6195 trunk_link_status(0), trunk_link_status(1),
6196 trunk_link_status(2), trunk_link_status(3));
6198 if (phba->cmf_active_mode != LPFC_CFG_OFF)
6199 lpfc_cmf_signal_init(phba);
6202 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6203 "3202 trunk error:0x%x (%s) seen on port0:%s "
6205 * SLI-4: only error codes up to 0xA are
6206 * defined as of now. Print an appropriate
6207 * message in case the driver needs to be updated.
6209 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6210 "UNDEFINED. update driver." : trunk_errmsg[err],
6211 trunk_port_fault(0), trunk_port_fault(1),
6212 trunk_port_fault(2), trunk_port_fault(3));
6217 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6218 * @phba: pointer to lpfc hba data structure.
6219 * @acqe_fc: pointer to the async fc completion queue entry.
6221 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6222 * that the event was received and then issue a read_topology mailbox command so
6223 * that the rest of the driver will treat it the same as SLI3.
6226 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6230 struct lpfc_mbx_read_top *la;
6234 if (bf_get(lpfc_trailer_type, acqe_fc) !=
6235 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6236 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6237 "2895 Non FC link Event detected.(%d)\n",
6238 bf_get(lpfc_trailer_type, acqe_fc));
6242 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6243 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6244 lpfc_update_trunk_link_status(phba, acqe_fc);
6248 /* Keep the link status for extra SLI4 state machine reference */
6249 phba->sli4_hba.link_state.speed =
6250 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6251 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6252 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6253 phba->sli4_hba.link_state.topology =
6254 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6255 phba->sli4_hba.link_state.status =
6256 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6257 phba->sli4_hba.link_state.type =
6258 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6259 phba->sli4_hba.link_state.number =
6260 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6261 phba->sli4_hba.link_state.fault =
6262 bf_get(lpfc_acqe_link_fault, acqe_fc);
6263 phba->sli4_hba.link_state.link_status =
6264 bf_get(lpfc_acqe_fc_la_link_status, acqe_fc);
6267 * Only select attention types need logical speed modification to what
6268 * was previously set.
6270 if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP &&
6271 phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
6272 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6273 LPFC_FC_LA_TYPE_LINK_DOWN)
6274 phba->sli4_hba.link_state.logical_speed = 0;
6275 else if (!phba->sli4_hba.conf_trunk)
6276 phba->sli4_hba.link_state.logical_speed =
6277 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
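/* As in the trunking path above, llink_spd is reported in 10 Mbps
 * units, hence the * 10 scaling to megabits per second. */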
6280 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6281 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6282 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6283 "%dMbps Fault:x%x Link Status:x%x\n",
6284 phba->sli4_hba.link_state.speed,
6285 phba->sli4_hba.link_state.topology,
6286 phba->sli4_hba.link_state.status,
6287 phba->sli4_hba.link_state.type,
6288 phba->sli4_hba.link_state.number,
6289 phba->sli4_hba.link_state.logical_speed,
6290 phba->sli4_hba.link_state.fault,
6291 phba->sli4_hba.link_state.link_status);
6294 * The following attention types are informational only, providing
6295 * further details about link status. Overwrite the value of
6296 * link_state.status appropriately. No further action is required.
6298 if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
6299 switch (phba->sli4_hba.link_state.status) {
6300 case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
6301 log_level = KERN_WARNING;
6302 phba->sli4_hba.link_state.status =
6303 LPFC_FC_LA_TYPE_LINK_DOWN;
6305 case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
6307 * During bb credit recovery establishment, receiving
6308 * this attention type is normal. Link Up attention
6309 * type is expected to occur before this informational
6310 * attention type so keep the Link Up status.
6312 log_level = KERN_INFO;
6313 phba->sli4_hba.link_state.status =
6314 LPFC_FC_LA_TYPE_LINK_UP;
6317 log_level = KERN_INFO;
6320 lpfc_log_msg(phba, log_level, LOG_SLI,
6321 "2992 Async FC event - Informational Link "
6322 "Attention Type x%x\n",
6323 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc));
6327 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6330 "2897 The mboxq allocation failed\n");
6333 rc = lpfc_mbox_rsrc_prep(phba, pmb);
6335 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6336 "2898 The mboxq prep failed\n");
6340 /* Cleanup any outstanding ELS commands */
6341 lpfc_els_flush_all_cmd(phba);
6343 /* Block ELS IOCBs until we have done process link event */
6344 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6346 /* Update link event statistics */
6347 phba->sli.slistat.link_event++;
6349 /* Create lpfc_handle_latt mailbox command from link ACQE */
6350 lpfc_read_topology(phba, pmb, pmb->ctx_buf);
6351 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6352 pmb->vport = phba->pport;
6354 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6355 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6357 switch (phba->sli4_hba.link_state.status) {
6358 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6359 phba->link_flag |= LS_MDS_LINK_DOWN;
6361 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6362 phba->link_flag |= LS_MDS_LOOPBACK;
6368 /* Initialize completion status */
6370 mb->mbxStatus = MBX_SUCCESS;
6372 /* Parse port fault information field */
6373 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6375 /* Parse and translate link attention fields */
6376 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6377 la->eventTag = acqe_fc->event_tag;
6379 if (phba->sli4_hba.link_state.status ==
6380 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6381 bf_set(lpfc_mbx_read_top_att_type, la,
6382 LPFC_FC_LA_TYPE_UNEXP_WWPN);
6384 bf_set(lpfc_mbx_read_top_att_type, la,
6385 LPFC_FC_LA_TYPE_LINK_DOWN);
6387 /* Invoke the mailbox command callback function */
6388 lpfc_mbx_cmpl_read_topology(phba, pmb);
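/* Note: for these non-link-up attention types the READ_TOPOLOGY mailbox
 * is never issued; a successful completion is synthesized above and the
 * handler is invoked directly, so the SLI3-style link-down processing
 * runs unchanged. */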
6393 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6394 if (rc == MBX_NOT_FINISHED)
6399 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
6403 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6404 * @phba: pointer to lpfc hba data structure.
6405 * @acqe_sli: pointer to the async SLI completion queue entry.
6407 * This routine is to handle the SLI4 asynchronous SLI events.
6410 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6416 uint8_t operational = 0;
6417 struct temp_event temp_event_data;
6418 struct lpfc_acqe_misconfigured_event *misconfigured;
6419 struct lpfc_acqe_cgn_signal *cgn_signal;
6420 struct Scsi_Host *shost;
6421 struct lpfc_vport **vports;
6424 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6426 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6427 "2901 Async SLI event - Type:%d, Event Data: x%08x "
6428 "x%08x x%08x x%08x\n", evt_type,
6429 acqe_sli->event_data1, acqe_sli->event_data2,
6430 acqe_sli->event_data3, acqe_sli->trailer);
6432 port_name = phba->Port[0];
6433 if (port_name == 0x00)
6434 port_name = '?'; /* port name is empty */
6437 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6438 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6439 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6440 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6442 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6443 "3190 Over Temperature:%d Celsius- Port Name %c\n",
6444 acqe_sli->event_data1, port_name);
6446 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6447 shost = lpfc_shost_from_vport(phba->pport);
6448 fc_host_post_vendor_event(shost, fc_get_event_number(),
6449 sizeof(temp_event_data),
6450 (char *)&temp_event_data,
6451 SCSI_NL_VID_TYPE_PCI
6452 | PCI_VENDOR_ID_EMULEX);
6454 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6455 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6456 temp_event_data.event_code = LPFC_NORMAL_TEMP;
6457 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6459 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT,
6460 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6461 acqe_sli->event_data1, port_name);
6463 shost = lpfc_shost_from_vport(phba->pport);
6464 fc_host_post_vendor_event(shost, fc_get_event_number(),
6465 sizeof(temp_event_data),
6466 (char *)&temp_event_data,
6467 SCSI_NL_VID_TYPE_PCI
6468 | PCI_VENDOR_ID_EMULEX);
6470 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6471 misconfigured = (struct lpfc_acqe_misconfigured_event *)
6472 &acqe_sli->event_data1;
6474 /* fetch the status for this port */
6475 switch (phba->sli4_hba.lnk_info.lnk_no) {
6476 case LPFC_LINK_NUMBER_0:
6477 status = bf_get(lpfc_sli_misconfigured_port0_state,
6478 &misconfigured->theEvent);
6479 operational = bf_get(lpfc_sli_misconfigured_port0_op,
6480 &misconfigured->theEvent);
6482 case LPFC_LINK_NUMBER_1:
6483 status = bf_get(lpfc_sli_misconfigured_port1_state,
6484 &misconfigured->theEvent);
6485 operational = bf_get(lpfc_sli_misconfigured_port1_op,
6486 &misconfigured->theEvent);
6488 case LPFC_LINK_NUMBER_2:
6489 status = bf_get(lpfc_sli_misconfigured_port2_state,
6490 &misconfigured->theEvent);
6491 operational = bf_get(lpfc_sli_misconfigured_port2_op,
6492 &misconfigured->theEvent);
6494 case LPFC_LINK_NUMBER_3:
6495 status = bf_get(lpfc_sli_misconfigured_port3_state,
6496 &misconfigured->theEvent);
6497 operational = bf_get(lpfc_sli_misconfigured_port3_op,
6498 &misconfigured->theEvent);
6501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6503 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6504 "event: Invalid link %d",
6505 phba->sli4_hba.lnk_info.lnk_no);
6509 /* Skip if optic state unchanged */
6510 if (phba->sli4_hba.lnk_info.optic_state == status)
6514 case LPFC_SLI_EVENT_STATUS_VALID:
6515 sprintf(message, "Physical Link is functional");
6517 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6518 sprintf(message, "Optics faulted/incorrectly "
6519 "installed/not installed - Reseat optics, "
6520 "if issue not resolved, replace.");
6522 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6524 "Optics of two types installed - Remove one "
6525 "optic or install matching pair of optics.");
6527 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6528 sprintf(message, "Incompatible optics - Replace with "
6529 "compatible optics for card to function.");
6531 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6532 sprintf(message, "Unqualified optics - Replace with "
6533 "Avago optics for Warranty and Technical "
6534 "Support - Link is%s operational",
6535 (operational) ? " not" : "");
6537 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6538 sprintf(message, "Uncertified optics - Replace with "
6539 "Avago-certified optics to enable link "
6540 "operation - Link is%s operational",
6541 (operational) ? " not" : "");
6544 /* firmware is reporting a status we don't know about */
6545 sprintf(message, "Unknown event status x%02x", status);
6549 /* Issue READ_CONFIG mbox command to refresh supported speeds */
6550 rc = lpfc_sli4_read_config(phba);
6553 lpfc_printf_log(phba, KERN_ERR,
6555 "3194 Unable to retrieve supported "
6556 "speeds, rc = 0x%x\n", rc);
6558 rc = lpfc_sli4_refresh_params(phba);
6560 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6561 "3174 Unable to update pls support, "
6564 vports = lpfc_create_vport_work_array(phba);
6565 if (vports != NULL) {
6566 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6568 shost = lpfc_shost_from_vport(vports[i]);
6569 lpfc_host_supported_speeds_set(shost);
6572 lpfc_destroy_vport_work_array(phba, vports);
6574 phba->sli4_hba.lnk_info.optic_state = status;
6575 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6576 "3176 Port Name %c %s\n", port_name, message);
6578 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6579 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6580 "3192 Remote DPort Test Initiated - "
6581 "Event Data1:x%08x Event Data2: x%08x\n",
6582 acqe_sli->event_data1, acqe_sli->event_data2);
6584 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6585 /* Call FW to obtain active parms */
6586 lpfc_sli4_cgn_parm_chg_evt(phba);
6588 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6589 /* Misconfigured WWN. Reports that the SLI Port is configured
6590 * to use FA-WWN, but the attached device doesn't support it.
6591 * Event Data1 - N.A, Event Data2 - N.A
6592 * This event only happens on the physical port.
6594 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
6595 "2699 Misconfigured FA-PWWN - Attached device "
6596 "does not support FA-PWWN\n");
6597 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
6598 memset(phba->pport->fc_portname.u.wwn, 0,
6599 sizeof(struct lpfc_name));
6601 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6602 /* EEPROM failure. No driver action is required */
6603 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6604 "2518 EEPROM failure - "
6605 "Event Data1: x%08x Event Data2: x%08x\n",
6606 acqe_sli->event_data1, acqe_sli->event_data2);
6608 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6609 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6611 cgn_signal = (struct lpfc_acqe_cgn_signal *)
6612 &acqe_sli->event_data1;
6613 phba->cgn_acqe_cnt++;
6615 cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6616 atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6617 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6619 /* no threshold for CMF, even 1 signal will trigger an event */
6621 /* Alarm overrides warning, so check that first */
6622 if (cgn_signal->alarm_cnt) {
6623 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6624 /* Keep track of alarm cnt for CMF_SYNC_WQE */
6625 atomic_add(cgn_signal->alarm_cnt,
6626 &phba->cgn_sync_alarm_cnt);
6629 /* signal action needs to be taken */
6630 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6631 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6632 /* Keep track of warning cnt for CMF_SYNC_WQE */
6633 atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6637 case LPFC_SLI_EVENT_TYPE_RD_SIGNAL:
6638 /* May be accompanied by a temperature event */
6639 lpfc_printf_log(phba, KERN_INFO,
6640 LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT,
6641 "2902 Remote Degrade Signaling: x%08x x%08x "
6643 acqe_sli->event_data1, acqe_sli->event_data2,
6644 acqe_sli->event_data3);
6646 case LPFC_SLI_EVENT_TYPE_RESET_CM_STATS:
6647 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
6648 "2905 Reset CM statistics\n");
6649 lpfc_sli4_async_cmstat_evt(phba);
6652 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6653 "3193 Unrecognized SLI event, type: 0x%x",
6660 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6661 * @vport: pointer to vport data structure.
6663 * This routine is to perform Clear Virtual Link (CVL) on a vport in
6664 * response to a CVL event.
6666 * Return the pointer to the ndlp with the vport if successful, otherwise NULL.
6669 static struct lpfc_nodelist *
6670 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6672 struct lpfc_nodelist *ndlp;
6673 struct Scsi_Host *shost;
6674 struct lpfc_hba *phba;
6681 ndlp = lpfc_findnode_did(vport, Fabric_DID);
6683 /* Cannot find existing Fabric ndlp, so allocate a new one */
6684 ndlp = lpfc_nlp_init(vport, Fabric_DID);
6687 /* Set the node type */
6688 ndlp->nlp_type |= NLP_FABRIC;
6689 /* Put ndlp onto node list */
6690 lpfc_enqueue_node(vport, ndlp);
6692 if ((phba->pport->port_state < LPFC_FLOGI) &&
6693 (phba->pport->port_state != LPFC_VPORT_FAILED))
6695 /* If virtual link is not yet instantiated ignore CVL */
6696 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6697 && (vport->port_state != LPFC_VPORT_FAILED))
6699 shost = lpfc_shost_from_vport(vport);
6702 lpfc_linkdown_port(vport);
6703 lpfc_cleanup_pending_mbox(vport);
6704 set_bit(FC_VPORT_CVL_RCVD, &vport->fc_flag);
6710 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6711 * @phba: pointer to lpfc hba data structure.
6713 * This routine is to perform Clear Virtual Link (CVL) on all vports in
6714 * response to a FCF dead event.
6717 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6719 struct lpfc_vport **vports;
6722 vports = lpfc_create_vport_work_array(phba);
6724 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6725 lpfc_sli4_perform_vport_cvl(vports[i]);
6726 lpfc_destroy_vport_work_array(phba, vports);
6730 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6731 * @phba: pointer to lpfc hba data structure.
6732 * @acqe_fip: pointer to the async fcoe completion queue entry.
6734 * This routine is to handle the SLI4 asynchronous fcoe event.
6737 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6738 struct lpfc_acqe_fip *acqe_fip)
6740 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6742 struct lpfc_vport *vport;
6743 struct lpfc_nodelist *ndlp;
6744 int active_vlink_present;
6745 struct lpfc_vport **vports;
6748 phba->fc_eventTag = acqe_fip->event_tag;
6749 phba->fcoe_eventtag = acqe_fip->event_tag;
6750 switch (event_type) {
6751 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6752 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6753 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6754 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6755 "2546 New FCF event, evt_tag:x%x, "
6757 acqe_fip->event_tag,
6760 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6762 "2788 FCF param modified event, "
6763 "evt_tag:x%x, index:x%x\n",
6764 acqe_fip->event_tag,
6766 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6768 * During period of FCF discovery, read the FCF
6769 * table record indexed by the event to update
6770 * FCF roundrobin failover eligible FCF bmask.
6772 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6774 "2779 Read FCF (x%x) for updating "
6775 "roundrobin FCF failover bmask\n",
6777 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6780 /* If the FCF discovery is in progress, do nothing. */
6781 spin_lock_irq(&phba->hbalock);
6782 if (phba->hba_flag & FCF_TS_INPROG) {
6783 spin_unlock_irq(&phba->hbalock);
6786 /* If fast FCF failover rescan event is pending, do nothing */
6787 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6788 spin_unlock_irq(&phba->hbalock);
6792 /* If the FCF has been in discovered state, do nothing. */
6793 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6794 spin_unlock_irq(&phba->hbalock);
6797 spin_unlock_irq(&phba->hbalock);
6799 /* Otherwise, scan the entire FCF table and re-discover SAN */
6800 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6801 "2770 Start FCF table scan per async FCF "
6802 "event, evt_tag:x%x, index:x%x\n",
6803 acqe_fip->event_tag, acqe_fip->index);
6804 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6805 LPFC_FCOE_FCF_GET_FIRST);
6807 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6808 "2547 Issue FCF scan read FCF mailbox "
6809 "command failed (x%x)\n", rc);
6812 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6813 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6814 "2548 FCF Table full count 0x%x tag 0x%x\n",
6815 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6816 acqe_fip->event_tag);
6819 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6820 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6821 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6822 "2549 FCF (x%x) disconnected from network, "
6823 "tag:x%x\n", acqe_fip->index,
6824 acqe_fip->event_tag);
6826 * If we are in the middle of FCF failover process, clear
6827 * the corresponding FCF bit in the roundrobin bitmap.
6829 spin_lock_irq(&phba->hbalock);
6830 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6831 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6832 spin_unlock_irq(&phba->hbalock);
6833 /* Update FLOGI FCF failover eligible FCF bmask */
6834 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6837 spin_unlock_irq(&phba->hbalock);
6839 /* If the event is not for currently used fcf do nothing */
6840 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6844 * Otherwise, request the port to rediscover the entire FCF
6845 * table for a fast recovery in case the current FCF
6846 * is no longer valid, as we are not already in the middle
6847 * of the FCF failover process.
6849 spin_lock_irq(&phba->hbalock);
6850 /* Mark the fast failover process in progress */
6851 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6852 spin_unlock_irq(&phba->hbalock);
6854 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6855 "2771 Start FCF fast failover process due to "
6856 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6857 "\n", acqe_fip->event_tag, acqe_fip->index);
6858 rc = lpfc_sli4_redisc_fcf_table(phba);
6860 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6862 "2772 Issue FCF rediscover mailbox "
6863 "command failed, fail through to FCF "
6865 spin_lock_irq(&phba->hbalock);
6866 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6867 spin_unlock_irq(&phba->hbalock);
6869 * Last resort will fail over by treating this
6870 * as a link down to FCF registration.
6872 lpfc_sli4_fcf_dead_failthrough(phba);
6874 /* Reset FCF roundrobin bmask for new discovery */
6875 lpfc_sli4_clear_fcf_rr_bmask(phba);
6877 * Handling fast FCF failover to a DEAD FCF event is
6878 * considered equivalent to receiving CVL to all vports.
6880 lpfc_sli4_perform_all_vport_cvl(phba);
6883 case LPFC_FIP_EVENT_TYPE_CVL:
6884 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6885 lpfc_printf_log(phba, KERN_ERR,
6887 "2718 Clear Virtual Link Received for VPI 0x%x"
6888 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6890 vport = lpfc_find_vport_by_vpid(phba,
6892 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6895 active_vlink_present = 0;
6897 vports = lpfc_create_vport_work_array(phba);
6899 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6901 if (!test_bit(FC_VPORT_CVL_RCVD,
6902 &vports[i]->fc_flag) &&
6903 vports[i]->port_state > LPFC_FDISC) {
6904 active_vlink_present = 1;
6908 lpfc_destroy_vport_work_array(phba, vports);
6912 * Don't re-instantiate if vport is marked for deletion.
6913 * If we are here first then vport_delete is going to wait
6914 * for discovery to complete.
6916 if (!test_bit(FC_UNLOADING, &vport->load_flag) &&
6917 active_vlink_present) {
6919 * If there are other active VLinks present,
6920 * re-instantiate the Vlink using FDISC.
6922 mod_timer(&ndlp->nlp_delayfunc,
6923 jiffies + msecs_to_jiffies(1000));
6924 spin_lock_irq(&ndlp->lock);
6925 ndlp->nlp_flag |= NLP_DELAY_TMO;
6926 spin_unlock_irq(&ndlp->lock);
6927 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6928 vport->port_state = LPFC_FDISC;
6931 * Otherwise, request the port to rediscover
6932 * the entire FCF table for a fast recovery
6933 * in case the current FCF is no longer
6934 * valid, if we are not already in the
6935 * FCF failover process.
6937 spin_lock_irq(&phba->hbalock);
6938 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6939 spin_unlock_irq(&phba->hbalock);
6942 /* Mark the fast failover process in progress */
6943 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6944 spin_unlock_irq(&phba->hbalock);
6945 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6947 "2773 Start FCF failover per CVL, "
6948 "evt_tag:x%x\n", acqe_fip->event_tag);
6949 rc = lpfc_sli4_redisc_fcf_table(phba);
6951 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6953 "2774 Issue FCF rediscover "
6954 "mailbox command failed, "
6955 "through to CVL event\n");
6956 spin_lock_irq(&phba->hbalock);
6957 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6958 spin_unlock_irq(&phba->hbalock);
6960 * Last resort will be a retry on the
6961 * currently registered FCF entry.
6963 lpfc_retry_pport_discovery(phba);
6966 * Reset FCF roundrobin bmask for new discovery. */
6969 lpfc_sli4_clear_fcf_rr_bmask(phba);
6973 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6974 "0288 Unknown FCoE event type 0x%x event tag "
6975 "0x%x\n", event_type, acqe_fip->event_tag);
6981 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6982 * @phba: pointer to lpfc hba data structure.
6983 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
6985 * This routine is to handle the SLI4 asynchronous dcbx event.
6988 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
6989 struct lpfc_acqe_dcbx *acqe_dcbx)
6991 phba->fc_eventTag = acqe_dcbx->event_tag;
6992 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6993 "0290 The SLI4 DCBX asynchronous event is not "
6998 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6999 * @phba: pointer to lpfc hba data structure.
7000 * @acqe_grp5: pointer to the async grp5 completion queue entry.
7002 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
7003 * is an asynchronous notification of a logical link speed change. The Port
7004 * reports the logical link speed in units of 10Mbps.
7007 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
7008 struct lpfc_acqe_grp5 *acqe_grp5)
7010 uint16_t prev_ll_spd;
7012 phba->fc_eventTag = acqe_grp5->event_tag;
7013 phba->fcoe_eventtag = acqe_grp5->event_tag;
7014 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
7015 phba->sli4_hba.link_state.logical_speed =
7016 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
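/* The ACQE carries logical speed in 10 Mbps units, so a raw value of
 * 1000 becomes 10000 Mbps (10 Gbps) after the * 10 scaling above. */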
7017 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7018 "2789 GRP5 Async Event: Updating logical link speed "
7019 "from %dMbps to %dMbps\n", prev_ll_spd,
7020 phba->sli4_hba.link_state.logical_speed);
7024 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
7025 * @phba: pointer to lpfc hba data structure.
7027 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
7028 * is an asynchronous notification of a request to reset CM stats.
7031 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
7035 lpfc_init_congestion_stat(phba);
7039 * lpfc_cgn_params_val - Validate FW congestion parameters.
7040 * @phba: pointer to lpfc hba data structure.
7041 * @p_cfg_param: pointer to FW provided congestion parameters.
7043 * This routine validates the congestion parameters passed
7044 * by the FW to the driver via an ACQE event.
7047 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
7049 spin_lock_irq(&phba->hbalock);
7051 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
7052 LPFC_CFG_MONITOR)) {
7053 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
7054 "6225 CMF mode param out of range: %d\n",
7055 p_cfg_param->cgn_param_mode);
7056 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
7059 spin_unlock_irq(&phba->hbalock);
7062 static const char * const lpfc_cmf_mode_to_str[] = {
7069 * lpfc_cgn_params_parse - Process a FW cong parm change event
7070 * @phba: pointer to lpfc hba data structure.
7071 * @p_cgn_param: pointer to a data buffer with the FW cong params.
7072 * @len: the size of pdata in bytes.
7074 * This routine validates the congestion management buffer signature
7075 * from the FW, validates the contents and makes corrections for
7076 * valid, in-range values. If the signature magic is correct and
7077 * after parameter validation, the contents are copied to the driver's
7078 * @phba structure. If the magic is incorrect, an error message is logged.
7082 lpfc_cgn_params_parse(struct lpfc_hba *phba,
7083 struct lpfc_cgn_param *p_cgn_param, uint32_t len)
7085 struct lpfc_cgn_info *cp;
7086 uint32_t crc, oldmode;
7087 char acr_string[4] = {0};
7089 /* Make sure the FW has encoded the correct magic number to
7090 * validate the congestion parameter in FW memory.
7092 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
7093 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7094 "4668 FW cgn parm buffer data: "
7095 "magic 0x%x version %d mode %d "
7096 "level0 %d level1 %d "
7097 "level2 %d byte13 %d "
7098 "byte14 %d byte15 %d "
7099 "byte11 %d byte12 %d activeMode %d\n",
7100 p_cgn_param->cgn_param_magic,
7101 p_cgn_param->cgn_param_version,
7102 p_cgn_param->cgn_param_mode,
7103 p_cgn_param->cgn_param_level0,
7104 p_cgn_param->cgn_param_level1,
7105 p_cgn_param->cgn_param_level2,
7106 p_cgn_param->byte13,
7107 p_cgn_param->byte14,
7108 p_cgn_param->byte15,
7109 p_cgn_param->byte11,
7110 p_cgn_param->byte12,
7111 phba->cmf_active_mode);
7113 oldmode = phba->cmf_active_mode;
7115 /* Any parameters out of range are corrected to defaults
7116 * by this routine. No need to fail.
7118 lpfc_cgn_params_val(phba, p_cgn_param);
7120 /* Parameters are verified, move them into driver storage */
7121 spin_lock_irq(&phba->hbalock);
7122 memcpy(&phba->cgn_p, p_cgn_param,
7123 sizeof(struct lpfc_cgn_param));
7125 /* Update parameters in congestion info buffer now */
7127 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7128 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7129 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7130 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7131 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7132 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
7133 LPFC_CGN_CRC32_SEED);
7134 cp->cgn_info_crc = cpu_to_le32(crc);
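/* The congestion info buffer carries a CRC32; any in-place update of
 * its fields must be followed by recomputing the CRC over the full
 * buffer, as above, so consumers can still validate it. */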
7136 spin_unlock_irq(&phba->hbalock);
7138 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7142 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7143 /* Turning CMF on */
7144 lpfc_cmf_start(phba);
7146 if (phba->link_state >= LPFC_LINK_UP) {
7147 phba->cgn_reg_fpin =
7148 phba->cgn_init_reg_fpin;
7149 phba->cgn_reg_signal =
7150 phba->cgn_init_reg_signal;
7151 lpfc_issue_els_edc(phba->pport, 0);
7155 case LPFC_CFG_MANAGED:
7156 switch (phba->cgn_p.cgn_param_mode) {
7158 /* Turning CMF off */
7159 lpfc_cmf_stop(phba);
7160 if (phba->link_state >= LPFC_LINK_UP)
7161 lpfc_issue_els_edc(phba->pport, 0);
7163 case LPFC_CFG_MONITOR:
7164 phba->cmf_max_bytes_per_interval =
7165 phba->cmf_link_byte_count;
7167 /* Resume blocked IO - unblock on workqueue */
7168 queue_work(phba->wq,
7169 &phba->unblock_request_work);
7173 case LPFC_CFG_MONITOR:
7174 switch (phba->cgn_p.cgn_param_mode) {
7176 /* Turning CMF off */
7177 lpfc_cmf_stop(phba);
7178 if (phba->link_state >= LPFC_LINK_UP)
7179 lpfc_issue_els_edc(phba->pport, 0);
7181 case LPFC_CFG_MANAGED:
7182 lpfc_cmf_signal_init(phba);
7187 if (oldmode != LPFC_CFG_OFF ||
7188 oldmode != phba->cgn_p.cgn_param_mode) {
7189 if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED)
7190 scnprintf(acr_string, sizeof(acr_string), "%u",
7191 phba->cgn_p.cgn_param_level0);
7193 scnprintf(acr_string, sizeof(acr_string), "NA");
7195 dev_info(&phba->pcidev->dev, "%d: "
7196 "4663 CMF: Mode %s acr %s\n",
7198 lpfc_cmf_mode_to_str
7199 [phba->cgn_p.cgn_param_mode],
7203 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7204 "4669 FW cgn parm buf wrong magic 0x%x "
7205 "version %d\n", p_cgn_param->cgn_param_magic,
7206 p_cgn_param->cgn_param_version);
7211 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7212 * @phba: pointer to lpfc hba data structure.
7214 * This routine issues a read_object mailbox command to
7215 * get the congestion management parameters from the FW,
7216 * parses them and updates the driver-maintained values.
7219 * 0 if the object was empty
7220 * a negative errno value if an error was encountered
7221 * Count if bytes were read from object
7224 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7227 struct lpfc_cgn_param *p_cgn_param = NULL;
7231 /* Find out if the FW has a new set of congestion parameters. */
7232 len = sizeof(struct lpfc_cgn_param);
7233 pdata = kzalloc(len, GFP_KERNEL);
7236 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7239 /* 0 means no data. A negative means error. A positive means
7240 * bytes were copied.
7243 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7244 "4670 CGN RD OBJ returns no data\n");
7246 } else if (ret < 0) {
7247 /* Some error. Just exit and return it to the caller. */
7251 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7252 "6234 READ CGN PARAMS Successful %d\n", len);
7254 /* Parse data pointer over len and update the phba congestion
7255 * parameters with values passed back. The receive rate values
7256 * may have been altered in FW, but take no action here.
7258 p_cgn_param = (struct lpfc_cgn_param *)pdata;
7259 lpfc_cgn_params_parse(phba, p_cgn_param, len);
7267 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7268 * @phba: pointer to lpfc hba data structure.
7270 * The FW generated Async ACQE SLI event calls this routine when
7271 * the event type is an SLI Internal Port Event and the Event Code
7272 * indicates a change to the FW maintained congestion parameters.
7274 * This routine executes a Read_Object mailbox call to obtain the
7275 * current congestion parameters maintained in FW and corrects
7276 * the driver's active congestion parameters.
7278 * The acqe event is not passed because there is no further data required.
7281 * Returns nonzero error if event processing encountered an error.
7282 * Zero otherwise for success.
7285 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7289 if (!phba->sli4_hba.pc_sli4_params.cmf) {
7290 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7291 "4664 Cgn Evt when E2E off. Drop event\n");
7295 /* If the event is claiming an empty object, it's ok. A write
7296 * could have cleared it. The only error is a negative return status.
7299 ret = lpfc_sli4_cgn_params_read(phba);
7301 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7302 "4667 Error reading Cgn Params (%d)\n",
7305 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7306 "4673 CGN Event empty object.\n");
7312 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
7313 * @phba: pointer to lpfc hba data structure.
7315 * This routine is invoked by the worker thread to process all the pending
7316 * SLI4 asynchronous events.
7318 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7320 struct lpfc_cq_event *cq_event;
7321 unsigned long iflags;
7323 /* First, declare the async event has been handled */
7324 spin_lock_irqsave(&phba->hbalock, iflags);
7325 phba->hba_flag &= ~ASYNC_EVENT;
7326 spin_unlock_irqrestore(&phba->hbalock, iflags);
7328 /* Now, handle all the async events */
7329 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7330 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7331 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7332 cq_event, struct lpfc_cq_event, list);
7333 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7336 /* Process the asynchronous event */
7337 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7338 case LPFC_TRAILER_CODE_LINK:
7339 lpfc_sli4_async_link_evt(phba,
7340 &cq_event->cqe.acqe_link);
7342 case LPFC_TRAILER_CODE_FCOE:
7343 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7345 case LPFC_TRAILER_CODE_DCBX:
7346 lpfc_sli4_async_dcbx_evt(phba,
7347 &cq_event->cqe.acqe_dcbx);
7349 case LPFC_TRAILER_CODE_GRP5:
7350 lpfc_sli4_async_grp5_evt(phba,
7351 &cq_event->cqe.acqe_grp5);
7353 case LPFC_TRAILER_CODE_FC:
7354 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7356 case LPFC_TRAILER_CODE_SLI:
7357 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7360 lpfc_printf_log(phba, KERN_ERR,
7362 "1804 Invalid asynchronous event code: "
7363 "x%x\n", bf_get(lpfc_trailer_code,
7364 &cq_event->cqe.mcqe_cmpl));
7368 /* Free the completion event processed to the free pool */
7369 lpfc_sli4_cq_event_release(phba, cq_event);
7370 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7372 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
7376 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7377 * @phba: pointer to lpfc hba data structure.
7379 * This routine is invoked by the worker thread to process FCF table
7380 * rediscovery pending completion event.
7382 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7386 spin_lock_irq(&phba->hbalock);
7387 /* Clear FCF rediscovery timeout event */
7388 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7389 /* Clear driver fast failover FCF record flag */
7390 phba->fcf.failover_rec.flag = 0;
7391 /* Set state for FCF fast failover */
7392 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7393 spin_unlock_irq(&phba->hbalock);
7395 /* Scan FCF table from the first entry to re-discover SAN */
7396 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7397 "2777 Start post-quiescent FCF table scan\n");
7398 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7401 "2747 Issue FCF scan read FCF mailbox "
7402 "command failed 0x%x\n", rc);
7406 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7407 * @phba: pointer to lpfc hba data structure.
7408 * @dev_grp: The HBA PCI-Device group number.
7410 * This routine is invoked to set up the per HBA PCI-Device group function
7411 * API jump table entries.
7413 * Return: 0 if success, otherwise -ENODEV
7416 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7420 /* Set up lpfc PCI-device group */
7421 phba->pci_dev_grp = dev_grp;
7423 /* The LPFC_PCI_DEV_OC uses SLI4 */
7424 if (dev_grp == LPFC_PCI_DEV_OC)
7425 phba->sli_rev = LPFC_SLI_REV4;
7427 /* Set up device INIT API function jump table */
7428 rc = lpfc_init_api_table_setup(phba, dev_grp);
7431 /* Set up SCSI API function jump table */
7432 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7435 /* Set up SLI API function jump table */
7436 rc = lpfc_sli_api_table_setup(phba, dev_grp);
7439 /* Set up MBOX API function jump table */
7440 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7448 * lpfc_log_intr_mode - Log the active interrupt mode
7449 * @phba: pointer to lpfc hba data structure.
7450 * @intr_mode: active interrupt mode adopted.
7452 * This routine is invoked to log the currently used active interrupt mode.
7455 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7457 switch (intr_mode) {
7459 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7460 "0470 Enable INTx interrupt mode.\n");
7463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7464 "0481 Enabled MSI interrupt mode.\n");
7467 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7468 "0480 Enabled MSI-X interrupt mode.\n");
7471 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7472 "0482 Illegal interrupt mode.\n");
7479 * lpfc_enable_pci_dev - Enable a generic PCI device.
7480 * @phba: pointer to lpfc hba data structure.
7482 * This routine is invoked to enable the PCI device that is common to all
7487 * other values - error
7490 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7492 struct pci_dev *pdev;
7494 /* Obtain PCI device reference */
7498 pdev = phba->pcidev;
7499 /* Enable PCI device */
7500 if (pci_enable_device_mem(pdev))
7502 /* Request PCI resource for the device */
7503 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7504 goto out_disable_device;
7505 /* Set up device as PCI master and save state for EEH */
7506 pci_set_master(pdev);
7507 pci_try_set_mwi(pdev);
7508 pci_save_state(pdev);
7510 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7511 if (pci_is_pcie(pdev))
7512 pdev->needs_freset = 1;
7517 pci_disable_device(pdev);
7519 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7520 "1401 Failed to enable pci device\n");
7525 * lpfc_disable_pci_dev - Disable a generic PCI device.
7526 * @phba: pointer to lpfc hba data structure.
7528 * This routine is invoked to disable the PCI device that is common to all
7532 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7534 struct pci_dev *pdev;
7536 /* Obtain PCI device reference */
7540 pdev = phba->pcidev;
7541 /* Release PCI resource and disable PCI device */
7542 pci_release_mem_regions(pdev);
7543 pci_disable_device(pdev);
7549 * lpfc_reset_hba - Reset a hba
7550 * @phba: pointer to lpfc hba data structure.
7552 * This routine is invoked to reset a hba device. It brings the HBA
7553 * offline, performs a board restart, and then brings the board back
7554 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
7555 * any outstanding mailbox commands.
7558 lpfc_reset_hba(struct lpfc_hba *phba)
7562 /* If resets are disabled then set error state and return. */
7563 if (!phba->cfg_enable_hba_reset) {
7564 phba->link_state = LPFC_HBA_ERROR;
7568 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7569 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7570 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7572 if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) {
7573 /* Perform a PCI function reset to start from clean */
7574 rc = lpfc_pci_function_reset(phba);
7575 lpfc_els_flush_all_cmd(phba);
7577 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7578 lpfc_sli_flush_io_rings(phba);
7581 clear_bit(MBX_TMO_ERR, &phba->bit_flags);
7583 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7584 "8888 PCI function reset failed rc %x\n",
7587 lpfc_sli_brdrestart(phba);
7589 lpfc_unblock_mgmt_io(phba);
7594 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7595 * @phba: pointer to lpfc hba data structure.
7597 * This function reads the PCI SR-IOV extended capability of the physical
7598 * function to obtain the total number of virtual functions (TotalVFs)
7599 * the device supports, or 0 if the device does not support SR-IOV.
7604 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7606 struct pci_dev *pdev = phba->pcidev;
7610 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7614 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
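/* TotalVFs from the SR-IOV extended capability is the hardware upper
 * bound; lpfc_sli_probe_sriov_nr_virtfn() below rejects any request
 * that exceeds it. */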
7619 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7620 * @phba: pointer to lpfc hba data structure.
7621 * @nr_vfn: number of virtual functions to be enabled.
7623 * This function enables PCI SR-IOV virtual functions on a physical
7624 * function. It invokes the PCI SR-IOV API with the @nr_vfn provided to
7625 * enable the requested number of virtual functions. As not all devices
7626 * support SR-IOV, a failure return from the pci_enable_sriov() API call
7627 * is not considered an error condition for most devices.
7630 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7632 struct pci_dev *pdev = phba->pcidev;
7633 uint16_t max_nr_vfn;
7636 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7637 if (nr_vfn > max_nr_vfn) {
7638 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7639 "3057 Requested vfs (%d) greater than "
7640 "supported vfs (%d)", nr_vfn, max_nr_vfn);
7644 rc = pci_enable_sriov(pdev, nr_vfn);
7646 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7647 "2806 Failed to enable sriov on this device "
7648 "with vfn number nr_vf:%d, rc:%d\n",
7651 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7652 "2807 Successful enable sriov on this device "
7653 "with vfn number nr_vf:%d\n", nr_vfn);
7658 lpfc_unblock_requests_work(struct work_struct *work)
7660 struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7661 unblock_request_work);
7663 lpfc_unblock_requests(phba);
7667 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
7668 * @phba: pointer to lpfc hba data structure.
7670 * This routine is invoked to set up the driver internal resources before the
7671 * device specific resource setup to support the HBA device it is attached to.
7675 * other values - error
7678 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7680 struct lpfc_sli *psli = &phba->sli;
7683 * Driver resources common to all SLI revisions
7685 atomic_set(&phba->fast_event_count, 0);
7686 atomic_set(&phba->dbg_log_idx, 0);
7687 atomic_set(&phba->dbg_log_cnt, 0);
7688 atomic_set(&phba->dbg_log_dmping, 0);
7689 spin_lock_init(&phba->hbalock);
7691 /* Initialize port_list spinlock */
7692 spin_lock_init(&phba->port_list_lock);
7693 INIT_LIST_HEAD(&phba->port_list);
7695 INIT_LIST_HEAD(&phba->work_list);
7697 /* Initialize the wait queue head for the kernel thread */
7698 init_waitqueue_head(&phba->work_waitq);
7700 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7701 "1403 Protocols supported %s %s %s\n",
7702 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7704 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7706 (phba->nvmet_support ? "NVMET" : " "));
7708 /* ras_fwlog state */
7709 spin_lock_init(&phba->ras_fwlog_lock);
7711 /* Initialize the IO buffer list used by driver for SLI3 SCSI */
7712 spin_lock_init(&phba->scsi_buf_list_get_lock);
7713 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7714 spin_lock_init(&phba->scsi_buf_list_put_lock);
7715 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7717 /* Initialize the fabric iocb list */
7718 INIT_LIST_HEAD(&phba->fabric_iocb_list);
7720 /* Initialize list to save ELS buffers */
7721 INIT_LIST_HEAD(&phba->elsbuf);
7723 /* Initialize FCF connection rec list */
7724 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7726 /* Initialize OAS configuration list */
7727 spin_lock_init(&phba->devicelock);
7728 INIT_LIST_HEAD(&phba->luns);
7730 /* MBOX heartbeat timer */
7731 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7732 /* Fabric block timer */
7733 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7734 /* EA polling mode timer */
7735 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7736 /* Heartbeat timer */
7737 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7739 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7741 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7742 lpfc_idle_stat_delay_work);
7743 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7748 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7749 * @phba: pointer to lpfc hba data structure.
7751 * This routine is invoked to set up the driver internal resources specific to
7752 * support the SLI-3 HBA device it is attached to.
7756 * other values - error
7759 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7764 * Initialize timers used by driver
7767 /* FCP polling mode timer */
7768 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7770 /* Host attention work mask setup */
7771 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7772 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7774 /* Get all the module params for configuring this host */
7775 lpfc_get_cfgparam(phba);
7776 /* Set up phase-1 common device driver resources */
7778 rc = lpfc_setup_driver_resource_phase1(phba);
7782 if (!phba->sli.sli3_ring)
7783 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7784 sizeof(struct lpfc_sli_ring),
7786 if (!phba->sli.sli3_ring)
7790 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
7791 * used to create the sg_dma_buf_pool must be dynamically calculated.
7794 if (phba->sli_rev == LPFC_SLI_REV4)
7795 entry_sz = sizeof(struct sli4_sge);
7797 entry_sz = sizeof(struct ulp_bde64);
7799 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7800 if (phba->cfg_enable_bg) {
7802 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7803 * the FCP rsp, and a BDE for each. Since we have no control
7804 * over how many protection data segments the SCSI Layer
7805 * will hand us (i.e., there could be one for every block
7806 * in the IO), we just allocate enough BDEs to accommodate
7807 * our max amount and we need to limit lpfc_sg_seg_cnt to
7808 * minimize the risk of running out.
7810 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7811 sizeof(struct fcp_rsp) +
7812 (LPFC_MAX_SG_SEG_CNT * entry_sz);
7814 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7815 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7817 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7818 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7821 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7822 * the FCP rsp, a BDE for each, and a BDE for up to
7823 * cfg_sg_seg_cnt data segments.
7825 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7826 sizeof(struct fcp_rsp) +
7827 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7829 /* Total BDEs in BPL for scsi_sg_list */
7830 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
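/* Worked example (illustrative sizes): with cfg_sg_seg_cnt = 64 and a
 * 12-byte ulp_bde64 entry, the regular (non-DIF) pool buffer size is
 *
 *   sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + (64 + 2) * 12
 *
 * where the "+ 2" reserves one BDE for the FCP cmnd and one for the
 * FCP rsp. */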
7833 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7834 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7835 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7836 phba->cfg_total_seg_cnt);
7838 phba->max_vpi = LPFC_MAX_VPI;
7839 /* This will be set to correct value after config_port mbox */
7840 phba->max_vports = 0;
7843 * Initialize the SLI Layer to run with lpfc HBAs.
7845 lpfc_sli_setup(phba);
7846 lpfc_sli_queue_init(phba);
7848 /* Allocate device driver memory */
7849 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7852 phba->lpfc_sg_dma_buf_pool =
7853 dma_pool_create("lpfc_sg_dma_buf_pool",
7854 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7857 if (!phba->lpfc_sg_dma_buf_pool)
7860 phba->lpfc_cmd_rsp_buf_pool =
7861 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7863 sizeof(struct fcp_cmnd) +
7864 sizeof(struct fcp_rsp),
7867 if (!phba->lpfc_cmd_rsp_buf_pool)
7868 goto fail_free_dma_buf_pool;
7871 * Enable sr-iov virtual functions if supported and configured
7872 * through the module parameter.
7874 if (phba->cfg_sriov_nr_virtfn > 0) {
7875 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7876 phba->cfg_sriov_nr_virtfn);
7878 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7879 "2808 Requested number of SR-IOV "
7880 "virtual functions (%d) is not "
7882 phba->cfg_sriov_nr_virtfn);
7883 phba->cfg_sriov_nr_virtfn = 0;
7889 fail_free_dma_buf_pool:
7890 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7891 phba->lpfc_sg_dma_buf_pool = NULL;
7893 lpfc_mem_free(phba);
7898 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7899 * @phba: pointer to lpfc hba data structure.
7901 * This routine is invoked to unset the driver internal resources set up
7902 * specific for supporting the SLI-3 HBA device it is attached to.
7905 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7907 /* Free device driver memory allocated */
7908 lpfc_mem_free_all(phba);
7914 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7915 * @phba: pointer to lpfc hba data structure.
7917 * This routine is invoked to set up the driver internal resources specific to
7918 * support the SLI-4 HBA device it is attached to.
7922 * other values - error
7925 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7927 LPFC_MBOXQ_t *mboxq;
7929 int rc, i, max_buf_size;
7936 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7937 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7938 phba->sli4_hba.curr_disp_cpu = 0;
7940 /* Get all the module params for configuring this host */
7941 lpfc_get_cfgparam(phba);
7943 /* Set up phase-1 common device driver resources */
7944 rc = lpfc_setup_driver_resource_phase1(phba);
7948 /* Before proceeding, wait for POST done and device ready */
7949 rc = lpfc_sli4_post_status_check(phba);
7953 /* Allocate all driver workqueues here */
7955 /* The lpfc_wq workqueue for deferred irq use */
7956 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7961 * Initialize timers used by driver
7964 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7966 /* FCF rediscover timer */
7967 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7969 /* CMF congestion timer */
7970 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7971 phba->cmf_timer.function = lpfc_cmf_timer;
7972 /* CMF 1 minute stats collection timer */
7973 hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7974 phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
7977 * Control structure for handling external multi-buffer mailbox
7978 * command pass-through.
7980 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7981 sizeof(struct lpfc_mbox_ext_buf_ctx));
7982 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7984 phba->max_vpi = LPFC_MAX_VPI;
7986 /* This will be set to correct value after the read_config mbox */
7987 phba->max_vports = 0;
7989 /* Program the default value of vlan_id and fc_map */
7990 phba->valid_vlan = 0;
7991 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7992 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7993 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7996 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7997 * we will associate a new ring, for each EQ/CQ/WQ tuple.
7998 * The WQ create will allocate the ring.
8001 /* Initialize buffer queue management fields */
8002 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
8003 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
8004 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
8006 /* for VMID idle timeout if VMID is enabled */
8007 if (lpfc_is_vmid_enabled(phba))
8008 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
8011 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
8013 /* Initialize the Abort buffer list used by driver */
8014 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
8015 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
8017 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8018 /* Initialize the Abort nvme buffer list used by driver */
8019 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
8020 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8021 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
8022 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
8023 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
8026 /* This abort list is used by the worker thread */
8027 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
8028 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
8029 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
8030 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
8033 * Initialize driver internal slow-path work queues
8036 /* Driver internal slow-path CQ Event pool */
8037 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
8038 /* Response IOCB work queue list */
8039 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
8040 /* Asynchronous event CQ Event work queue list */
8041 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
8042 /* Slow-path XRI aborted CQ Event work queue list */
8043 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
8044 /* Receive queue CQ Event work queue list */
8045 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
8047 /* Initialize extent block lists. */
8048 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
8049 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
8050 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
8051 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
8053 /* Initialize mboxq lists. If the early init routines fail
8054 * these lists need to be correctly initialized.
8056 INIT_LIST_HEAD(&phba->sli.mboxq);
8057 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
8059 /* initialize optic_state to 0xFF */
8060 phba->sli4_hba.lnk_info.optic_state = 0xff;
8062 /* Allocate device driver memory */
8063 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
8065 goto out_destroy_workqueue;
8067 /* IF Type 2 ports get initialized now. */
8068 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8069 LPFC_SLI_INTF_IF_TYPE_2) {
8070 rc = lpfc_pci_function_reset(phba);
8075 phba->temp_sensor_support = 1;
8078 /* Create the bootstrap mailbox command */
8079 rc = lpfc_create_bootstrap_mbox(phba);
8083 /* Set up the host's endian order with the device. */
8084 rc = lpfc_setup_endian_order(phba);
8086 goto out_free_bsmbx;
8088 /* Set up the hba's configuration parameters. */
8089 rc = lpfc_sli4_read_config(phba);
8091 goto out_free_bsmbx;
8093 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
8094 /* Right now the link is down. If FA-PWWN is configured, the
8095 * firmware will try FLOGI before the driver gets a link up.
8096 * If it fails, the driver should get a MISCONFIGURED async
8097 * event which will clear this flag. The only notification
8098 * the driver gets is on failure; if it succeeds there is no
8099 * notification given. Assume success.
8101 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
8104 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
8106 goto out_free_bsmbx;
8108 /* IF Type 0 ports get initialized now. */
8109 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8110 LPFC_SLI_INTF_IF_TYPE_0) {
8111 rc = lpfc_pci_function_reset(phba);
8113 goto out_free_bsmbx;
8116 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8120 goto out_free_bsmbx;
8123 /* Check for NVMET being configured */
8124 phba->nvmet_support = 0;
8125 if (lpfc_enable_nvmet_cnt) {
8127 /* First get WWN of HBA instance */
8128 lpfc_read_nv(phba, mboxq);
8129 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8130 if (rc != MBX_SUCCESS) {
8131 lpfc_printf_log(phba, KERN_ERR,
8133 "6016 Mailbox failed , mbxCmd x%x "
8134 "READ_NV, mbxStatus x%x\n",
8135 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8136 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
8137 mempool_free(mboxq, phba->mbox_mem_pool);
8139 goto out_free_bsmbx;
8142 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
8144 wwn = cpu_to_be64(wwn);
8145 phba->sli4_hba.wwnn.u.name = wwn;
8146 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
8148 /* wwn is WWPN of HBA instance */
8149 wwn = cpu_to_be64(wwn);
8150 phba->sli4_hba.wwpn.u.name = wwn;
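	/* Annotation: both names were byte-swapped to big-endian u64 form so
	 * they can be compared directly against the u64 lpfc_enable_nvmet[]
	 * module-parameter entries in the loop below.
	 */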
8152 /* Check to see if it matches any module parameter */
8153 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
8154 if (wwn == lpfc_enable_nvmet[i]) {
8155 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
8156 if (lpfc_nvmet_mem_alloc(phba))
8159 phba->nvmet_support = 1; /* a match */
8161 lpfc_printf_log(phba, KERN_ERR,
8163 "6017 NVME Target %016llx\n",
8166 lpfc_printf_log(phba, KERN_ERR,
8168 "6021 Can't enable NVME Target."
8169 " NVME_TARGET_FC infrastructure"
8170 " is not in kernel\n");
8172 /* Not supported for NVMET */
8173 phba->cfg_xri_rebalancing = 0;
8174 if (phba->irq_chann_mode == NHT_MODE) {
8175 phba->cfg_irq_chann =
8176 phba->sli4_hba.num_present_cpu;
8177 phba->cfg_hdw_queue =
8178 phba->sli4_hba.num_present_cpu;
8179 phba->irq_chann_mode = NORMAL_MODE;
8186 lpfc_nvme_mod_param_dep(phba);
8189 * Get sli4 parameters that override parameters from Port capabilities.
8190 * If this call fails, it isn't critical unless the SLI4 parameters come back in conflict.
8193 rc = lpfc_get_sli4_parameters(phba, mboxq);
8195 if_type = bf_get(lpfc_sli_intf_if_type,
8196 &phba->sli4_hba.sli_intf);
8197 if_fam = bf_get(lpfc_sli_intf_sli_family,
8198 &phba->sli4_hba.sli_intf);
8199 if (phba->sli4_hba.extents_in_use &&
8200 phba->sli4_hba.rpi_hdrs_in_use) {
8201 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8202 "2999 Unsupported SLI4 Parameters "
8203 "Extents and RPI headers enabled.\n");
8204 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8205 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
8206 mempool_free(mboxq, phba->mbox_mem_pool);
8208 goto out_free_bsmbx;
8211 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8212 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8213 mempool_free(mboxq, phba->mbox_mem_pool);
8215 goto out_free_bsmbx;
8220 * Reserve extra SGEs: 1 for cmd, 1 for rsp; NVME adds an extra one
8221 * for boundary conditions in its max_sgl_segment template.
8224 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8228 * It doesn't matter what family our adapter is in; we are
8229 * limited to 2 pages, 512 SGEs, for our SGL.
8230 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8232 max_buf_size = (2 * SLI4_PAGE_SIZE);
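	/* Illustrative arithmetic (assuming SLI4_PAGE_SIZE = 4096 and a
	 * 16-byte struct sli4_sge): 2 pages = 8192 bytes, and 8192 / 16 =
	 * 512 SGEs - the limit cited in the comment above.
	 */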
8235 * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size
8236 * used to create the sg_dma_buf_pool must be calculated.
8238 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8239 /* Both cfg_enable_bg and cfg_external_dif code paths */
8242 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8243 * the FCP rsp, and a SGE. Since we have no control
8244 * over how many protection segments the SCSI Layer
8245 * will hand us (i.e., there could be one for every block
8246 * in the IO), just allocate enough SGEs to accommodate
8247 * our max amount, and we need to limit lpfc_sg_seg_cnt
8248 * to minimize the risk of running out.
8250 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8251 sizeof(struct fcp_rsp) + max_buf_size;
8253 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8254 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8257 * If supporting DIF, reduce the seg count for scsi to
8258 * allow room for the DIF sges.
8260 if (phba->cfg_enable_bg &&
8261 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8262 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8264 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8268 * The scsi_buf for a regular I/O holds the FCP cmnd,
8269 * the FCP rsp, a SGE for each, and a SGE for up to
8270 * cfg_sg_seg_cnt data segments.
8272 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8273 sizeof(struct fcp_rsp) +
8274 ((phba->cfg_sg_seg_cnt + extra) *
8275 sizeof(struct sli4_sge));
8277 /* Total SGEs for scsi_sg_list */
8278 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8279 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8282 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8283 * need to post 1 page for the SGL.
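	/* Illustrative check (assuming a 16-byte struct sli4_sge):
	 * 256 SGEs * 16 bytes = 4096 bytes = one SLI4 page, which is why
	 * 256 is the single-page threshold mentioned in the NOTE above.
	 */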
8287 if (phba->cfg_xpsgl && !phba->nvmet_support)
8288 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8289 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8290 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8292 phba->cfg_sg_dma_buf_size =
8293 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8295 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8296 sizeof(struct sli4_sge);
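	/* Annotation (assumed intent): border_sge_num is how many SGEs fit
	 * in one DMA buffer, used later to find where a scatter-gather list
	 * crosses a buffer boundary.
	 */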
8298 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8299 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8300 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8301 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8302 "6300 Reducing NVME sg segment "
8304 LPFC_MAX_NVME_SEG_CNT);
8305 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8307 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8310 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8311 "9087 sg_seg_cnt:%d dmabuf_size:%d "
8312 "total:%d scsi:%d nvme:%d\n",
8313 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8314 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
8315 phba->cfg_nvme_seg_cnt);
8317 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8318 i = phba->cfg_sg_dma_buf_size;
8322 phba->lpfc_sg_dma_buf_pool =
8323 dma_pool_create("lpfc_sg_dma_buf_pool",
8325 phba->cfg_sg_dma_buf_size,
8327 if (!phba->lpfc_sg_dma_buf_pool) {
8329 goto out_free_bsmbx;
8332 phba->lpfc_cmd_rsp_buf_pool =
8333 dma_pool_create("lpfc_cmd_rsp_buf_pool",
8335 sizeof(struct fcp_cmnd) +
8336 sizeof(struct fcp_rsp),
8338 if (!phba->lpfc_cmd_rsp_buf_pool) {
8340 goto out_free_sg_dma_buf;
8343 mempool_free(mboxq, phba->mbox_mem_pool);
8345 /* Verify OAS is supported */
8346 lpfc_sli4_oas_verify(phba);
8348 /* Verify RAS support on adapter */
8349 lpfc_sli4_ras_init(phba);
8351 /* Verify all the SLI4 queues */
8352 rc = lpfc_sli4_queue_verify(phba);
8354 goto out_free_cmd_rsp_buf;
8356 /* Create driver internal CQE event pool */
8357 rc = lpfc_sli4_cq_event_pool_create(phba);
8359 goto out_free_cmd_rsp_buf;
8361 /* Initialize sgl lists per host */
8362 lpfc_init_sgl_list(phba);
8364 /* Allocate and initialize active sgl array */
8365 rc = lpfc_init_active_sgl_array(phba);
8367 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8368 "1430 Failed to initialize sgl list.\n");
8369 goto out_destroy_cq_event_pool;
8371 rc = lpfc_sli4_init_rpi_hdrs(phba);
8373 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8374 "1432 Failed to initialize rpi headers.\n");
8375 goto out_free_active_sgl;
8378 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8379 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
8380 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8382 if (!phba->fcf.fcf_rr_bmask) {
8383 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8384 "2759 Failed allocate memory for FCF round "
8385 "robin failover bmask\n");
8387 goto out_remove_rpi_hdrs;
8390 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8391 sizeof(struct lpfc_hba_eq_hdl),
8393 if (!phba->sli4_hba.hba_eq_hdl) {
8394 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8395 "2572 Failed allocate memory for "
8396 "fast-path per-EQ handle array\n");
8398 goto out_free_fcf_rr_bmask;
8401 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8402 sizeof(struct lpfc_vector_map_info),
8404 if (!phba->sli4_hba.cpu_map) {
8405 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8406 "3327 Failed allocate memory for msi-x "
8407 "interrupt vector mapping\n");
8409 goto out_free_hba_eq_hdl;
8412 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8413 if (!phba->sli4_hba.eq_info) {
8414 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8415 "3321 Failed allocation for per_cpu stats\n");
8417 goto out_free_hba_cpu_map;
8420 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8421 sizeof(*phba->sli4_hba.idle_stat),
8423 if (!phba->sli4_hba.idle_stat) {
8424 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8425 "3390 Failed allocation for idle_stat\n");
8427 goto out_free_hba_eq_info;
8430 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8431 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8432 if (!phba->sli4_hba.c_stat) {
8433 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8434 "3332 Failed allocating per cpu hdwq stats\n");
8436 goto out_free_hba_idle_stat;
8440 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8441 if (!phba->cmf_stat) {
8442 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8443 "3331 Failed allocating per cpu cgn stats\n");
8445 goto out_free_hba_hdwq_info;
8449 * Enable SR-IOV virtual functions if supported and configured
8450 * through the module parameter.
8452 if (phba->cfg_sriov_nr_virtfn > 0) {
8453 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8454 phba->cfg_sriov_nr_virtfn);
8456 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8457 "3020 Requested number of SR-IOV "
8458 "virtual functions (%d) is not "
8460 phba->cfg_sriov_nr_virtfn);
8461 phba->cfg_sriov_nr_virtfn = 0;
8467 out_free_hba_hdwq_info:
8468 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8469 free_percpu(phba->sli4_hba.c_stat);
8470 out_free_hba_idle_stat:
8472 kfree(phba->sli4_hba.idle_stat);
8473 out_free_hba_eq_info:
8474 free_percpu(phba->sli4_hba.eq_info);
8475 out_free_hba_cpu_map:
8476 kfree(phba->sli4_hba.cpu_map);
8477 out_free_hba_eq_hdl:
8478 kfree(phba->sli4_hba.hba_eq_hdl);
8479 out_free_fcf_rr_bmask:
8480 kfree(phba->fcf.fcf_rr_bmask);
8481 out_remove_rpi_hdrs:
8482 lpfc_sli4_remove_rpi_hdrs(phba);
8483 out_free_active_sgl:
8484 lpfc_free_active_sgl(phba);
8485 out_destroy_cq_event_pool:
8486 lpfc_sli4_cq_event_pool_destroy(phba);
8487 out_free_cmd_rsp_buf:
8488 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8489 phba->lpfc_cmd_rsp_buf_pool = NULL;
8490 out_free_sg_dma_buf:
8491 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8492 phba->lpfc_sg_dma_buf_pool = NULL;
8494 lpfc_destroy_bootstrap_mbox(phba);
8496 lpfc_mem_free(phba);
8497 out_destroy_workqueue:
8498 destroy_workqueue(phba->wq);
8504 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8505 * @phba: pointer to lpfc hba data structure.
8507 * This routine is invoked to unset the driver internal resources set up
8508 * specifically for supporting the SLI-4 HBA device it is attached to.
8511 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8513 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8515 free_percpu(phba->sli4_hba.eq_info);
8516 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8517 free_percpu(phba->sli4_hba.c_stat);
8519 free_percpu(phba->cmf_stat);
8520 kfree(phba->sli4_hba.idle_stat);
8522 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
8523 kfree(phba->sli4_hba.cpu_map);
8524 phba->sli4_hba.num_possible_cpu = 0;
8525 phba->sli4_hba.num_present_cpu = 0;
8526 phba->sli4_hba.curr_disp_cpu = 0;
8527 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8529 /* Free memory allocated for fast-path work queue handles */
8530 kfree(phba->sli4_hba.hba_eq_hdl);
8532 /* Free the allocated rpi headers. */
8533 lpfc_sli4_remove_rpi_hdrs(phba);
8534 lpfc_sli4_remove_rpis(phba);
8536 /* Free eligible FCF index bmask */
8537 kfree(phba->fcf.fcf_rr_bmask);
8539 /* Free the ELS sgl list */
8540 lpfc_free_active_sgl(phba);
8541 lpfc_free_els_sgl_list(phba);
8542 lpfc_free_nvmet_sgl_list(phba);
8544 /* Free the completion queue EQ event pool */
8545 lpfc_sli4_cq_event_release_all(phba);
8546 lpfc_sli4_cq_event_pool_destroy(phba);
8548 /* Release resource identifiers. */
8549 lpfc_sli4_dealloc_resource_identifiers(phba);
8551 /* Free the bsmbx region. */
8552 lpfc_destroy_bootstrap_mbox(phba);
8554 /* Free the SLI Layer memory with SLI4 HBAs */
8555 lpfc_mem_free_all(phba);
8557 /* Free the current connect table */
8558 list_for_each_entry_safe(conn_entry, next_conn_entry,
8559 &phba->fcf_conn_rec_list, list) {
8560 list_del_init(&conn_entry->list);
8568 * lpfc_init_api_table_setup - Set up init api function jump table
8569 * @phba: The hba struct for which this call is being executed.
8570 * @dev_grp: The HBA PCI-Device group number.
8572 * This routine sets up the device INIT interface API function jump table in @phba struct.
8575 * Returns: 0 - success, -ENODEV - failure.
8578 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8580 phba->lpfc_hba_init_link = lpfc_hba_init_link;
8581 phba->lpfc_hba_down_link = lpfc_hba_down_link;
8582 phba->lpfc_selective_reset = lpfc_selective_reset;
8584 case LPFC_PCI_DEV_LP:
8585 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8586 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8587 phba->lpfc_stop_port = lpfc_stop_port_s3;
8589 case LPFC_PCI_DEV_OC:
8590 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8591 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8592 phba->lpfc_stop_port = lpfc_stop_port_s4;
8595 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8596 "1431 Invalid HBA PCI-device group: 0x%x\n",
8604 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8605 * @phba: pointer to lpfc hba data structure.
8607 * This routine is invoked to set up the driver internal resources after the
8608 * device specific resource setup to support the HBA device it is attached to.
8612 * other values - error
8615 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8619 /* Startup the kernel thread for this host adapter. */
8620 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8621 "lpfc_worker_%d", phba->brd_no);
8622 if (IS_ERR(phba->worker_thread)) {
8623 error = PTR_ERR(phba->worker_thread);
8631 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8632 * @phba: pointer to lpfc hba data structure.
8634 * This routine is invoked to unset the driver internal resources set up after
8635 * the device specific resource setup for supporting the HBA device it is attached to.
8639 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8642 destroy_workqueue(phba->wq);
8646 /* Stop kernel worker thread */
8647 if (phba->worker_thread)
8648 kthread_stop(phba->worker_thread);
8652 * lpfc_free_iocb_list - Free iocb list.
8653 * @phba: pointer to lpfc hba data structure.
8655 * This routine is invoked to free the driver's IOCB list and memory.
8658 lpfc_free_iocb_list(struct lpfc_hba *phba)
8660 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8662 spin_lock_irq(&phba->hbalock);
8663 list_for_each_entry_safe(iocbq_entry, iocbq_next,
8664 &phba->lpfc_iocb_list, list) {
8665 list_del(&iocbq_entry->list);
8667 phba->total_iocbq_bufs--;
8669 spin_unlock_irq(&phba->hbalock);
8675 * lpfc_init_iocb_list - Allocate and initialize iocb list.
8676 * @phba: pointer to lpfc hba data structure.
8677 * @iocb_count: number of requested iocbs
8679 * This routine is invoked to allocate and initialize the driver's IOCB
8680 * list and set up the IOCB tag array accordingly.
8684 * other values - error
8687 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8689 struct lpfc_iocbq *iocbq_entry = NULL;
8693 /* Initialize and populate the iocb list per host. */
8694 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8695 for (i = 0; i < iocb_count; i++) {
8696 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8697 if (iocbq_entry == NULL) {
8698 printk(KERN_ERR "%s: only allocated %d iocbs of "
8699 "expected %d count. Unloading driver.\n",
8700 __func__, i, iocb_count);
8701 goto out_free_iocbq;
8704 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8707 printk(KERN_ERR "%s: failed to allocate IOTAG. "
8708 "Unloading driver.\n", __func__);
8709 goto out_free_iocbq;
8711 iocbq_entry->sli4_lxritag = NO_XRI;
8712 iocbq_entry->sli4_xritag = NO_XRI;
8714 spin_lock_irq(&phba->hbalock);
8715 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8716 phba->total_iocbq_bufs++;
8717 spin_unlock_irq(&phba->hbalock);
8723 lpfc_free_iocb_list(phba);
8729 * lpfc_free_sgl_list - Free a given sgl list.
8730 * @phba: pointer to lpfc hba data structure.
8731 * @sglq_list: pointer to the head of sgl list.
8733 * This routine is invoked to free a given sgl list and memory.
8736 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8738 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8740 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8741 list_del(&sglq_entry->list);
8742 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8748 * lpfc_free_els_sgl_list - Free els sgl list.
8749 * @phba: pointer to lpfc hba data structure.
8751 * This routine is invoked to free the driver's els sgl list and memory.
8754 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8756 LIST_HEAD(sglq_list);
8758 /* Retrieve all els sgls from driver list */
8759 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8760 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8761 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8763 /* Now free the sgl list */
8764 lpfc_free_sgl_list(phba, &sglq_list);
8768 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8769 * @phba: pointer to lpfc hba data structure.
8771 * This routine is invoked to free the driver's nvmet sgl list and memory.
8774 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8776 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8777 LIST_HEAD(sglq_list);
8779 /* Retrieve all nvmet sgls from driver list */
8780 spin_lock_irq(&phba->hbalock);
8781 spin_lock(&phba->sli4_hba.sgl_list_lock);
8782 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8783 spin_unlock(&phba->sli4_hba.sgl_list_lock);
8784 spin_unlock_irq(&phba->hbalock);
8786 /* Now free the sgl list */
8787 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8788 list_del(&sglq_entry->list);
8789 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8793 /* Update the nvmet_xri_cnt to reflect no current sgls.
8794 * The next initialization cycle sets the count and allocates
8795 * the sgls over again.
8797 phba->sli4_hba.nvmet_xri_cnt = 0;
8801 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8802 * @phba: pointer to lpfc hba data structure.
8804 * This routine is invoked to allocate the driver's active sgl memory.
8805 * This array will hold the sglq_entry pointers for active IOs.
8808 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8811 size = sizeof(struct lpfc_sglq *);
8812 size *= phba->sli4_hba.max_cfg_param.max_xri;
8814 phba->sli4_hba.lpfc_sglq_active_list =
8815 kzalloc(size, GFP_KERNEL);
8816 if (!phba->sli4_hba.lpfc_sglq_active_list)
8822 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8823 * @phba: pointer to lpfc hba data structure.
8825 * This routine is invoked to walk through the array of active sglq entries
8826 * and free all of the resources.
8827 * This is just a placeholder for now.
8830 lpfc_free_active_sgl(struct lpfc_hba *phba)
8832 kfree(phba->sli4_hba.lpfc_sglq_active_list);
8836 * lpfc_init_sgl_list - Allocate and initialize sgl list.
8837 * @phba: pointer to lpfc hba data structure.
8839 * This routine is invoked to allocate and initialize the driver's sgl
8840 * list and set up the sgl xritag tag array accordingly.
8844 lpfc_init_sgl_list(struct lpfc_hba *phba)
8846 /* Initialize and populate the sglq list per host/VF. */
8847 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8848 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8849 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8850 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8852 /* els xri-sgl bookkeeping */
8853 phba->sli4_hba.els_xri_cnt = 0;
8855 /* nvme xri-buffer bookkeeping */
8856 phba->sli4_hba.io_xri_cnt = 0;
8860 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8861 * @phba: pointer to lpfc hba data structure.
8863 * This routine is invoked to post rpi header templates to the
8864 * port for those SLI4 ports that do not support extents. This routine
8865 * posts a PAGE_SIZE memory region to the port to hold up to
8866 * PAGE_SIZE / 64 rpi context headers (64 bytes each). This is an initialization routine
8867 * and should be called only when interrupts are disabled.
8871 * -ERROR - otherwise.
8874 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8877 struct lpfc_rpi_hdr *rpi_hdr;
8879 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8880 if (!phba->sli4_hba.rpi_hdrs_in_use)
8882 if (phba->sli4_hba.extents_in_use)
8885 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8887 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8888 "0391 Error during rpi post operation\n");
8889 lpfc_sli4_remove_rpis(phba);
8897 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8898 * @phba: pointer to lpfc hba data structure.
8900 * This routine is invoked to allocate a single 4KB memory region to
8901 * support rpis and store it in the phba. This single region
8902 * provides support for up to 64 rpis. The region is used globally by the device.
8906 * A valid rpi hdr on success.
8907 * A NULL pointer on any failure.
8909 struct lpfc_rpi_hdr *
8910 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8912 uint16_t rpi_limit, curr_rpi_range;
8913 struct lpfc_dmabuf *dmabuf;
8914 struct lpfc_rpi_hdr *rpi_hdr;
8917 * If the SLI4 port supports extents, posting the rpi header isn't
8918 * required. Set the expected maximum count and let the actual value
8919 * get set when extents are fully allocated.
8921 if (!phba->sli4_hba.rpi_hdrs_in_use)
8923 if (phba->sli4_hba.extents_in_use)
8926 /* The limit on the logical index is just the max_rpi count. */
8927 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8929 spin_lock_irq(&phba->hbalock);
8931 * Establish the starting RPI in this header block. The starting
8932 * rpi is normalized to a zero base because the physical rpi is port based.
8935 curr_rpi_range = phba->sli4_hba.next_rpi;
8936 spin_unlock_irq(&phba->hbalock);
8938 /* Reached full RPI range */
8939 if (curr_rpi_range == rpi_limit)
8943 * First allocate the protocol header region for the port. The
8944 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8946 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8950 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8951 LPFC_HDR_TEMPLATE_SIZE,
8952 &dmabuf->phys, GFP_KERNEL);
8953 if (!dmabuf->virt) {
8955 goto err_free_dmabuf;
8958 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8960 goto err_free_coherent;
8963 /* Save the rpi header data for cleanup later. */
8964 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
8966 goto err_free_coherent;
8968 rpi_hdr->dmabuf = dmabuf;
8969 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8970 rpi_hdr->page_count = 1;
8971 spin_lock_irq(&phba->hbalock);
8973 /* The rpi_hdr stores the logical index only. */
8974 rpi_hdr->start_rpi = curr_rpi_range;
8975 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
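	/* Bookkeeping sketch: each header region advances next_rpi by
	 * LPFC_RPI_HDR_COUNT (64, per the 4KB-region comment above), so this
	 * region covers logical rpis start_rpi through start_rpi + 63.
	 */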
8976 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8978 spin_unlock_irq(&phba->hbalock);
8982 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8983 dmabuf->virt, dmabuf->phys);
8990 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8991 * @phba: pointer to lpfc hba data structure.
8993 * This routine is invoked to remove all memory resources allocated
8994 * to support rpis for SLI4 ports not supporting extents. This routine
8995 * presumes the caller has released all rpis consumed by fabric or port
8996 * logins and is prepared to have the header pages removed.
8999 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
9001 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
9003 if (!phba->sli4_hba.rpi_hdrs_in_use)
9006 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
9007 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
9008 list_del(&rpi_hdr->list);
9009 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
9010 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
9011 kfree(rpi_hdr->dmabuf);
9015 /* There are no rpis available to the port now. */
9016 phba->sli4_hba.next_rpi = 0;
9020 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
9021 * @pdev: pointer to pci device data structure.
9023 * This routine is invoked to allocate the driver hba data structure for an
9024 * HBA device. If the allocation is successful, the phba reference to the
9025 * PCI device data structure is set.
9028 * pointer to @phba - successful
9031 static struct lpfc_hba *
9032 lpfc_hba_alloc(struct pci_dev *pdev)
9034 struct lpfc_hba *phba;
9036 /* Allocate memory for HBA structure */
9037 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
9039 dev_err(&pdev->dev, "failed to allocate hba struct\n");
9043 /* Set reference to PCI device in HBA structure */
9044 phba->pcidev = pdev;
9046 /* Assign an unused board number */
9047 phba->brd_no = lpfc_get_instance();
9048 if (phba->brd_no < 0) {
9052 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
9054 spin_lock_init(&phba->ct_ev_lock);
9055 INIT_LIST_HEAD(&phba->ct_ev_waiters);
9061 * lpfc_hba_free - Free driver hba data structure with a device.
9062 * @phba: pointer to lpfc hba data structure.
9064 * This routine is invoked to free the driver hba data structure with an
9068 lpfc_hba_free(struct lpfc_hba *phba)
9070 if (phba->sli_rev == LPFC_SLI_REV4)
9071 kfree(phba->sli4_hba.hdwq);
9073 /* Release the driver assigned board number */
9074 idr_remove(&lpfc_hba_index, phba->brd_no);
9076 /* Free memory allocated with sli3 rings */
9077 kfree(phba->sli.sli3_ring);
9078 phba->sli.sli3_ring = NULL;
9085 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
9086 * @vport: pointer to lpfc vport data structure.
9088 * This routine will set up the initial FDMI attribute masks for
9089 * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
9090 * to get these attributes first before falling back; the attribute
9091 * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1
9094 lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
9096 struct lpfc_hba *phba = vport->phba;
9098 set_bit(FC_ALLOW_FDMI, &vport->load_flag);
9099 if (phba->cfg_enable_SmartSAN ||
9100 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
9101 /* Setup appropriate attribute masks */
9102 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9103 if (phba->cfg_enable_SmartSAN)
9104 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9106 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9109 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
9110 "6077 Setup FDMI mask: hba x%x port x%x\n",
9111 vport->fdmi_hba_mask, vport->fdmi_port_mask);
9115 * lpfc_create_shost - Create hba physical port with associated scsi host.
9116 * @phba: pointer to lpfc hba data structure.
9118 * This routine is invoked to create HBA physical port and associate a SCSI host with it.
9123 * other values - error
9126 lpfc_create_shost(struct lpfc_hba *phba)
9128 struct lpfc_vport *vport;
9129 struct Scsi_Host *shost;
9131 /* Initialize HBA FC structure */
9132 phba->fc_edtov = FF_DEF_EDTOV;
9133 phba->fc_ratov = FF_DEF_RATOV;
9134 phba->fc_altov = FF_DEF_ALTOV;
9135 phba->fc_arbtov = FF_DEF_ARBTOV;
9137 atomic_set(&phba->sdev_cnt, 0);
9138 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9142 shost = lpfc_shost_from_vport(vport);
9143 phba->pport = vport;
9145 if (phba->nvmet_support) {
9146 /* Only 1 vport (pport) will support NVME target */
9147 phba->targetport = NULL;
9148 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9149 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
9150 "6076 NVME Target Found\n");
9153 lpfc_debugfs_initialize(vport);
9154 /* Put reference to SCSI host to driver's device private data */
9155 pci_set_drvdata(phba->pcidev, shost);
9157 lpfc_setup_fdmi_mask(vport);
9160 * At this point we are fully registered with PSA. In addition,
9161 * any initial discovery should be completed.
9167 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
9168 * @phba: pointer to lpfc hba data structure.
9170 * This routine is invoked to destroy HBA physical port and the associated SCSI host.
9174 lpfc_destroy_shost(struct lpfc_hba *phba)
9176 struct lpfc_vport *vport = phba->pport;
9178 /* Destroy physical port that associated with the SCSI host */
9179 destroy_port(vport);
9185 * lpfc_setup_bg - Setup Block guard structures and debug areas.
9186 * @phba: pointer to lpfc hba data structure.
9187 * @shost: the shost to be used to detect Block guard settings.
9189 * This routine sets up the local Block guard protocol settings for @shost.
9190 * This routine also allocates memory for debugging bg buffers.
9193 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9198 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9199 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9200 "1478 Registering BlockGuard with the "
9203 old_mask = phba->cfg_prot_mask;
9204 old_guard = phba->cfg_prot_guard;
9206 /* Only allow supported values */
9207 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9208 SHOST_DIX_TYPE0_PROTECTION |
9209 SHOST_DIX_TYPE1_PROTECTION);
9210 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9211 SHOST_DIX_GUARD_CRC);
9213 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
9214 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9215 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9217 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9218 if ((old_mask != phba->cfg_prot_mask) ||
9219 (old_guard != phba->cfg_prot_guard))
9220 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9221 "1475 Registering BlockGuard with the "
9222 "SCSI layer: mask %d guard %d\n",
9223 phba->cfg_prot_mask,
9224 phba->cfg_prot_guard);
9226 scsi_host_set_prot(shost, phba->cfg_prot_mask);
9227 scsi_host_set_guard(shost, phba->cfg_prot_guard);
9229 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9230 "1479 Not Registering BlockGuard with the SCSI "
9231 "layer, Bad protection parameters: %d %d\n",
9232 old_mask, old_guard);
9237 * lpfc_post_init_setup - Perform necessary device post initialization setup.
9238 * @phba: pointer to lpfc hba data structure.
9240 * This routine is invoked to perform all the necessary post initialization
9241 * setup for the device.
9244 lpfc_post_init_setup(struct lpfc_hba *phba)
9246 struct Scsi_Host *shost;
9247 struct lpfc_adapter_event_header adapter_event;
9249 /* Get the default values for Model Name and Description */
9250 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9253 * hba setup may have changed the hba_queue_depth so we need to
9254 * adjust the value of can_queue.
9256 shost = pci_get_drvdata(phba->pcidev);
9257 shost->can_queue = phba->cfg_hba_queue_depth - 10;
9259 lpfc_host_attrib_init(shost);
9261 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9262 spin_lock_irq(shost->host_lock);
9263 lpfc_poll_start_timer(phba);
9264 spin_unlock_irq(shost->host_lock);
9267 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9268 "0428 Perform SCSI scan\n");
9269 /* Send board arrival event to upper layer */
9270 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9271 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9272 fc_host_post_vendor_event(shost, fc_get_event_number(),
9273 sizeof(adapter_event),
9274 (char *) &adapter_event,
9280 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9281 * @phba: pointer to lpfc hba data structure.
9283 * This routine is invoked to set up the PCI device memory space for device
9284 * with SLI-3 interface spec.
9288 * other values - error
9291 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9293 struct pci_dev *pdev = phba->pcidev;
9294 unsigned long bar0map_len, bar2map_len;
9302 /* Set the device DMA mask size */
9303 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9305 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9310 /* Get the bus address of Bar0 and Bar2 and the number of bytes
9311 * required by each mapping.
9313 phba->pci_bar0_map = pci_resource_start(pdev, 0);
9314 bar0map_len = pci_resource_len(pdev, 0);
9316 phba->pci_bar2_map = pci_resource_start(pdev, 2);
9317 bar2map_len = pci_resource_len(pdev, 2);
9319 /* Map HBA SLIM to a kernel virtual address. */
9320 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9321 if (!phba->slim_memmap_p) {
9322 dev_printk(KERN_ERR, &pdev->dev,
9323 "ioremap failed for SLIM memory.\n");
9327 /* Map HBA Control Registers to a kernel virtual address. */
9328 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9329 if (!phba->ctrl_regs_memmap_p) {
9330 dev_printk(KERN_ERR, &pdev->dev,
9331 "ioremap failed for HBA control registers.\n");
9332 goto out_iounmap_slim;
9335 /* Allocate memory for SLI-2 structures */
9336 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9337 &phba->slim2p.phys, GFP_KERNEL);
9338 if (!phba->slim2p.virt)
9341 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9342 phba->mbox_ext = (phba->slim2p.virt +
9343 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9344 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9345 phba->IOCBs = (phba->slim2p.virt +
9346 offsetof(struct lpfc_sli2_slim, IOCBs));
9348 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9349 lpfc_sli_hbq_size(),
9350 &phba->hbqslimp.phys,
9352 if (!phba->hbqslimp.virt)
9355 hbq_count = lpfc_sli_hbq_count();
9356 ptr = phba->hbqslimp.virt;
9357 for (i = 0; i < hbq_count; ++i) {
9358 phba->hbqs[i].hbq_virt = ptr;
9359 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9360 ptr += (lpfc_hbq_defs[i]->entry_count *
9361 sizeof(struct lpfc_hbq_entry));
9363 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9364 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9366 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9368 phba->MBslimaddr = phba->slim_memmap_p;
9369 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9370 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9371 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9372 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9377 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9378 phba->slim2p.virt, phba->slim2p.phys);
9380 iounmap(phba->ctrl_regs_memmap_p);
9382 iounmap(phba->slim_memmap_p);
9388 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9389 * @phba: pointer to lpfc hba data structure.
9391 * This routine is invoked to unset the PCI device memory space for device
9392 * with SLI-3 interface spec.
9395 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9397 struct pci_dev *pdev;
9399 /* Obtain PCI device reference */
9403 pdev = phba->pcidev;
9405 /* Free coherent DMA memory allocated */
9406 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9407 phba->hbqslimp.virt, phba->hbqslimp.phys);
9408 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9409 phba->slim2p.virt, phba->slim2p.phys);
9411 /* I/O memory unmap */
9412 iounmap(phba->ctrl_regs_memmap_p);
9413 iounmap(phba->slim_memmap_p);
9419 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9420 * @phba: pointer to lpfc hba data structure.
9422 * This routine is invoked to wait for the SLI4 device Power On Self Test
9423 * (POST) to complete and to check the status.
9425 * Return 0 if successful, otherwise -ENODEV.
9428 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9430 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9431 struct lpfc_register reg_data;
9432 int i, port_error = 0;
9435 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9436 memset(&reg_data, 0, sizeof(reg_data));
9437 if (!phba->sli4_hba.PSMPHRregaddr)
9440 /* Wait up to 30 seconds for the SLI Port POST done and ready */
9441 for (i = 0; i < 3000; i++) {
9442 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9443 &portsmphr_reg.word0) ||
9444 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9445 /* Port has a fatal POST error, break out */
9446 port_error = -ENODEV;
9449 if (LPFC_POST_STAGE_PORT_READY ==
9450 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
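	/* Timing note: 3000 polls of the port semaphore register over the
	 * 30 second budget above works out to roughly a 10 ms delay per
	 * iteration (the delay itself is not shown in this excerpt).
	 */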
9456 * If there was a port error during POST, then don't proceed with
9457 * other register reads as the data may not be valid. Just exit.
9460 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9461 "1408 Port Failed POST - portsmphr=0x%x, "
9462 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9463 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9464 portsmphr_reg.word0,
9465 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9466 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9467 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9468 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9469 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9470 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9471 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9472 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9474 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9475 "2534 Device Info: SLIFamily=0x%x, "
9476 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9477 "SLIHint_2=0x%x, FT=0x%x\n",
9478 bf_get(lpfc_sli_intf_sli_family,
9479 &phba->sli4_hba.sli_intf),
9480 bf_get(lpfc_sli_intf_slirev,
9481 &phba->sli4_hba.sli_intf),
9482 bf_get(lpfc_sli_intf_if_type,
9483 &phba->sli4_hba.sli_intf),
9484 bf_get(lpfc_sli_intf_sli_hint1,
9485 &phba->sli4_hba.sli_intf),
9486 bf_get(lpfc_sli_intf_sli_hint2,
9487 &phba->sli4_hba.sli_intf),
9488 bf_get(lpfc_sli_intf_func_type,
9489 &phba->sli4_hba.sli_intf));
9491 * Check for other Port errors during the initialization
9492 * process. Fail the load if the port did not come up correctly.
9495 if_type = bf_get(lpfc_sli_intf_if_type,
9496 &phba->sli4_hba.sli_intf);
9498 case LPFC_SLI_INTF_IF_TYPE_0:
9499 phba->sli4_hba.ue_mask_lo =
9500 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9501 phba->sli4_hba.ue_mask_hi =
9502 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9504 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9506 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9507 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9508 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9509 lpfc_printf_log(phba, KERN_ERR,
9511 "1422 Unrecoverable Error "
9512 "Detected during POST "
9513 "uerr_lo_reg=0x%x, "
9514 "uerr_hi_reg=0x%x, "
9515 "ue_mask_lo_reg=0x%x, "
9516 "ue_mask_hi_reg=0x%x\n",
9519 phba->sli4_hba.ue_mask_lo,
9520 phba->sli4_hba.ue_mask_hi);
9521 port_error = -ENODEV;
9524 case LPFC_SLI_INTF_IF_TYPE_2:
9525 case LPFC_SLI_INTF_IF_TYPE_6:
9526 /* Final checks. The port status should be clean. */
9527 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9529 lpfc_sli4_unrecoverable_port(&reg_data)) {
9530 phba->work_status[0] =
9531 readl(phba->sli4_hba.u.if_type2.
9533 phba->work_status[1] =
9534 readl(phba->sli4_hba.u.if_type2.
9536 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9537 "2888 Unrecoverable port error "
9538 "following POST: port status reg "
9539 "0x%x, port_smphr reg 0x%x, "
9540 "error 1=0x%x, error 2=0x%x\n",
9542 portsmphr_reg.word0,
9543 phba->work_status[0],
9544 phba->work_status[1]);
9545 port_error = -ENODEV;
9549 if (lpfc_pldv_detect &&
9550 bf_get(lpfc_sli_intf_sli_family,
9551 &phba->sli4_hba.sli_intf) ==
9552 LPFC_SLI_INTF_FAMILY_G6)
9553 pci_write_config_byte(phba->pcidev,
9554 LPFC_SLI_INTF, CFG_PLD);
9556 case LPFC_SLI_INTF_IF_TYPE_1:
9565 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9566 * @phba: pointer to lpfc hba data structure.
9567 * @if_type: The SLI4 interface type getting configured.
9569 * This routine is invoked to set up SLI4 BAR0 PCI config space register memory map.
9573 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9576 case LPFC_SLI_INTF_IF_TYPE_0:
9577 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9578 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9579 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9580 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9581 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9582 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9583 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9584 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9585 phba->sli4_hba.SLIINTFregaddr =
9586 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9588 case LPFC_SLI_INTF_IF_TYPE_2:
9589 phba->sli4_hba.u.if_type2.EQDregaddr =
9590 phba->sli4_hba.conf_regs_memmap_p +
9591 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9592 phba->sli4_hba.u.if_type2.ERR1regaddr =
9593 phba->sli4_hba.conf_regs_memmap_p +
9594 LPFC_CTL_PORT_ER1_OFFSET;
9595 phba->sli4_hba.u.if_type2.ERR2regaddr =
9596 phba->sli4_hba.conf_regs_memmap_p +
9597 LPFC_CTL_PORT_ER2_OFFSET;
9598 phba->sli4_hba.u.if_type2.CTRLregaddr =
9599 phba->sli4_hba.conf_regs_memmap_p +
9600 LPFC_CTL_PORT_CTL_OFFSET;
9601 phba->sli4_hba.u.if_type2.STATUSregaddr =
9602 phba->sli4_hba.conf_regs_memmap_p +
9603 LPFC_CTL_PORT_STA_OFFSET;
9604 phba->sli4_hba.SLIINTFregaddr =
9605 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9606 phba->sli4_hba.PSMPHRregaddr =
9607 phba->sli4_hba.conf_regs_memmap_p +
9608 LPFC_CTL_PORT_SEM_OFFSET;
9609 phba->sli4_hba.RQDBregaddr =
9610 phba->sli4_hba.conf_regs_memmap_p +
9611 LPFC_ULP0_RQ_DOORBELL;
9612 phba->sli4_hba.WQDBregaddr =
9613 phba->sli4_hba.conf_regs_memmap_p +
9614 LPFC_ULP0_WQ_DOORBELL;
9615 phba->sli4_hba.CQDBregaddr =
9616 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9617 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9618 phba->sli4_hba.MQDBregaddr =
9619 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9620 phba->sli4_hba.BMBXregaddr =
9621 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9623 case LPFC_SLI_INTF_IF_TYPE_6:
9624 phba->sli4_hba.u.if_type2.EQDregaddr =
9625 phba->sli4_hba.conf_regs_memmap_p +
9626 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9627 phba->sli4_hba.u.if_type2.ERR1regaddr =
9628 phba->sli4_hba.conf_regs_memmap_p +
9629 LPFC_CTL_PORT_ER1_OFFSET;
9630 phba->sli4_hba.u.if_type2.ERR2regaddr =
9631 phba->sli4_hba.conf_regs_memmap_p +
9632 LPFC_CTL_PORT_ER2_OFFSET;
9633 phba->sli4_hba.u.if_type2.CTRLregaddr =
9634 phba->sli4_hba.conf_regs_memmap_p +
9635 LPFC_CTL_PORT_CTL_OFFSET;
9636 phba->sli4_hba.u.if_type2.STATUSregaddr =
9637 phba->sli4_hba.conf_regs_memmap_p +
9638 LPFC_CTL_PORT_STA_OFFSET;
9639 phba->sli4_hba.PSMPHRregaddr =
9640 phba->sli4_hba.conf_regs_memmap_p +
9641 LPFC_CTL_PORT_SEM_OFFSET;
9642 phba->sli4_hba.BMBXregaddr =
9643 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9645 case LPFC_SLI_INTF_IF_TYPE_1:
9647 dev_printk(KERN_ERR, &phba->pcidev->dev,
9648 "FATAL - unsupported SLI4 interface type - %d\n",
9655 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9656 * @phba: pointer to lpfc hba data structure.
9657 * @if_type: sli if type to operate on.
9659 * This routine is invoked to set up SLI4 BAR1 register memory map.
9662 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9665 case LPFC_SLI_INTF_IF_TYPE_0:
9666 phba->sli4_hba.PSMPHRregaddr =
9667 phba->sli4_hba.ctrl_regs_memmap_p +
9668 LPFC_SLIPORT_IF0_SMPHR;
9669 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9671 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9673 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9676 case LPFC_SLI_INTF_IF_TYPE_6:
9677 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9678 LPFC_IF6_RQ_DOORBELL;
9679 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9680 LPFC_IF6_WQ_DOORBELL;
9681 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9682 LPFC_IF6_CQ_DOORBELL;
9683 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9684 LPFC_IF6_EQ_DOORBELL;
9685 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9686 LPFC_IF6_MQ_DOORBELL;
9688 case LPFC_SLI_INTF_IF_TYPE_2:
9689 case LPFC_SLI_INTF_IF_TYPE_1:
9691 dev_err(&phba->pcidev->dev,
9692 "FATAL - unsupported SLI4 interface type - %d\n",
9699 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9700 * @phba: pointer to lpfc hba data structure.
9701 * @vf: virtual function number
9703 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9704 * based on the given virtual function number, @vf.
9706 * Return 0 if successful, otherwise -ENODEV.
9709 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9711 if (vf > LPFC_VIR_FUNC_MAX)
9714 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9715 vf * LPFC_VFR_PAGE_SIZE +
9716 LPFC_ULP0_RQ_DOORBELL);
9717 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9718 vf * LPFC_VFR_PAGE_SIZE +
9719 LPFC_ULP0_WQ_DOORBELL);
9720 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9721 vf * LPFC_VFR_PAGE_SIZE +
9722 LPFC_EQCQ_DOORBELL);
9723 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9724 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9725 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9726 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9727 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
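	/* Layout sketch: each virtual function owns one doorbell page, so a
	 * given register for VF n sits at n * LPFC_VFR_PAGE_SIZE plus the
	 * same per-register offset the physical function uses.
	 */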
9732 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9733 * @phba: pointer to lpfc hba data structure.
9735 * This routine is invoked to create the bootstrap mailbox
9736 * region consistent with the SLI-4 interface spec. This
9737 * routine allocates all memory necessary to communicate
9738 * mailbox commands to the port and sets up all alignment
9739 * needs. No locks are expected to be held when calling this routine.
9744 * -ENOMEM - could not allocate memory.
9747 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9750 struct lpfc_dmabuf *dmabuf;
9751 struct dma_address *dma_address;
9755 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9760 * The bootstrap mailbox region consists of 2 parts
9761 * plus an alignment restriction of 16 bytes.
9763 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9764 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9765 &dmabuf->phys, GFP_KERNEL);
9766 if (!dmabuf->virt) {
9772 * Initialize the bootstrap mailbox pointers now so that the register
9773 * operations are simple later. The mailbox dma address is required
9774 * to be 16-byte aligned. Also align the virtual memory as each
9775 * mailbox is copied into the bmbx mailbox region before issuing the
9776 * command to the port.
9778 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9779 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9781 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9782 LPFC_ALIGN_16_BYTE);
9783 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9784 LPFC_ALIGN_16_BYTE);
9787 * Set the high and low physical addresses now. The SLI4 alignment
9788 * requirement is 16 bytes and the mailbox is posted to the port
9789 * as two 30-bit addresses. The other data is a bit marking whether
9790 * the 30-bit address is the high or low address.
9791 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
9792 * cleanly on 32-bit machines.
9794 dma_address = &phba->sli4_hba.bmbx.dma_address;
9795 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9796 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9797 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9798 LPFC_BMBX_BIT1_ADDR_HI);
9800 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9801 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9802 LPFC_BMBX_BIT1_ADDR_LO);
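	/* Worked example (hypothetical 16-byte-aligned aphys): bits 33:4 of
	 * the physical address form addr_lo and bits 63:34 form addr_hi;
	 * each 30-bit field is shifted left by 2 and tagged with a bit that
	 * marks it as the high or low half, per the comment above.
	 */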
9807 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9808 * @phba: pointer to lpfc hba data structure.
9810 * This routine is invoked to teardown the bootstrap mailbox
9811 * region and release all host resources. This routine requires
9812 * the caller to ensure all mailbox commands have been recovered, no
9813 * additional mailbox commands are sent, and interrupts are disabled
9814 * before calling this routine.
9818 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9820 dma_free_coherent(&phba->pcidev->dev,
9821 phba->sli4_hba.bmbx.bmbx_size,
9822 phba->sli4_hba.bmbx.dmabuf->virt,
9823 phba->sli4_hba.bmbx.dmabuf->phys);
9825 kfree(phba->sli4_hba.bmbx.dmabuf);
9826 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9829 static const char * const lpfc_topo_to_str[] = {
9839 #define LINK_FLAGS_DEF 0x0
9840 #define LINK_FLAGS_P2P 0x1
9841 #define LINK_FLAGS_LOOP 0x2
9843 * lpfc_map_topology - Map the topology read from READ_CONFIG
9844 * @phba: pointer to lpfc hba data structure.
9845 * @rd_config: pointer to read config data
9847 * This routine is invoked to map the topology values as read
9848 * from the read config mailbox command. If the persistent
9849 * topology feature is supported, the firmware will provide the
9850 * saved topology information to be used in INIT_LINK.
9853 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9857 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9858 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9859 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9861 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9862 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
9865 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9866 "2019 FW does not support persistent topology "
9867 "Using driver parameter defined value [%s]",
9868 lpfc_topo_to_str[phba->cfg_topology]);
9871 /* FW supports persistent topology - override module parameter value */
9872 phba->hba_flag |= HBA_PERSISTENT_TOPO;
9874 /* if ASIC_GEN_NUM >= 0xC */
9875 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9876 LPFC_SLI_INTF_IF_TYPE_6) ||
9877 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9878 LPFC_SLI_INTF_FAMILY_G6)) {
9880 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9881 ? FLAGS_TOPOLOGY_MODE_LOOP
9882 : FLAGS_TOPOLOGY_MODE_PT_PT);
9884 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9888 /* If topology failover set - pt is '0' or '1' */
9889 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9890 FLAGS_TOPOLOGY_MODE_LOOP_PT);
9892 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9893 ? FLAGS_TOPOLOGY_MODE_PT_PT
9894 : FLAGS_TOPOLOGY_MODE_LOOP);
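	/* Decode summary: with topology failover (tf set), pt selects
	 * PT_LOOP vs LOOP_PT; without failover, pt selects pure PT_PT vs
	 * LOOP, matching the two branches above.
	 */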
9897 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9898 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9899 "2020 Using persistent topology value [%s]",
9900 lpfc_topo_to_str[phba->cfg_topology]);
9902 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9903 "2021 Invalid topology values from FW "
9904 "Using driver parameter defined value [%s]",
9905 lpfc_topo_to_str[phba->cfg_topology]);
9910 * lpfc_sli4_read_config - Get the config parameters.
9911 * @phba: pointer to lpfc hba data structure.
9913 * This routine is invoked to read the configuration parameters from the HBA.
9914 * The configuration parameters are used to set the base and maximum values
9915 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
9916 * allocation for the port.
9920 * -ENOMEM - No available memory
9921 * -EIO - The mailbox failed to complete successfully.
9924 lpfc_sli4_read_config(struct lpfc_hba *phba)
9927 struct lpfc_mbx_read_config *rd_config;
9928 union lpfc_sli4_cfg_shdr *shdr;
9929 uint32_t shdr_status, shdr_add_status;
9930 struct lpfc_mbx_get_func_cfg *get_func_cfg;
9931 struct lpfc_rsrc_desc_fcfcoe *desc;
9933 uint16_t forced_link_speed;
9934 uint32_t if_type, qmin, fawwpn;
9935 int length, i, rc = 0, rc2;
9937 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9939 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9940 "2011 Unable to allocate memory for issuing "
9941 "SLI_CONFIG_SPECIAL mailbox command\n");
9945 lpfc_read_config(phba, pmb);
9947 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9948 if (rc != MBX_SUCCESS) {
9949 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9950 "2012 Mailbox failed , mbxCmd x%x "
9951 "READ_CONFIG, mbxStatus x%x\n",
9952 bf_get(lpfc_mqe_command, &pmb->u.mqe),
9953 bf_get(lpfc_mqe_status, &pmb->u.mqe));
9956 rd_config = &pmb->u.mqe.un.rd_config;
9957 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9958 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9959 phba->sli4_hba.lnk_info.lnk_tp =
9960 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9961 phba->sli4_hba.lnk_info.lnk_no =
9962 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9963 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9964 "3081 lnk_type:%d, lnk_numb:%d\n",
9965 phba->sli4_hba.lnk_info.lnk_tp,
9966 phba->sli4_hba.lnk_info.lnk_no);
9968 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9969 "3082 Mailbox (x%x) returned ldv:x0\n",
9970 bf_get(lpfc_mqe_command, &pmb->u.mqe));
9971 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9972 phba->bbcredit_support = 1;
9973 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9976 fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
9979 lpfc_printf_log(phba, KERN_INFO,
9980 LOG_INIT | LOG_DISCOVERY,
9981 "2702 READ_CONFIG: FA-PWWN is "
9983 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
9985 /* Clear FW configured flag, preserve driver flag */
9986 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
9989 phba->sli4_hba.conf_trunk =
9990 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9991 phba->sli4_hba.extents_in_use =
9992 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9994 phba->sli4_hba.max_cfg_param.max_xri =
9995 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9996 /* Reduce resource usage in kdump environment */
9997 if (is_kdump_kernel() &&
9998 phba->sli4_hba.max_cfg_param.max_xri > 512)
9999 phba->sli4_hba.max_cfg_param.max_xri = 512;
10000 phba->sli4_hba.max_cfg_param.xri_base =
10001 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
10002 phba->sli4_hba.max_cfg_param.max_vpi =
10003 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
10004 /* Limit the max we support */
10005 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
10006 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
10007 phba->sli4_hba.max_cfg_param.vpi_base =
10008 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
10009 phba->sli4_hba.max_cfg_param.max_rpi =
10010 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
10011 phba->sli4_hba.max_cfg_param.rpi_base =
10012 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
10013 phba->sli4_hba.max_cfg_param.max_vfi =
10014 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
10015 phba->sli4_hba.max_cfg_param.vfi_base =
10016 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
10017 phba->sli4_hba.max_cfg_param.max_fcfi =
10018 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
10019 phba->sli4_hba.max_cfg_param.max_eq =
10020 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
10021 phba->sli4_hba.max_cfg_param.max_rq =
10022 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
10023 phba->sli4_hba.max_cfg_param.max_wq =
10024 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
10025 phba->sli4_hba.max_cfg_param.max_cq =
10026 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
10027 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
10028 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
10029 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
10030 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
10031 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
10032 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
10033 phba->max_vports = phba->max_vpi;
10035 /* Next decide on FPIN or Signal E2E CGN support
10036 * For congestion alarms and warnings the valid combinations are:
10037 * 1. FPIN alarms / FPIN warnings
10038 * 2. Signal alarms / Signal warnings
10039 * 3. FPIN alarms / Signal warnings
10040 * 4. Signal alarms / FPIN warnings
10042 * Initialize the adapter frequency to 100 mSecs
10044 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10045 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
10046 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
10048 if (lpfc_use_cgn_signal) {
10049 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
10050 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
10051 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
10053 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
10054 /* MUST support both alarm and warning
10055 * because EDC does not support alarm alone.
10057 if (phba->cgn_reg_signal !=
10058 EDC_CG_SIG_WARN_ONLY) {
10059 /* Must support both or none */
10060 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10061 phba->cgn_reg_signal =
10062 EDC_CG_SIG_NOTSUPPORTED;
10064 phba->cgn_reg_signal =
10065 EDC_CG_SIG_WARN_ALARM;
10066 phba->cgn_reg_fpin =
10067 LPFC_CGN_FPIN_NONE;
10072 /* Set the congestion initial signal and fpin values. */
10073 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
10074 phba->cgn_init_reg_signal = phba->cgn_reg_signal;
10076 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
10077 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
10078 phba->cgn_reg_signal, phba->cgn_reg_fpin);
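/*
 * Worked example (hypothetical port capabilities, assuming the elided
 * else branch pairs with the reg_signal check above): a port reporting
 * wcs=1 and acs=0 ends up with cgn_reg_signal = EDC_CG_SIG_WARN_ONLY
 * and FPIN alarms only (combination 3: FPIN alarms / Signal warnings);
 * a port reporting wcs=1 and acs=1 ends up with
 * cgn_reg_signal = EDC_CG_SIG_WARN_ALARM and
 * cgn_reg_fpin = LPFC_CGN_FPIN_NONE (combination 2).
 */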
10080 lpfc_map_topology(phba, rd_config);
10081 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10082 "2003 cfg params Extents? %d "
10087 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
10088 phba->sli4_hba.extents_in_use,
10089 phba->sli4_hba.max_cfg_param.xri_base,
10090 phba->sli4_hba.max_cfg_param.max_xri,
10091 phba->sli4_hba.max_cfg_param.vpi_base,
10092 phba->sli4_hba.max_cfg_param.max_vpi,
10093 phba->sli4_hba.max_cfg_param.vfi_base,
10094 phba->sli4_hba.max_cfg_param.max_vfi,
10095 phba->sli4_hba.max_cfg_param.rpi_base,
10096 phba->sli4_hba.max_cfg_param.max_rpi,
10097 phba->sli4_hba.max_cfg_param.max_fcfi,
10098 phba->sli4_hba.max_cfg_param.max_eq,
10099 phba->sli4_hba.max_cfg_param.max_cq,
10100 phba->sli4_hba.max_cfg_param.max_wq,
10101 phba->sli4_hba.max_cfg_param.max_rq,
10105 * Calculate queue resources based on how
10106 * many WQ/CQ/EQs are available.
10108 qmin = phba->sli4_hba.max_cfg_param.max_wq;
10109 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
10110 qmin = phba->sli4_hba.max_cfg_param.max_cq;
10112 * Reserve 4 (ELS, NVME LS, MBOX, plus one extra);
10113 * the remainder can be used for NVME / FCP.
10116 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
10117 qmin = phba->sli4_hba.max_cfg_param.max_eq;
10119 /* Check to see if there is enough for default cfg */
10120 if ((phba->cfg_irq_chann > qmin) ||
10121 (phba->cfg_hdw_queue > qmin)) {
10122 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10123 "2005 Reducing Queues - "
10124 "FW resource limitation: "
10125 "WQ %d CQ %d EQ %d: min %d: "
10126 "IRQ %d HDWQ %d\n",
10127 phba->sli4_hba.max_cfg_param.max_wq,
10128 phba->sli4_hba.max_cfg_param.max_cq,
10129 phba->sli4_hba.max_cfg_param.max_eq,
10130 qmin, phba->cfg_irq_chann,
10131 phba->cfg_hdw_queue);
10133 if (phba->cfg_irq_chann > qmin)
10134 phba->cfg_irq_chann = qmin;
10135 if (phba->cfg_hdw_queue > qmin)
10136 phba->cfg_hdw_queue = qmin;
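/*
 * Worked example (hypothetical firmware limits, assuming the elided
 * step subtracts the 4 reserved queues as the comment above states):
 * READ_CONFIG reporting WQ=64, CQ=32, EQ=16 gives
 * qmin = min(64, 32, 16) - 4 = 12, so cfg_irq_chann and cfg_hdw_queue
 * would each be clamped to 12 here.
 */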
10143 /* Update link speed if forced link speed is supported */
10144 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10145 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10146 forced_link_speed =
10147 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
10148 if (forced_link_speed) {
10149 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
10151 switch (forced_link_speed) {
10152 case LINK_SPEED_1G:
10153 phba->cfg_link_speed =
10154 LPFC_USER_LINK_SPEED_1G;
10156 case LINK_SPEED_2G:
10157 phba->cfg_link_speed =
10158 LPFC_USER_LINK_SPEED_2G;
10160 case LINK_SPEED_4G:
10161 phba->cfg_link_speed =
10162 LPFC_USER_LINK_SPEED_4G;
10164 case LINK_SPEED_8G:
10165 phba->cfg_link_speed =
10166 LPFC_USER_LINK_SPEED_8G;
10168 case LINK_SPEED_10G:
10169 phba->cfg_link_speed =
10170 LPFC_USER_LINK_SPEED_10G;
10172 case LINK_SPEED_16G:
10173 phba->cfg_link_speed =
10174 LPFC_USER_LINK_SPEED_16G;
10176 case LINK_SPEED_32G:
10177 phba->cfg_link_speed =
10178 LPFC_USER_LINK_SPEED_32G;
10180 case LINK_SPEED_64G:
10181 phba->cfg_link_speed =
10182 LPFC_USER_LINK_SPEED_64G;
10185 phba->cfg_link_speed =
10186 LPFC_USER_LINK_SPEED_AUTO;
10189 lpfc_printf_log(phba, KERN_ERR,
10191 "0047 Unrecognized link "
10193 forced_link_speed);
10194 phba->cfg_link_speed =
10195 LPFC_USER_LINK_SPEED_AUTO;
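/*
 * The switch above is a 1:1 mapping; an equivalent table-driven sketch
 * (illustrative only, not part of the driver):
 *
 *	static const uint32_t spd_map[][2] = {
 *		{ LINK_SPEED_1G,  LPFC_USER_LINK_SPEED_1G },
 *		{ LINK_SPEED_2G,  LPFC_USER_LINK_SPEED_2G },
 *		...
 *		{ LINK_SPEED_64G, LPFC_USER_LINK_SPEED_64G },
 *	};
 *
 * with anything unrecognized logged and mapped to
 * LPFC_USER_LINK_SPEED_AUTO, exactly as the default path above does.
 */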
10200 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
10201 length = phba->sli4_hba.max_cfg_param.max_xri -
10202 lpfc_sli4_get_els_iocb_cnt(phba);
10203 if (phba->cfg_hba_queue_depth > length) {
10204 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10205 "3361 HBA queue depth changed from %d to %d\n",
10206 phba->cfg_hba_queue_depth, length);
10207 phba->cfg_hba_queue_depth = length;
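/*
 * Worked example (hypothetical values): with max_xri = 2048 and
 * lpfc_sli4_get_els_iocb_cnt() returning 256, a configured
 * cfg_hba_queue_depth of 8192 would be reduced to 2048 - 256 = 1792.
 */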
10210 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10211 LPFC_SLI_INTF_IF_TYPE_2)
10214 /* get the pf# and vf# for SLI4 if_type 2 port */
10215 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
10216 sizeof(struct lpfc_sli4_cfg_mhdr));
10217 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
10218 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
10219 length, LPFC_SLI4_MBX_EMBED);
10221 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10222 shdr = (union lpfc_sli4_cfg_shdr *)
10223 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10224 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10225 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10226 if (rc2 || shdr_status || shdr_add_status) {
10227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10228 "3026 Mailbox failed , mbxCmd x%x "
10229 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10230 bf_get(lpfc_mqe_command, &pmb->u.mqe),
10231 bf_get(lpfc_mqe_status, &pmb->u.mqe));
10235 /* search for the fc_fcoe resource descriptor */
10236 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10238 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10239 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10240 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10241 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10242 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10243 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10246 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10247 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10248 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10249 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10250 phba->sli4_hba.iov.pf_number =
10251 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10252 phba->sli4_hba.iov.vf_number =
10253 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10258 if (i < LPFC_RSRC_DESC_MAX_NUM)
10259 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10260 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10261 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10262 phba->sli4_hba.iov.vf_number);
10264 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10265 "3028 GET_FUNCTION_CONFIG: failed to find "
10266 "Resource Descriptor:x%x\n",
10267 LPFC_RSRC_DESC_TYPE_FCFCOE);
10270 mempool_free(pmb, phba->mbox_mem_pool);
10275 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10276 * @phba: pointer to lpfc hba data structure.
10278 * This routine is invoked to set up the port-side endian order when
10279 * the port if_type is 0. This routine has no function for other if_types.
10284 * -ENOMEM - No available memory
10285 * -EIO - The mailbox failed to complete successfully.
10288 lpfc_setup_endian_order(struct lpfc_hba *phba)
10290 LPFC_MBOXQ_t *mboxq;
10291 uint32_t if_type, rc = 0;
10292 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10293 HOST_ENDIAN_HIGH_WORD1};
10295 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10297 case LPFC_SLI_INTF_IF_TYPE_0:
10298 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10301 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10302 "0492 Unable to allocate memory for "
10303 "issuing SLI_CONFIG_SPECIAL mailbox "
10309 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
10310 * two words to contain special data values and no other data.
10312 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10313 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10314 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10315 if (rc != MBX_SUCCESS) {
10316 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10317 "0493 SLI_CONFIG_SPECIAL mailbox "
10318 "failed with status x%x\n",
10322 mempool_free(mboxq, phba->mbox_mem_pool);
10324 case LPFC_SLI_INTF_IF_TYPE_6:
10325 case LPFC_SLI_INTF_IF_TYPE_2:
10326 case LPFC_SLI_INTF_IF_TYPE_1:
10334 * lpfc_sli4_queue_verify - Verify and update EQ counts
10335 * @phba: pointer to lpfc hba data structure.
10337 * This routine is invoked to check the user-settable queue counts for EQs.
10338 * After this routine is called the counts will be set to valid values that
10339 * adhere to the constraints of the system's interrupt vectors and the port's
10344 * -ENOMEM - No available memory
10347 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10350 * Sanity check for configured queue parameters against the run-time
10351 * device parameters
10354 if (phba->nvmet_support) {
10355 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10356 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10357 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10358 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10362 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10363 phba->cfg_hdw_queue, phba->cfg_irq_chann,
10364 phba->cfg_nvmet_mrq);
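/*
 * Worked example (hypothetical settings): an NVMET-enabled port with
 * cfg_hdw_queue = 4 and cfg_nvmet_mrq = 8 has cfg_nvmet_mrq clamped to
 * 4 by the checks above (and never above LPFC_NVMET_MRQ_MAX).
 */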
10366 /* Get EQ depth from module parameter, fake the default for now */
10367 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10368 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10370 /* Get CQ depth from module parameter, fake the default for now */
10371 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10372 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10377 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10379 struct lpfc_queue *qdesc;
10383 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10384 /* Create Fast Path IO CQs */
10385 if (phba->enab_exp_wqcq_pages)
10386 /* Increase the CQ size when WQEs contain an embedded cdb */
10387 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10388 phba->sli4_hba.cq_esize,
10389 LPFC_CQE_EXP_COUNT, cpu);
10392 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10393 phba->sli4_hba.cq_esize,
10394 phba->sli4_hba.cq_ecount, cpu);
10396 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10397 "0499 Failed allocate fast-path IO CQ (%d)\n",
10401 qdesc->qe_valid = 1;
10403 qdesc->chann = cpu;
10404 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10406 /* Create Fast Path IO WQs */
10407 if (phba->enab_exp_wqcq_pages) {
10408 /* Increase the WQ size when WQEs contain an embedded cdb */
10409 wqesize = (phba->fcp_embed_io) ?
10410 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10411 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10413 LPFC_WQE_EXP_COUNT, cpu);
10415 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10416 phba->sli4_hba.wq_esize,
10417 phba->sli4_hba.wq_ecount, cpu);
10420 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10421 "0503 Failed allocate fast-path IO WQ (%d)\n",
10426 qdesc->chann = cpu;
10427 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10428 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
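/*
 * Sizing note (illustrative): with enab_exp_wqcq_pages set, the WQ/CQ
 * above are built from LPFC_EXPANDED_PAGE_SIZE pages, and with
 * fcp_embed_io the WQ uses 128-byte WQEs (LPFC_WQE128_SIZE) so each
 * WQE can carry an embedded CDB; otherwise the default page size and
 * the wq_esize/wq_ecount and cq_esize/cq_ecount defaults apply.
 */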
10433 * lpfc_sli4_queue_create - Create all the SLI4 queues
10434 * @phba: pointer to lpfc hba data structure.
10436 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
10437 * operation. For each SLI4 queue type, the parameters such as queue entry
10438 * count (queue depth) shall be taken from the module parameter. For now,
10439 * we just use some constant number as a placeholder.
10443 * -ENOMEM - No available memory
10444 * -EIO - The mailbox failed to complete successfully.
10447 lpfc_sli4_queue_create(struct lpfc_hba *phba)
10449 struct lpfc_queue *qdesc;
10450 int idx, cpu, eqcpu;
10451 struct lpfc_sli4_hdw_queue *qp;
10452 struct lpfc_vector_map_info *cpup;
10453 struct lpfc_vector_map_info *eqcpup;
10454 struct lpfc_eq_intr_info *eqi;
10457 * Create HBA Record arrays.
10458 * Both NVME and FCP will share the same vectors / EQs
10460 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10461 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10462 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10463 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10464 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10465 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10466 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10467 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10468 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10469 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10471 if (!phba->sli4_hba.hdwq) {
10472 phba->sli4_hba.hdwq = kcalloc(
10473 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10475 if (!phba->sli4_hba.hdwq) {
10476 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10477 "6427 Failed allocate memory for "
10478 "fast-path Hardware Queue array\n");
10481 /* Prepare hardware queues to take IO buffers */
10482 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10483 qp = &phba->sli4_hba.hdwq[idx];
10484 spin_lock_init(&qp->io_buf_list_get_lock);
10485 spin_lock_init(&qp->io_buf_list_put_lock);
10486 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10487 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10488 qp->get_io_bufs = 0;
10489 qp->put_io_bufs = 0;
10490 qp->total_io_bufs = 0;
10491 spin_lock_init(&qp->abts_io_buf_list_lock);
10492 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10493 qp->abts_scsi_io_bufs = 0;
10494 qp->abts_nvme_io_bufs = 0;
10495 INIT_LIST_HEAD(&qp->sgl_list);
10496 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10497 spin_lock_init(&qp->hdwq_lock);
10501 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10502 if (phba->nvmet_support) {
10503 phba->sli4_hba.nvmet_cqset = kcalloc(
10504 phba->cfg_nvmet_mrq,
10505 sizeof(struct lpfc_queue *),
10507 if (!phba->sli4_hba.nvmet_cqset) {
10508 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10509 "3121 Fail allocate memory for "
10510 "fast-path CQ set array\n");
10513 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10514 phba->cfg_nvmet_mrq,
10515 sizeof(struct lpfc_queue *),
10517 if (!phba->sli4_hba.nvmet_mrq_hdr) {
10518 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10519 "3122 Fail allocate memory for "
10520 "fast-path RQ set hdr array\n");
10523 phba->sli4_hba.nvmet_mrq_data = kcalloc(
10524 phba->cfg_nvmet_mrq,
10525 sizeof(struct lpfc_queue *),
10527 if (!phba->sli4_hba.nvmet_mrq_data) {
10528 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10529 "3124 Fail allocate memory for "
10530 "fast-path RQ set data array\n");
10536 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10538 /* Create HBA Event Queues (EQs) */
10539 for_each_present_cpu(cpu) {
10540 /* We only want to create 1 EQ per vector, even though
10541 * multiple CPUs might be using that vector, so we only
10542 * select the CPUs that are flagged LPFC_CPU_FIRST_IRQ.
10544 cpup = &phba->sli4_hba.cpu_map[cpu];
10545 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10548 /* Get a ptr to the Hardware Queue associated with this CPU */
10549 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10551 /* Allocate an EQ */
10552 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10553 phba->sli4_hba.eq_esize,
10554 phba->sli4_hba.eq_ecount, cpu);
10556 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10557 "0497 Failed allocate EQ (%d)\n",
10561 qdesc->qe_valid = 1;
10562 qdesc->hdwq = cpup->hdwq;
10563 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10564 qdesc->last_cpu = qdesc->chann;
10566 /* Save the allocated EQ in the Hardware Queue */
10567 qp->hba_eq = qdesc;
10569 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10570 list_add(&qdesc->cpu_list, &eqi->list);
10573 /* Now we need to populate the other Hardware Queues that share
10574 * an IRQ vector with the associated EQ ptr.
10576 for_each_present_cpu(cpu) {
10577 cpup = &phba->sli4_hba.cpu_map[cpu];
10579 /* Check for EQ already allocated in previous loop */
10580 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10583 /* Check for multiple CPUs per hdwq */
10584 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10588 /* We need to share an EQ for this hdwq */
10589 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10590 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10591 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
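/*
 * Worked example (hypothetical mapping): if CPUs 0-3 share IRQ vector
 * 0 and only CPU 0 is flagged LPFC_CPU_FIRST_IRQ, the first loop
 * allocates a single EQ for CPU 0 and this loop points the hdwq
 * entries for CPUs 1-3 at that same EQ via the cpu_map/eq lookup.
 */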
10594 /* Allocate IO Path SLI4 CQ/WQs */
10595 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10596 if (lpfc_alloc_io_wq_cq(phba, idx))
10600 if (phba->nvmet_support) {
10601 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10602 cpu = lpfc_find_cpu_handle(phba, idx,
10603 LPFC_FIND_BY_HDWQ);
10604 qdesc = lpfc_sli4_queue_alloc(phba,
10605 LPFC_DEFAULT_PAGE_SIZE,
10606 phba->sli4_hba.cq_esize,
10607 phba->sli4_hba.cq_ecount,
10610 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10611 "3142 Failed allocate NVME "
10612 "CQ Set (%d)\n", idx);
10615 qdesc->qe_valid = 1;
10617 qdesc->chann = cpu;
10618 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10623 * Create Slow Path Completion Queues (CQs)
10626 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10627 /* Create slow-path Mailbox Command Complete Queue */
10628 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10629 phba->sli4_hba.cq_esize,
10630 phba->sli4_hba.cq_ecount, cpu);
10632 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10633 "0500 Failed allocate slow-path mailbox CQ\n");
10636 qdesc->qe_valid = 1;
10637 phba->sli4_hba.mbx_cq = qdesc;
10639 /* Create slow-path ELS Complete Queue */
10640 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10641 phba->sli4_hba.cq_esize,
10642 phba->sli4_hba.cq_ecount, cpu);
10644 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10645 "0501 Failed allocate slow-path ELS CQ\n");
10648 qdesc->qe_valid = 1;
10649 qdesc->chann = cpu;
10650 phba->sli4_hba.els_cq = qdesc;
10654 * Create Slow Path Work Queues (WQs)
10657 /* Create Mailbox Command Queue */
10659 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10660 phba->sli4_hba.mq_esize,
10661 phba->sli4_hba.mq_ecount, cpu);
10663 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10664 "0505 Failed allocate slow-path MQ\n");
10667 qdesc->chann = cpu;
10668 phba->sli4_hba.mbx_wq = qdesc;
10671 * Create ELS Work Queues
10674 /* Create slow-path ELS Work Queue */
10675 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10676 phba->sli4_hba.wq_esize,
10677 phba->sli4_hba.wq_ecount, cpu);
10679 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10680 "0504 Failed allocate slow-path ELS WQ\n");
10683 qdesc->chann = cpu;
10684 phba->sli4_hba.els_wq = qdesc;
10685 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10687 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10688 /* Create NVME LS Complete Queue */
10689 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10690 phba->sli4_hba.cq_esize,
10691 phba->sli4_hba.cq_ecount, cpu);
10693 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10694 "6079 Failed allocate NVME LS CQ\n");
10697 qdesc->chann = cpu;
10698 qdesc->qe_valid = 1;
10699 phba->sli4_hba.nvmels_cq = qdesc;
10701 /* Create NVME LS Work Queue */
10702 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10703 phba->sli4_hba.wq_esize,
10704 phba->sli4_hba.wq_ecount, cpu);
10706 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10707 "6080 Failed allocate NVME LS WQ\n");
10710 qdesc->chann = cpu;
10711 phba->sli4_hba.nvmels_wq = qdesc;
10712 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10716 * Create Receive Queue (RQ)
10719 /* Create Receive Queue for header */
10720 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10721 phba->sli4_hba.rq_esize,
10722 phba->sli4_hba.rq_ecount, cpu);
10724 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10725 "0506 Failed allocate receive HRQ\n");
10728 phba->sli4_hba.hdr_rq = qdesc;
10730 /* Create Receive Queue for data */
10731 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10732 phba->sli4_hba.rq_esize,
10733 phba->sli4_hba.rq_ecount, cpu);
10735 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10736 "0507 Failed allocate receive DRQ\n");
10739 phba->sli4_hba.dat_rq = qdesc;
10741 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10742 phba->nvmet_support) {
10743 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10744 cpu = lpfc_find_cpu_handle(phba, idx,
10745 LPFC_FIND_BY_HDWQ);
10746 /* Create NVMET Receive Queue for header */
10747 qdesc = lpfc_sli4_queue_alloc(phba,
10748 LPFC_DEFAULT_PAGE_SIZE,
10749 phba->sli4_hba.rq_esize,
10750 LPFC_NVMET_RQE_DEF_COUNT,
10753 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10754 "3146 Failed allocate "
10759 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10761 /* Only needed for header of RQ pair */
10762 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10765 if (qdesc->rqbp == NULL) {
10766 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10767 "6131 Failed allocate "
10772 /* Put list in known state in case driver load fails. */
10773 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10775 /* Create NVMET Receive Queue for data */
10776 qdesc = lpfc_sli4_queue_alloc(phba,
10777 LPFC_DEFAULT_PAGE_SIZE,
10778 phba->sli4_hba.rq_esize,
10779 LPFC_NVMET_RQE_DEF_COUNT,
10782 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10783 "3156 Failed allocate "
10788 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10792 /* Clear NVME stats */
10793 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10794 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10795 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10796 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10800 /* Clear SCSI stats */
10801 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10802 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10803 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10804 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10811 lpfc_sli4_queue_destroy(phba);
10816 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
10819 lpfc_sli4_queue_free(*qp);
10825 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10832 for (idx = 0; idx < max; idx++)
10833 __lpfc_sli4_release_queue(&(*qs)[idx]);
10840 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10842 struct lpfc_sli4_hdw_queue *hdwq;
10843 struct lpfc_queue *eq;
10846 hdwq = phba->sli4_hba.hdwq;
10848 /* Loop thru all Hardware Queues */
10849 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10850 /* Free the CQ/WQ corresponding to the Hardware Queue */
10851 lpfc_sli4_queue_free(hdwq[idx].io_cq);
10852 lpfc_sli4_queue_free(hdwq[idx].io_wq);
10853 hdwq[idx].hba_eq = NULL;
10854 hdwq[idx].io_cq = NULL;
10855 hdwq[idx].io_wq = NULL;
10856 if (phba->cfg_xpsgl && !phba->nvmet_support)
10857 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10858 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10860 /* Loop thru all IRQ vectors */
10861 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10862 /* Free the EQ corresponding to the IRQ vector */
10863 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10864 lpfc_sli4_queue_free(eq);
10865 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10870 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10871 * @phba: pointer to lpfc hba data structure.
10873 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
10878 * -ENOMEM - No available memory
10879 * -EIO - The mailbox failed to complete successfully.
10882 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10885 * Set FREE_INIT before beginning to free the queues.
10886 * Wait until all queue users acknowledge the release
10887 * by clearing FREE_WAIT.
10889 spin_lock_irq(&phba->hbalock);
10890 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10891 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10892 spin_unlock_irq(&phba->hbalock);
10894 spin_lock_irq(&phba->hbalock);
10896 spin_unlock_irq(&phba->hbalock);
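/*
 * A minimal sketch of the assumed reader side of this handshake (not
 * code from this function): a queue user brackets its access with
 *
 *	spin_lock_irq(&phba->hbalock);
 *	phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
 *	spin_unlock_irq(&phba->hbalock);
 *	... touch the queue ...
 *	spin_lock_irq(&phba->hbalock);
 *	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
 *	spin_unlock_irq(&phba->hbalock);
 *
 * so the wait loop above spins until no user is mid-access.
 */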
10898 lpfc_sli4_cleanup_poll_list(phba);
10900 /* Release HBA eqs */
10901 if (phba->sli4_hba.hdwq)
10902 lpfc_sli4_release_hdwq(phba);
10904 if (phba->nvmet_support) {
10905 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10906 phba->cfg_nvmet_mrq);
10908 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10909 phba->cfg_nvmet_mrq);
10910 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10911 phba->cfg_nvmet_mrq);
10914 /* Release mailbox command work queue */
10915 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10917 /* Release ELS work queue */
10918 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10920 /* Release NVME LS work queue */
10921 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10923 /* Release unsolicited receive queue */
10924 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10925 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10927 /* Release ELS complete queue */
10928 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10930 /* Release NVME LS complete queue */
10931 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10933 /* Release mailbox command complete queue */
10934 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10936 /* Everything on this list has been freed */
10937 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10939 /* Done with freeing the queues */
10940 spin_lock_irq(&phba->hbalock);
10941 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10942 spin_unlock_irq(&phba->hbalock);
10946 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10948 struct lpfc_rqb *rqbp;
10949 struct lpfc_dmabuf *h_buf;
10950 struct rqb_dmabuf *rqb_buffer;
10953 while (!list_empty(&rqbp->rqb_buffer_list)) {
10954 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10955 struct lpfc_dmabuf, list);
10957 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10958 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10959 rqbp->buffer_count--;
10965 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10966 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10967 int qidx, uint32_t qtype)
10969 struct lpfc_sli_ring *pring;
10972 if (!eq || !cq || !wq) {
10973 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10974 "6085 Fast-path %s (%d) not allocated\n",
10975 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10979 /* create the CQ first */
10980 rc = lpfc_cq_create(phba, cq, eq,
10981 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10983 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10984 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
10985 qidx, (uint32_t)rc);
10989 if (qtype != LPFC_MBOX) {
10990 /* Setup cq_map for fast lookup */
10992 *cq_map = cq->queue_id;
10994 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10995 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10996 qidx, cq->queue_id, qidx, eq->queue_id);
10998 /* create the WQ */
10999 rc = lpfc_wq_create(phba, wq, cq, qtype);
11001 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11002 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
11003 qidx, (uint32_t)rc);
11004 /* no need to tear down cq - caller will do so */
11008 /* Bind this CQ/WQ to the NVME ring */
11010 pring->sli.sli4.wqp = (void *)wq;
11013 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11014 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
11015 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
11017 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
11019 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11020 "0539 Failed setup of slow-path MQ: "
11021 "rc = 0x%x\n", rc);
11022 /* no need to tear down cq - caller will do so */
11026 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11027 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
11028 phba->sli4_hba.mbx_wq->queue_id,
11029 phba->sli4_hba.mbx_cq->queue_id);
11036 * lpfc_setup_cq_lookup - Setup the CQ lookup table
11037 * @phba: pointer to lpfc hba data structure.
11039 * This routine will populate the cq_lookup table with all
11040 * available CQ queue_ids.
11043 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
11045 struct lpfc_queue *eq, *childq;
11048 memset(phba->sli4_hba.cq_lookup, 0,
11049 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
11050 /* Loop thru all IRQ vectors */
11051 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11052 /* Get the EQ corresponding to the IRQ vector */
11053 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11056 /* Loop through all CQs associated with that EQ */
11057 list_for_each_entry(childq, &eq->child_list, list) {
11058 if (childq->queue_id > phba->sli4_hba.cq_max)
11060 if (childq->subtype == LPFC_IO)
11061 phba->sli4_hba.cq_lookup[childq->queue_id] =
11068 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
11069 * @phba: pointer to lpfc hba data structure.
11071 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
11076 * -ENOMEM - No available memory
11077 * -EIO - The mailbox failed to complete successfully.
11080 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
11082 uint32_t shdr_status, shdr_add_status;
11083 union lpfc_sli4_cfg_shdr *shdr;
11084 struct lpfc_vector_map_info *cpup;
11085 struct lpfc_sli4_hdw_queue *qp;
11086 LPFC_MBOXQ_t *mboxq;
11088 uint32_t length, usdelay;
11091 /* Check for dual-ULP support */
11092 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11094 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11095 "3249 Unable to allocate memory for "
11096 "QUERY_FW_CFG mailbox command\n");
11099 length = (sizeof(struct lpfc_mbx_query_fw_config) -
11100 sizeof(struct lpfc_sli4_cfg_mhdr));
11101 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11102 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
11103 length, LPFC_SLI4_MBX_EMBED);
11105 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11107 shdr = (union lpfc_sli4_cfg_shdr *)
11108 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11109 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11110 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11111 if (shdr_status || shdr_add_status || rc) {
11112 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11113 "3250 QUERY_FW_CFG mailbox failed with status "
11114 "x%x add_status x%x, mbx status x%x\n",
11115 shdr_status, shdr_add_status, rc);
11116 mempool_free(mboxq, phba->mbox_mem_pool);
11121 phba->sli4_hba.fw_func_mode =
11122 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
11123 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
11124 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
11125 phba->sli4_hba.physical_port =
11126 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
11127 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11128 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
11129 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
11130 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
11132 mempool_free(mboxq, phba->mbox_mem_pool);
11135 * Set up HBA Event Queues (EQs)
11137 qp = phba->sli4_hba.hdwq;
11139 /* Set up HBA event queue */
11141 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11142 "3147 Fast-path EQs not allocated\n");
11147 /* Loop thru all IRQ vectors */
11148 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11149 /* Create HBA Event Queues (EQs) in order */
11150 for_each_present_cpu(cpu) {
11151 cpup = &phba->sli4_hba.cpu_map[cpu];
11153 /* Look for the CPU that's using that vector with
11154 * LPFC_CPU_FIRST_IRQ set.
11156 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11158 if (qidx != cpup->eq)
11161 /* Create an EQ for that vector */
11162 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
11163 phba->cfg_fcp_imax);
11165 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11166 "0523 Failed setup of fast-path"
11167 " EQ (%d), rc = 0x%x\n",
11168 cpup->eq, (uint32_t)rc);
11172 /* Save the EQ for that vector in the hba_eq_hdl */
11173 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
11174 qp[cpup->hdwq].hba_eq;
11176 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11177 "2584 HBA EQ setup: queue[%d]-id=%d\n",
11179 qp[cpup->hdwq].hba_eq->queue_id);
11183 /* Loop thru all Hardware Queues */
11184 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11185 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
11186 cpup = &phba->sli4_hba.cpu_map[cpu];
11188 /* Create the CQ/WQ corresponding to the Hardware Queue */
11189 rc = lpfc_create_wq_cq(phba,
11190 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
11193 &phba->sli4_hba.hdwq[qidx].io_cq_map,
11197 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11198 "0535 Failed to setup fastpath "
11199 "IO WQ/CQ (%d), rc = 0x%x\n",
11200 qidx, (uint32_t)rc);
11206 * Set up Slow Path Complete Queues (CQs)
11209 /* Set up slow-path MBOX CQ/MQ */
11211 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11212 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11213 "0528 %s not allocated\n",
11214 phba->sli4_hba.mbx_cq ?
11215 "Mailbox WQ" : "Mailbox CQ");
11220 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11221 phba->sli4_hba.mbx_cq,
11222 phba->sli4_hba.mbx_wq,
11223 NULL, 0, LPFC_MBOX);
11225 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11226 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
11230 if (phba->nvmet_support) {
11231 if (!phba->sli4_hba.nvmet_cqset) {
11232 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11233 "3165 Fast-path NVME CQ Set "
11234 "array not allocated\n");
11238 if (phba->cfg_nvmet_mrq > 1) {
11239 rc = lpfc_cq_create_set(phba,
11240 phba->sli4_hba.nvmet_cqset,
11242 LPFC_WCQ, LPFC_NVMET);
11244 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11245 "3164 Failed setup of NVME CQ "
11246 "Set, rc = 0x%x\n",
11251 /* Set up NVMET Receive Complete Queue */
11252 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11254 LPFC_WCQ, LPFC_NVMET);
11256 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11257 "6089 Failed setup NVMET CQ: "
11258 "rc = 0x%x\n", (uint32_t)rc);
11261 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11263 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11264 "6090 NVMET CQ setup: cq-id=%d, "
11265 "parent eq-id=%d\n",
11266 phba->sli4_hba.nvmet_cqset[0]->queue_id,
11267 qp[0].hba_eq->queue_id);
11271 /* Set up slow-path ELS WQ/CQ */
11272 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11273 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11274 "0530 ELS %s not allocated\n",
11275 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11279 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11280 phba->sli4_hba.els_cq,
11281 phba->sli4_hba.els_wq,
11282 NULL, 0, LPFC_ELS);
11284 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11285 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11289 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11290 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11291 phba->sli4_hba.els_wq->queue_id,
11292 phba->sli4_hba.els_cq->queue_id);
11294 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11295 /* Set up NVME LS Complete Queue */
11296 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11297 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11298 "6091 LS %s not allocated\n",
11299 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11303 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11304 phba->sli4_hba.nvmels_cq,
11305 phba->sli4_hba.nvmels_wq,
11306 NULL, 0, LPFC_NVME_LS);
11308 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11309 "0526 Failed setup of NVVME LS WQ/CQ: "
11310 "rc = 0x%x\n", (uint32_t)rc);
11314 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11315 "6096 ELS WQ setup: wq-id=%d, "
11316 "parent cq-id=%d\n",
11317 phba->sli4_hba.nvmels_wq->queue_id,
11318 phba->sli4_hba.nvmels_cq->queue_id);
11322 * Create NVMET Receive Queue (RQ)
11324 if (phba->nvmet_support) {
11325 if ((!phba->sli4_hba.nvmet_cqset) ||
11326 (!phba->sli4_hba.nvmet_mrq_hdr) ||
11327 (!phba->sli4_hba.nvmet_mrq_data)) {
11328 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11329 "6130 MRQ CQ Queues not "
11334 if (phba->cfg_nvmet_mrq > 1) {
11335 rc = lpfc_mrq_create(phba,
11336 phba->sli4_hba.nvmet_mrq_hdr,
11337 phba->sli4_hba.nvmet_mrq_data,
11338 phba->sli4_hba.nvmet_cqset,
11341 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11342 "6098 Failed setup of NVMET "
11343 "MRQ: rc = 0x%x\n",
11349 rc = lpfc_rq_create(phba,
11350 phba->sli4_hba.nvmet_mrq_hdr[0],
11351 phba->sli4_hba.nvmet_mrq_data[0],
11352 phba->sli4_hba.nvmet_cqset[0],
11355 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11356 "6057 Failed setup of NVMET "
11357 "Receive Queue: rc = 0x%x\n",
11363 phba, KERN_INFO, LOG_INIT,
11364 "6099 NVMET RQ setup: hdr-rq-id=%d, "
11365 "dat-rq-id=%d parent cq-id=%d\n",
11366 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11367 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11368 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11373 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11374 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11375 "0540 Receive Queue not allocated\n");
11380 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11381 phba->sli4_hba.els_cq, LPFC_USOL);
11383 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11384 "0541 Failed setup of Receive Queue: "
11385 "rc = 0x%x\n", (uint32_t)rc);
11389 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11390 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11391 "parent cq-id=%d\n",
11392 phba->sli4_hba.hdr_rq->queue_id,
11393 phba->sli4_hba.dat_rq->queue_id,
11394 phba->sli4_hba.els_cq->queue_id);
11396 if (phba->cfg_fcp_imax)
11397 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11401 for (qidx = 0; qidx < phba->cfg_irq_chann;
11402 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11403 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11406 if (phba->sli4_hba.cq_max) {
11407 kfree(phba->sli4_hba.cq_lookup);
11408 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11409 sizeof(struct lpfc_queue *), GFP_KERNEL);
11410 if (!phba->sli4_hba.cq_lookup) {
11411 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11412 "0549 Failed setup of CQ Lookup table: "
11413 "size 0x%x\n", phba->sli4_hba.cq_max);
11417 lpfc_setup_cq_lookup(phba);
11422 lpfc_sli4_queue_unset(phba);
11428 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11429 * @phba: pointer to lpfc hba data structure.
11431 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
11436 * -ENOMEM - No available memory
11437 * -EIO - The mailbox failed to complete successfully.
11440 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11442 struct lpfc_sli4_hdw_queue *qp;
11443 struct lpfc_queue *eq;
11446 /* Unset mailbox command work queue */
11447 if (phba->sli4_hba.mbx_wq)
11448 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11450 /* Unset NVME LS work queue */
11451 if (phba->sli4_hba.nvmels_wq)
11452 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11454 /* Unset ELS work queue */
11455 if (phba->sli4_hba.els_wq)
11456 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11458 /* Unset unsolicited receive queue */
11459 if (phba->sli4_hba.hdr_rq)
11460 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11461 phba->sli4_hba.dat_rq);
11463 /* Unset mailbox command complete queue */
11464 if (phba->sli4_hba.mbx_cq)
11465 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11467 /* Unset ELS complete queue */
11468 if (phba->sli4_hba.els_cq)
11469 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11471 /* Unset NVME LS complete queue */
11472 if (phba->sli4_hba.nvmels_cq)
11473 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11475 if (phba->nvmet_support) {
11476 /* Unset NVMET MRQ queue */
11477 if (phba->sli4_hba.nvmet_mrq_hdr) {
11478 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11481 phba->sli4_hba.nvmet_mrq_hdr[qidx],
11482 phba->sli4_hba.nvmet_mrq_data[qidx]);
11485 /* Unset NVMET CQ Set complete queue */
11486 if (phba->sli4_hba.nvmet_cqset) {
11487 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11489 phba, phba->sli4_hba.nvmet_cqset[qidx]);
11493 /* Unset fast-path SLI4 queues */
11494 if (phba->sli4_hba.hdwq) {
11495 /* Loop thru all Hardware Queues */
11496 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11497 /* Destroy the CQ/WQ corresponding to Hardware Queue */
11498 qp = &phba->sli4_hba.hdwq[qidx];
11499 lpfc_wq_destroy(phba, qp->io_wq);
11500 lpfc_cq_destroy(phba, qp->io_cq);
11502 /* Loop thru all IRQ vectors */
11503 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11504 /* Destroy the EQ corresponding to the IRQ vector */
11505 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11506 lpfc_eq_destroy(phba, eq);
11510 kfree(phba->sli4_hba.cq_lookup);
11511 phba->sli4_hba.cq_lookup = NULL;
11512 phba->sli4_hba.cq_max = 0;
11516 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11517 * @phba: pointer to lpfc hba data structure.
11519 * This routine is invoked to allocate and set up a pool of completion queue
11520 * events. The body of the completion queue event is a completion queue entry
11521 * (CQE). For now, this pool is used for the interrupt service routine to queue
11522 * the following HBA completion queue events for the worker thread to process:
11523 * - Mailbox asynchronous events
11524 * - Receive queue completion unsolicited events
11525 * Later, this can be used for all the slow-path events.
11529 * -ENOMEM - No available memory
11532 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11534 struct lpfc_cq_event *cq_event;
11537 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11538 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11540 goto out_pool_create_fail;
11541 list_add_tail(&cq_event->list,
11542 &phba->sli4_hba.sp_cqe_event_pool);
11546 out_pool_create_fail:
11547 lpfc_sli4_cq_event_pool_destroy(phba);
11552 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11553 * @phba: pointer to lpfc hba data structure.
11555 * This routine is invoked to free the pool of completion queue events at
11556 * driver unload time. Note that it is the responsibility of the driver
11557 * cleanup routine to free all the outstanding completion-queue events
11558 * allocated from this pool back into the pool before invoking this routine
11559 * to destroy the pool.
11562 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11564 struct lpfc_cq_event *cq_event, *next_cq_event;
11566 list_for_each_entry_safe(cq_event, next_cq_event,
11567 &phba->sli4_hba.sp_cqe_event_pool, list) {
11568 list_del(&cq_event->list);
11574 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11575 * @phba: pointer to lpfc hba data structure.
11577 * This routine is the lock free version of the API invoked to allocate a
11578 * completion-queue event from the free pool.
11580 * Return: Pointer to the newly allocated completion-queue event if successful
11583 struct lpfc_cq_event *
11584 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11586 struct lpfc_cq_event *cq_event = NULL;
11588 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11589 struct lpfc_cq_event, list);
11594 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11595 * @phba: pointer to lpfc hba data structure.
11597 * This routine is the lock version of the API invoked to allocate a
11598 * completion-queue event from the free pool.
11600 * Return: Pointer to the newly allocated completion-queue event if successful
11603 struct lpfc_cq_event *
11604 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11606 struct lpfc_cq_event *cq_event;
11607 unsigned long iflags;
11609 spin_lock_irqsave(&phba->hbalock, iflags);
11610 cq_event = __lpfc_sli4_cq_event_alloc(phba);
11611 spin_unlock_irqrestore(&phba->hbalock, iflags);
11616 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11617 * @phba: pointer to lpfc hba data structure.
11618 * @cq_event: pointer to the completion queue event to be freed.
11620 * This routine is the lock free version of the API invoked to release a
11621 * completion-queue event back into the free pool.
11624 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11625 struct lpfc_cq_event *cq_event)
11627 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11631 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11632 * @phba: pointer to lpfc hba data structure.
11633 * @cq_event: pointer to the completion queue event to be freed.
11635 * This routine is the lock version of the API invoked to release a
11636 * completion-queue event back into the free pool.
11639 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11640 struct lpfc_cq_event *cq_event)
11642 unsigned long iflags;
11643 spin_lock_irqsave(&phba->hbalock, iflags);
11644 __lpfc_sli4_cq_event_release(phba, cq_event);
11645 spin_unlock_irqrestore(&phba->hbalock, iflags);
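/*
 * Typical usage sketch (illustrative pairing, not a fixed contract):
 * the interrupt path allocates an event and queues it for the worker,
 * which releases it when done:
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (cq_event) {
 *		... copy the CQE, add to a slow-path work list ...
 *	}
 *	... later, in worker context ...
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 */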
11649 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11650 * @phba: pointer to lpfc hba data structure.
11652 * This routine frees all the pending completion-queue events
11653 * back into the free pool for device reset.
11656 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11658 LIST_HEAD(cq_event_list);
11659 struct lpfc_cq_event *cq_event;
11660 unsigned long iflags;
11662 /* Retrieve all the pending WCQEs from pending WCQE lists */
11664 /* Pending ELS XRI abort events */
11665 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11666 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11668 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11670 /* Pending async events */
11671 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11672 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11674 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11676 while (!list_empty(&cq_event_list)) {
11677 list_remove_head(&cq_event_list, cq_event,
11678 struct lpfc_cq_event, list);
11679 lpfc_sli4_cq_event_release(phba, cq_event);
11684 * lpfc_pci_function_reset - Reset pci function.
11685 * @phba: pointer to lpfc hba data structure.
11687 * This routine is invoked to request a PCI function reset. It destroys
11688 * all resources assigned to the PCI function that originated this request.
11692 * -ENOMEM - No available memory
11693 * -EIO - The mailbox failed to complete successfully.
11696 lpfc_pci_function_reset(struct lpfc_hba *phba)
11698 LPFC_MBOXQ_t *mboxq;
11699 uint32_t rc = 0, if_type;
11700 uint32_t shdr_status, shdr_add_status;
11702 uint32_t port_reset = 0;
11703 union lpfc_sli4_cfg_shdr *shdr;
11704 struct lpfc_register reg_data;
11707 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11709 case LPFC_SLI_INTF_IF_TYPE_0:
11710 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11713 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11714 "0494 Unable to allocate memory for "
11715 "issuing SLI_FUNCTION_RESET mailbox "
11720 /* Setup PCI function reset mailbox-ioctl command */
11721 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11722 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11723 LPFC_SLI4_MBX_EMBED);
11724 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11725 shdr = (union lpfc_sli4_cfg_shdr *)
11726 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11727 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11728 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11730 mempool_free(mboxq, phba->mbox_mem_pool);
11731 if (shdr_status || shdr_add_status || rc) {
11732 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11733 "0495 SLI_FUNCTION_RESET mailbox "
11734 "failed with status x%x add_status x%x,"
11735 " mbx status x%x\n",
11736 shdr_status, shdr_add_status, rc);
11740 case LPFC_SLI_INTF_IF_TYPE_2:
11741 case LPFC_SLI_INTF_IF_TYPE_6:
11744 * Poll the Port Status Register and wait for RDY for
11745 * up to 30 seconds. If the port doesn't respond, treat it as an error.
11748 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11749 if (lpfc_readl(phba->sli4_hba.u.if_type2.
11750 STATUSregaddr, &reg_data.word0)) {
11754 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11759 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11760 phba->work_status[0] = readl(
11761 phba->sli4_hba.u.if_type2.ERR1regaddr);
11762 phba->work_status[1] = readl(
11763 phba->sli4_hba.u.if_type2.ERR2regaddr);
11764 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11765 "2890 Port not ready, port status reg "
11766 "0x%x error 1=0x%x, error 2=0x%x\n",
11768 phba->work_status[0],
11769 phba->work_status[1]);
11774 if (bf_get(lpfc_sliport_status_pldv, &reg_data))
11775 lpfc_pldv_detect = true;
11779 * Reset the port now
11781 reg_data.word0 = 0;
11782 bf_set(lpfc_sliport_ctrl_end, &reg_data,
11783 LPFC_SLIPORT_LITTLE_ENDIAN);
11784 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11785 LPFC_SLIPORT_INIT_PORT);
11786 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11789 pci_read_config_word(phba->pcidev,
11790 PCI_DEVICE_ID, &devid);
11795 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11801 case LPFC_SLI_INTF_IF_TYPE_1:
11807 /* Catch the not-ready port failure after a port reset. */
11809 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11810 "3317 HBA not functional: IP Reset Failed "
11811 "try: echo fw_reset > board_mode\n");
11819 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11820 * @phba: pointer to lpfc hba data structure.
11822 * This routine is invoked to set up the PCI device memory space for device
11823 * with SLI-4 interface spec.
11827 * other values - error
11830 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11832 struct pci_dev *pdev = phba->pcidev;
11833 unsigned long bar0map_len, bar1map_len, bar2map_len;
11840 /* Set the device DMA mask size */
11841 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11843 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
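/*
 * Standard DMA-mask fallback idiom: prefer a 64-bit mask and retry
 * with a 32-bit mask only if the platform rejects it; a failure of the
 * second attempt is presumably treated as fatal by the elided check.
 */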
11848 * The BARs and register set definitions and offset locations are
11849 * dependent on the if_type.
11851 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11852 &phba->sli4_hba.sli_intf.word0)) {
11856 /* There is no SLI3 failback for SLI4 devices. */
11857 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11858 LPFC_SLI_INTF_VALID) {
11859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11860 "2894 SLI_INTF reg contents invalid "
11861 "sli_intf reg 0x%x\n",
11862 phba->sli4_hba.sli_intf.word0);
11866 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11868 * Get the bus address of SLI4 device BAR regions and the
11869 * number of bytes required by each mapping. The mapping of the
11870 * particular PCI BAR regions is dependent on the type of SLI4 device.
11873 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11874 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11875 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11878 * Map SLI4 PCI Config Space Register base to a kernel virtual
11881 phba->sli4_hba.conf_regs_memmap_p =
11882 ioremap(phba->pci_bar0_map, bar0map_len);
11883 if (!phba->sli4_hba.conf_regs_memmap_p) {
11884 dev_printk(KERN_ERR, &pdev->dev,
11885 "ioremap failed for SLI4 PCI config "
11889 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11890 /* Set up BAR0 PCI config space register memory map */
11891 lpfc_sli4_bar0_register_memmap(phba, if_type);
11893 phba->pci_bar0_map = pci_resource_start(pdev, 1);
11894 bar0map_len = pci_resource_len(pdev, 1);
11895 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11896 dev_printk(KERN_ERR, &pdev->dev,
11897 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11900 phba->sli4_hba.conf_regs_memmap_p =
11901 ioremap(phba->pci_bar0_map, bar0map_len);
11902 if (!phba->sli4_hba.conf_regs_memmap_p) {
11903 dev_printk(KERN_ERR, &pdev->dev,
11904 "ioremap failed for SLI4 PCI config "
11908 lpfc_sli4_bar0_register_memmap(phba, if_type);
11911 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11912 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11914 * Map SLI4 if type 0 HBA Control Register base to a
11915 * kernel virtual address and setup the registers.
11917 phba->pci_bar1_map = pci_resource_start(pdev,
11919 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11920 phba->sli4_hba.ctrl_regs_memmap_p =
11921 ioremap(phba->pci_bar1_map,
11923 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11924 dev_err(&pdev->dev,
11925 "ioremap failed for SLI4 HBA "
11926 "control registers.\n");
11928 goto out_iounmap_conf;
11930 phba->pci_bar2_memmap_p =
11931 phba->sli4_hba.ctrl_regs_memmap_p;
11932 lpfc_sli4_bar1_register_memmap(phba, if_type);
11935 goto out_iounmap_conf;
11939 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11940 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11942 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
11943 * virtual address and setup the registers.
11945 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11946 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11947 phba->sli4_hba.drbl_regs_memmap_p =
11948 ioremap(phba->pci_bar1_map, bar1map_len);
11949 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11950 dev_err(&pdev->dev,
11951 "ioremap failed for SLI4 HBA doorbell registers.\n");
11953 goto out_iounmap_conf;
11955 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11956 lpfc_sli4_bar1_register_memmap(phba, if_type);
11959 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11960 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11962 * Map SLI4 if type 0 HBA Doorbell Register base to
11963 * a kernel virtual address and setup the registers.
11965 phba->pci_bar2_map = pci_resource_start(pdev,
11967 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11968 phba->sli4_hba.drbl_regs_memmap_p =
11969 ioremap(phba->pci_bar2_map,
11971 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11972 dev_err(&pdev->dev,
11973 "ioremap failed for SLI4 HBA"
11974 " doorbell registers.\n");
11976 goto out_iounmap_ctrl;
11978 phba->pci_bar4_memmap_p =
11979 phba->sli4_hba.drbl_regs_memmap_p;
11980 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11982 goto out_iounmap_all;
11985 goto out_iounmap_ctrl;
11989 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11990 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11992 * Map SLI4 if type 6 HBA DPP Register base to a kernel
11993 * virtual address and setup the registers.
11995 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11996 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11997 phba->sli4_hba.dpp_regs_memmap_p =
11998 ioremap(phba->pci_bar2_map, bar2map_len);
11999 if (!phba->sli4_hba.dpp_regs_memmap_p) {
12000 dev_err(&pdev->dev,
12001 "ioremap failed for SLI4 HBA dpp registers.\n");
12003 goto out_iounmap_all;
12005 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
12008 /* Set up the EQ/CQ register handling functions now */
12010 case LPFC_SLI_INTF_IF_TYPE_0:
12011 case LPFC_SLI_INTF_IF_TYPE_2:
12012 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
12013 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
12014 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
12016 case LPFC_SLI_INTF_IF_TYPE_6:
12017 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
12018 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
12019 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
12028 if (phba->sli4_hba.drbl_regs_memmap_p)
12029 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12031 if (phba->sli4_hba.ctrl_regs_memmap_p)
12032 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12034 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12040 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
12041 * @phba: pointer to lpfc hba data structure.
12043 * This routine is invoked to unset the PCI device memory space for device
12044 * with SLI-4 interface spec.
12047 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
12050 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12053 case LPFC_SLI_INTF_IF_TYPE_0:
12054 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12055 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12056 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12058 case LPFC_SLI_INTF_IF_TYPE_2:
12059 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12061 case LPFC_SLI_INTF_IF_TYPE_6:
12062 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12063 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12064 if (phba->sli4_hba.dpp_regs_memmap_p)
12065 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
12067 case LPFC_SLI_INTF_IF_TYPE_1:
12070 dev_printk(KERN_ERR, &phba->pcidev->dev,
12071 "FATAL - unsupported SLI4 interface type - %d\n",
12078 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
12079 * @phba: pointer to lpfc hba data structure.
12081 * This routine is invoked to enable the MSI-X interrupt vectors for a
12082 * device with SLI-3 interface spec.
12086 * other values - error
12089 lpfc_sli_enable_msix(struct lpfc_hba *phba)
12094 /* Set up MSI-X multi-message vectors */
12095 rc = pci_alloc_irq_vectors(phba->pcidev,
12096 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
12098 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12099 "0420 PCI enable MSI-X failed (%d)\n", rc);
12104 * Assign MSI-X vectors to interrupt handlers
12107 /* vector-0 is associated to slow-path handler */
12108 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
12109 &lpfc_sli_sp_intr_handler, 0,
12110 LPFC_SP_DRIVER_HANDLER_NAME, phba);
12112 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12113 "0421 MSI-X slow-path request_irq failed "
12118 /* vector-1 is associated to fast-path handler */
12119 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12120 &lpfc_sli_fp_intr_handler, 0,
12121 LPFC_FP_DRIVER_HANDLER_NAME, phba);
12124 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12125 "0429 MSI-X fast-path request_irq failed "
12131 * Configure HBA MSI-X attention conditions to messages
12133 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12137 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12138 "0474 Unable to allocate memory for issuing "
12139 "MBOX_CONFIG_MSI command\n");
12142 rc = lpfc_config_msi(phba, pmb);
12145 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12146 if (rc != MBX_SUCCESS) {
12147 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
12148 "0351 Config MSI mailbox command failed, "
12149 "mbxCmd x%x, mbxStatus x%x\n",
12150 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
12154 /* Free memory allocated for mailbox command */
12155 mempool_free(pmb, phba->mbox_mem_pool);
12159 /* Free memory allocated for mailbox command */
12160 mempool_free(pmb, phba->mbox_mem_pool);
12163 /* free the irq already requested */
12164 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12167 /* free the irq already requested */
12168 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12171 /* Unconfigure MSI-X capability structure */
12172 pci_free_irq_vectors(phba->pcidev);
12179 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
12180 * @phba: pointer to lpfc hba data structure.
12182 * This routine is invoked to enable the MSI interrupt mode to device with
12183 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
12184 * enable the MSI vector. The device driver is responsible for calling
12185 * request_irq() to register the MSI vector with an interrupt handler,
12186 * which is done in this function.
12190 * other values - error
12193 lpfc_sli_enable_msi(struct lpfc_hba *phba)
12197 rc = pci_enable_msi(phba->pcidev);
12199 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12200 "0012 PCI enable MSI mode success.\n");
12202 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12203 "0471 PCI enable MSI mode failed (%d)\n", rc);
12207 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12208 0, LPFC_DRIVER_NAME, phba);
12210 pci_disable_msi(phba->pcidev);
12211 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12212 "0478 MSI request_irq failed (%d)\n", rc);
12218 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
12219 * @phba: pointer to lpfc hba data structure.
12220 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12222 * This routine is invoked to enable device interrupt and associate driver's
12223 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
12224 * spec. Depending on the interrupt mode configured for the driver, the
12225 * driver will try to fall back from the configured interrupt mode to an
12226 * interrupt mode which is supported by the platform, kernel, and device in the order
12228 * MSI-X -> MSI -> IRQ.
12232 * other values - error
12235 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12237 uint32_t intr_mode = LPFC_INTR_ERROR;
12240 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
12241 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12244 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12246 if (cfg_mode == 2) {
12247 /* Now, try to enable MSI-X interrupt mode */
12248 retval = lpfc_sli_enable_msix(phba);
12250 /* Indicate initialization to MSI-X mode */
12251 phba->intr_type = MSIX;
12256 /* Fallback to MSI if MSI-X initialization failed */
12257 if (cfg_mode >= 1 && phba->intr_type == NONE) {
12258 retval = lpfc_sli_enable_msi(phba);
12260 /* Indicate initialization to MSI mode */
12261 phba->intr_type = MSI;
12266 /* Fallback to INTx if both MSI-X/MSI initialization failed */
12267 if (phba->intr_type == NONE) {
12268 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12269 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12271 /* Indicate initialization to INTx mode */
12272 phba->intr_type = INTx;
12280 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12281 * @phba: pointer to lpfc hba data structure.
12283 * This routine is invoked to disable device interrupt and disassociate the
12284 * driver's interrupt handler(s) from interrupt vector(s) to device with
12285 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12286 * release the interrupt vector(s) for the message signaled interrupt.
12289 lpfc_sli_disable_intr(struct lpfc_hba *phba)
12293 if (phba->intr_type == MSIX)
12294 nr_irqs = LPFC_MSIX_VECTORS;
12298 for (i = 0; i < nr_irqs; i++)
12299 free_irq(pci_irq_vector(phba->pcidev, i), phba);
12300 pci_free_irq_vectors(phba->pcidev);
12302 /* Reset interrupt management states */
12303 phba->intr_type = NONE;
12304 phba->sli.slistat.sli_intr = 0;
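/*
 * Illustrative sketch (not part of the driver): one way a caller can walk
 * the MSI-X -> MSI -> INTx ladder by retrying lpfc_sli_enable_intr() with a
 * lower cfg_mode, mirroring the retry loop in lpfc_pci_probe_one_s3() further
 * below. "example_bring_up_intr" and the "verify" predicate are hypothetical.
 */
static uint32_t example_bring_up_intr(struct lpfc_hba *phba,
				      bool (*verify)(struct lpfc_hba *))
{
	uint32_t cfg_mode = phba->cfg_use_msi;	/* 2=MSI-X, 1=MSI, 0=INTx */
	uint32_t intr_mode;

	for (;;) {
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR)
			return intr_mode;	/* no mode could be enabled */
		if (verify(phba) || intr_mode == 0)
			return intr_mode;	/* working, or nothing lower */
		lpfc_sli_disable_intr(phba);	/* tear down, step down, retry */
		cfg_mode = --intr_mode;
	}
}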
12308 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12309 * @phba: pointer to lpfc hba data structure.
12310 * @id: EQ vector index or Hardware Queue index
12311 * @match: LPFC_FIND_BY_EQ = match by EQ
12312 * LPFC_FIND_BY_HDWQ = match by Hardware Queue
12313 * Return the CPU that matches the selection criteria
12316 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12318 struct lpfc_vector_map_info *cpup;
12321 /* Loop through all CPUs */
12322 for_each_present_cpu(cpu) {
12323 cpup = &phba->sli4_hba.cpu_map[cpu];
12325 /* If we are matching by EQ, there may be multiple CPUs
12326 * using the same vector, so select the one with
12327 * LPFC_CPU_FIRST_IRQ set.
12329 if ((match == LPFC_FIND_BY_EQ) &&
12330 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12334 /* If matching by HDWQ, select the first CPU that matches */
12335 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
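/*
 * Usage sketch (hypothetical helper): resolve the first CPU servicing a
 * given hardware queue, as lpfc_cpu_online() does further below before
 * deciding whether an EQ can stop being polled.
 */
static unsigned int example_cpu_for_hdwq(struct lpfc_hba *phba, uint16_t hdwq)
{
	return lpfc_find_cpu_handle(phba, hdwq, LPFC_FIND_BY_HDWQ);
}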
12343 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12344 * @phba: pointer to lpfc hba data structure.
12345 * @cpu: CPU map index
12346 * @phys_id: CPU package physical id
12347 * @core_id: CPU core id
12350 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12351 uint16_t phys_id, uint16_t core_id)
12353 struct lpfc_vector_map_info *cpup;
12356 for_each_present_cpu(idx) {
12357 cpup = &phba->sli4_hba.cpu_map[idx];
12358 /* Does the cpup match the one we are looking for */
12359 if ((cpup->phys_id == phys_id) &&
12360 (cpup->core_id == core_id) &&
12369 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12370 * @phba: pointer to lpfc hba data structure.
12371 * @eqidx: index for eq and irq vector
12372 * @flag: flags to set for vector_map structure
12373 * @cpu: cpu used to index vector_map structure
12375 * The routine assigns eq info into vector_map structure
12378 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12381 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12382 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12385 cpup->flag |= flag;
12387 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12388 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12389 cpu, eqhdl->irq, cpup->eq, cpup->flag);
12393 * lpfc_cpu_map_array_init - Initialize cpu_map structure
12394 * @phba: pointer to lpfc hba data structure.
12396 * The routine initializes the cpu_map array structure
12399 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12401 struct lpfc_vector_map_info *cpup;
12402 struct lpfc_eq_intr_info *eqi;
12405 for_each_possible_cpu(cpu) {
12406 cpup = &phba->sli4_hba.cpu_map[cpu];
12407 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12408 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12409 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12410 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12412 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12413 INIT_LIST_HEAD(&eqi->list);
12419 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12420 * @phba: pointer to lpfc hba data structure.
12422 * The routine initializes the hba_eq_hdl array structure
12425 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12427 struct lpfc_hba_eq_hdl *eqhdl;
12430 for (i = 0; i < phba->cfg_irq_chann; i++) {
12431 eqhdl = lpfc_get_eq_hdl(i);
12432 eqhdl->irq = LPFC_IRQ_EMPTY;
12433 eqhdl->phba = phba;
12438 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12439 * @phba: pointer to lpfc hba data structure.
12440 * @vectors: number of msix vectors allocated.
12442 * The routine will figure out the CPU affinity assignment for every
12443 * MSI-X vector allocated for the HBA.
12444 * In addition, the CPU to IO channel mapping will be calculated
12445 * and the phba->sli4_hba.cpu_map array will reflect this.
12448 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12450 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12451 int max_phys_id, min_phys_id;
12452 int max_core_id, min_core_id;
12453 struct lpfc_vector_map_info *cpup;
12454 struct lpfc_vector_map_info *new_cpup;
12455 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12456 struct lpfc_hdwq_stat *c_stat;
12460 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12462 min_core_id = LPFC_VECTOR_MAP_EMPTY;
12464 /* Update CPU map with physical id and core id of each CPU */
12465 for_each_present_cpu(cpu) {
12466 cpup = &phba->sli4_hba.cpu_map[cpu];
12468 cpup->phys_id = topology_physical_package_id(cpu);
12469 cpup->core_id = topology_core_id(cpu);
12470 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12471 cpup->flag |= LPFC_CPU_MAP_HYPER;
12473 /* No distinction between CPUs for other platforms */
12475 cpup->core_id = cpu;
12478 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12479 "3328 CPU %d physid %d coreid %d flag x%x\n",
12480 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12482 if (cpup->phys_id > max_phys_id)
12483 max_phys_id = cpup->phys_id;
12484 if (cpup->phys_id < min_phys_id)
12485 min_phys_id = cpup->phys_id;
12487 if (cpup->core_id > max_core_id)
12488 max_core_id = cpup->core_id;
12489 if (cpup->core_id < min_core_id)
12490 min_core_id = cpup->core_id;
12493 /* After looking at each irq vector assigned to this pcidev, it's
12494 * possible to see that not ALL CPUs have been accounted for.
12495 * Next we will set any unassigned (unaffinitized) cpu map
12496 * entries to an IRQ on the same phys_id.
12498 first_cpu = cpumask_first(cpu_present_mask);
12499 start_cpu = first_cpu;
12501 for_each_present_cpu(cpu) {
12502 cpup = &phba->sli4_hba.cpu_map[cpu];
12504 /* Is this CPU entry unassigned */
12505 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12506 /* Mark CPU as IRQ not assigned by the kernel */
12507 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12509 /* If so, find a new_cpup that is on the SAME
12510 * phys_id as cpup. start_cpu will start where we
12511 * left off so all unassigned entries don't get assigned
12512 * the IRQ of the first entry.
12514 new_cpu = start_cpu;
12515 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12516 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12517 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12518 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12519 (new_cpup->phys_id == cpup->phys_id))
12521 new_cpu = lpfc_next_present_cpu(new_cpu);
12523 /* At this point, we leave the CPU as unassigned */
12526 /* We found a matching phys_id, so copy the IRQ info */
12527 cpup->eq = new_cpup->eq;
12529 /* Bump start_cpu to the next slot to minimize the
12530 * chance of having multiple unassigned CPU entries
12531 * selecting the same IRQ.
12533 start_cpu = lpfc_next_present_cpu(new_cpu);
12535 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12536 "3337 Set Affinity: CPU %d "
12537 "eq %d from peer cpu %d same "
12539 cpu, cpup->eq, new_cpu,
12544 /* Set any unassigned cpu map entries to an IRQ on any phys_id */
12545 start_cpu = first_cpu;
12547 for_each_present_cpu(cpu) {
12548 cpup = &phba->sli4_hba.cpu_map[cpu];
12550 /* Is this entry unassigned */
12551 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12552 /* Mark it as IRQ not assigned by the kernel */
12553 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12555 /* If so, find a new_cpup that's on ANY phys_id
12556 * as the cpup. start_cpu will start where we
12557 * left off so all unassigned entries don't get
12558 * assigned the IRQ of the first entry.
12560 new_cpu = start_cpu;
12561 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12562 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12563 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12564 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12566 new_cpu = lpfc_next_present_cpu(new_cpu);
12568 /* We should never leave an entry unassigned */
12569 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12570 "3339 Set Affinity: CPU %d "
12571 "eq %d UNASSIGNED\n",
12572 cpup->hdwq, cpup->eq);
12575 /* We found an available entry, copy the IRQ info */
12576 cpup->eq = new_cpup->eq;
12578 /* Bump start_cpu to the next slot to minimize the
12579 * chance of having multiple unassigned CPU entries
12580 * selecting the same IRQ.
12582 start_cpu = lpfc_next_present_cpu(new_cpu);
12584 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12585 "3338 Set Affinity: CPU %d "
12586 "eq %d from peer cpu %d (%d/%d)\n",
12587 cpu, cpup->eq, new_cpu,
12588 new_cpup->phys_id, new_cpup->core_id);
12592 /* Assign hdwq indices that are unique across all cpus in the map
12593 * that are also FIRST_CPUs.
12596 for_each_present_cpu(cpu) {
12597 cpup = &phba->sli4_hba.cpu_map[cpu];
12599 /* Only FIRST IRQs get a hdwq index assignment. */
12600 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12603 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12606 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12607 "3333 Set Affinity: CPU %d (phys %d core %d): "
12608 "hdwq %d eq %d flg x%x\n",
12609 cpu, cpup->phys_id, cpup->core_id,
12610 cpup->hdwq, cpup->eq, cpup->flag);
12612 /* Associate a hdwq with each cpu_map entry
12613 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12614 * hardware queues than CPUs. For that case we will just round-robin
12615 * the available hardware queues as they get assigned to CPUs.
12616 * The next_idx is the idx from the FIRST_CPU loop above to account
12617 * for irq_chann < hdwq. The idx is used for round-robin assignments
12618 * and needs to start at 0.
12623 for_each_present_cpu(cpu) {
12624 cpup = &phba->sli4_hba.cpu_map[cpu];
12626 /* FIRST cpus are already mapped. */
12627 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12630 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
12631 * of the unassigned cpus to the next idx so that all
12632 * hdw queues are fully utilized.
12634 if (next_idx < phba->cfg_hdw_queue) {
12635 cpup->hdwq = next_idx;
12640 /* Not a First CPU and all hdw_queues are used. Reuse a
12641 * Hardware Queue for another CPU, so be smart about it
12642 * and pick one that has its IRQ/EQ mapped to the same phys_id
12643 * (CPU package) and core_id.
12645 new_cpu = start_cpu;
12646 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12647 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12648 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12649 new_cpup->phys_id == cpup->phys_id &&
12650 new_cpup->core_id == cpup->core_id) {
12653 new_cpu = lpfc_next_present_cpu(new_cpu);
12656 /* If we can't match both phys_id and core_id,
12657 * settle for just a phys_id match.
12659 new_cpu = start_cpu;
12660 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12661 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12662 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12663 new_cpup->phys_id == cpup->phys_id)
12665 new_cpu = lpfc_next_present_cpu(new_cpu);
12668 /* Otherwise just round robin on cfg_hdw_queue */
12669 cpup->hdwq = idx % phba->cfg_hdw_queue;
12673 /* We found an available entry, copy the IRQ info */
12674 start_cpu = lpfc_next_present_cpu(new_cpu);
12675 cpup->hdwq = new_cpup->hdwq;
12677 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12678 "3335 Set Affinity: CPU %d (phys %d core %d): "
12679 "hdwq %d eq %d flg x%x\n",
12680 cpu, cpup->phys_id, cpup->core_id,
12681 cpup->hdwq, cpup->eq, cpup->flag);
12685 * Initialize the cpu_map slots for not-present cpus in case
12686 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12689 for_each_possible_cpu(cpu) {
12690 cpup = &phba->sli4_hba.cpu_map[cpu];
12691 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12692 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12693 c_stat->hdwq_no = cpup->hdwq;
12695 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12698 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12699 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12700 c_stat->hdwq_no = cpup->hdwq;
12702 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12703 "3340 Set Affinity: not present "
12704 "CPU %d hdwq %d\n",
12708 /* The cpu_map array will be used later during initialization
12709 * when EQ / CQ / WQs are allocated and configured.
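/*
 * Worked sketch of the round-robin fallback above, assuming 8 present CPUs
 * and cfg_hdw_queue = 4: CPUs that cannot share a peer's hdwq fall back to
 * idx % cfg_hdw_queue, i.e. hdwq 0,1,2,3,0,1,2,3, so every hardware queue
 * stays in use. Hypothetical standalone form:
 */
static inline uint16_t example_round_robin_hdwq(int idx, int cfg_hdw_queue)
{
	return (uint16_t)(idx % cfg_hdw_queue);
}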
12715 * lpfc_cpuhp_get_eq
12717 * @phba: pointer to lpfc hba data structure.
12718 * @cpu: cpu going offline
12719 * @eqlist: eq list to append to
12722 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12723 struct list_head *eqlist)
12725 const struct cpumask *maskp;
12726 struct lpfc_queue *eq;
12727 struct cpumask *tmp;
12730 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12734 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12735 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12739 * if the irq is not affinitized to the cpu going
12740 * offline, then we don't need to poll the eq attached
12741 * to it.
12743 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12745 /* get the cpus that are online and are
12746 * affinitized to this irq vector. If the count
12747 * is more than 1 then cpuhp is not going to
12748 * shut down this vector. Since this cpu has
12749 * not gone offline yet, we need >1.
12751 cpumask_and(tmp, maskp, cpu_online_mask);
12752 if (cpumask_weight(tmp) > 1)
12755 /* Now that we have an irq to shut down, get the eq
12756 * mapped to this irq. Note: multiple hdwq's in
12757 * the software can share an eq, but eventually
12758 * only one eq will be mapped to this vector
12760 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12761 list_add(&eq->_poll_list, eqlist);
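/*
 * Sketch of the cpumask logic above (hypothetical helper): a vector needs
 * software polling only if the CPU going offline is the last online CPU in
 * that vector's affinity mask. "tmp" is caller-provided scratch space.
 */
static bool example_vector_needs_poll(const struct cpumask *affinity,
				      unsigned int dying_cpu,
				      struct cpumask *tmp)
{
	if (!cpumask_and(tmp, affinity, cpumask_of(dying_cpu)))
		return false;	/* vector not affinitized to this CPU */
	cpumask_and(tmp, affinity, cpu_online_mask);
	return cpumask_weight(tmp) <= 1; /* no other online CPU remains */
}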
12767 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12769 if (phba->sli_rev != LPFC_SLI_REV4)
12772 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12775 * unregistering the instance doesn't stop the polling
12776 * timer. Wait for the poll timer to retire.
12779 del_timer_sync(&phba->cpuhp_poll_timer);
12782 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12785 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
12788 __lpfc_cpuhp_remove(phba);
12791 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12793 if (phba->sli_rev != LPFC_SLI_REV4)
12798 if (!list_empty(&phba->poll_list))
12799 mod_timer(&phba->cpuhp_poll_timer,
12800 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12804 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12808 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12810 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
12815 if (phba->sli_rev != LPFC_SLI_REV4) {
12820 /* proceed with the hotplug */
12825 * lpfc_irq_set_aff - set IRQ affinity
12826 * @eqhdl: EQ handle
12827 * @cpu: cpu to set affinity
12831 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12833 cpumask_clear(&eqhdl->aff_mask);
12834 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12835 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12836 irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
12840 * lpfc_irq_clear_aff - clear IRQ affinity
12841 * @eqhdl: EQ handle
12845 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12847 cpumask_clear(&eqhdl->aff_mask);
12848 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
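/*
 * Usage sketch: the hotplug rebalance path below pairs these two helpers --
 * pin a vector to a chosen surviving CPU, or drop the pin and let irqbalance
 * manage it when no candidate is left. "example_migrate_vector" is a
 * hypothetical wrapper.
 */
static void example_migrate_vector(struct lpfc_hba_eq_hdl *eqhdl,
				   unsigned int target_cpu, bool have_target)
{
	if (have_target)
		lpfc_irq_set_aff(eqhdl, target_cpu);
	else
		lpfc_irq_clear_aff(eqhdl);
}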
12852 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12853 * @phba: pointer to HBA context object.
12854 * @cpu: cpu going offline/online
12855 * @offline: true, cpu is going offline. false, cpu is coming online.
12857 * If a cpu is going offline, we'll make a best effort to find the next
12858 * online cpu on the phba's original_mask and migrate all offlining IRQ
12861 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12863 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12864 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12868 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12870 struct lpfc_vector_map_info *cpup;
12871 struct cpumask *aff_mask;
12872 unsigned int cpu_select, cpu_next, idx;
12873 const struct cpumask *orig_mask;
12875 if (phba->irq_chann_mode == NORMAL_MODE)
12878 orig_mask = &phba->sli4_hba.irq_aff_mask;
12880 if (!cpumask_test_cpu(cpu, orig_mask))
12883 cpup = &phba->sli4_hba.cpu_map[cpu];
12885 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12889 /* Find next online CPU on original mask */
12890 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12891 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12893 /* Found a valid CPU */
12894 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12895 /* Go through each eqhdl and ensure offlining
12896 * cpu aff_mask is migrated
12898 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12899 aff_mask = lpfc_get_aff_mask(idx);
12901 /* Migrate affinity */
12902 if (cpumask_test_cpu(cpu, aff_mask))
12903 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12907 /* Rely on irqbalance if no online CPUs left on NUMA */
12908 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12909 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12912 /* Migrate affinity back to this CPU */
12913 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12917 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12919 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12920 struct lpfc_queue *eq, *next;
12925 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12929 if (__lpfc_cpuhp_checks(phba, &retval))
12932 lpfc_irq_rebalance(phba, cpu, true);
12934 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12938 /* start polling on these eq's */
12939 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12940 list_del_init(&eq->_poll_list);
12941 lpfc_sli4_start_polling(eq);
12947 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12949 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12950 struct lpfc_queue *eq, *next;
12955 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12959 if (__lpfc_cpuhp_checks(phba, &retval))
12962 lpfc_irq_rebalance(phba, cpu, false);
12964 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12965 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12967 lpfc_sli4_stop_polling(eq);
12974 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12975 * @phba: pointer to lpfc hba data structure.
12977 * This routine is invoked to enable the MSI-X interrupt vectors to device
12978 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
12979 * to cpus on the system.
12981 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
12982 * the number of cpus on the same numa node as this adapter. The vectors are
12983 * allocated without requesting OS affinity mapping. A vector will be
12984 * allocated and assigned to each online and offline cpu. If the cpu is
12985 * online, then affinity will be set to that cpu. If the cpu is offline, then
12986 * affinity will be set to the nearest peer cpu within the numa node that is
12987 * online. If there are no online cpus within the numa node, affinity is not
12988 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
12989 * is consistent with the way cpu online/offline is handled when cfg_irq_numa
12990 * is enabled.
12992 * If numa mode is not enabled and there is more than 1 vector allocated, then
12993 * the driver relies on the managed irq interface where the OS assigns vector to
12994 * cpu affinity. The driver will then use that affinity mapping to setup its
12995 * cpu mapping table.
12999 * other values - error
13002 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
13004 int vectors, rc, index;
13006 const struct cpumask *aff_mask = NULL;
13007 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
13008 struct lpfc_vector_map_info *cpup;
13009 struct lpfc_hba_eq_hdl *eqhdl;
13010 const struct cpumask *maskp;
13011 unsigned int flags = PCI_IRQ_MSIX;
13013 /* Set up MSI-X multi-message vectors */
13014 vectors = phba->cfg_irq_chann;
13016 if (phba->irq_chann_mode != NORMAL_MODE)
13017 aff_mask = &phba->sli4_hba.irq_aff_mask;
13020 cpu_cnt = cpumask_weight(aff_mask);
13021 vectors = min(phba->cfg_irq_chann, cpu_cnt);
13023 /* cpu: iterates over aff_mask including offline or online
13024 * cpu_select: iterates over online aff_mask to set affinity
13026 cpu = cpumask_first(aff_mask);
13027 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13029 flags |= PCI_IRQ_AFFINITY;
13032 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
13034 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13035 "0484 PCI enable MSI-X failed (%d)\n", rc);
13040 /* Assign MSI-X vectors to interrupt handlers */
13041 for (index = 0; index < vectors; index++) {
13042 eqhdl = lpfc_get_eq_hdl(index);
13043 name = eqhdl->handler_name;
13044 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
13045 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
13046 LPFC_DRIVER_HANDLER_NAME"%d", index);
13048 eqhdl->idx = index;
13049 rc = pci_irq_vector(phba->pcidev, index);
13051 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13052 "0489 MSI-X fast-path (%d) "
13053 "pci_irq_vec failed (%d)\n", index, rc);
13058 rc = request_threaded_irq(eqhdl->irq,
13059 &lpfc_sli4_hba_intr_handler,
13060 &lpfc_sli4_hba_intr_handler_th,
13063 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13064 "0486 MSI-X fast-path (%d) "
13065 "request_irq failed (%d)\n", index, rc);
13070 /* If found a neighboring online cpu, set affinity */
13071 if (cpu_select < nr_cpu_ids)
13072 lpfc_irq_set_aff(eqhdl, cpu_select);
13074 /* Assign EQ to cpu_map */
13075 lpfc_assign_eq_map_info(phba, index,
13076 LPFC_CPU_FIRST_IRQ,
13079 /* Iterate to next offline or online cpu in aff_mask */
13080 cpu = cpumask_next(cpu, aff_mask);
13082 /* Find next online cpu in aff_mask to set affinity */
13083 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13084 } else if (vectors == 1) {
13085 cpu = cpumask_first(cpu_present_mask);
13086 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
13089 maskp = pci_irq_get_affinity(phba->pcidev, index);
13091 /* Loop through all CPUs associated with vector index */
13092 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
13093 cpup = &phba->sli4_hba.cpu_map[cpu];
13095 /* If this is the first CPU that's assigned to
13096 * this vector, set LPFC_CPU_FIRST_IRQ.
13098 * With certain platforms it's possible that irq
13099 * vectors are affinitized to all the cpus.
13100 * This can result in each cpu_map.eq to be set
13101 * to the last vector, resulting in overwrite
13102 * of all the previous cpu_map.eq. Ensure that
13103 * each vector receives a place in cpu_map.
13104 * Later call to lpfc_cpu_affinity_check will
13105 * ensure we are nicely balanced out.
13107 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
13109 lpfc_assign_eq_map_info(phba, index,
13110 LPFC_CPU_FIRST_IRQ,
13117 if (vectors != phba->cfg_irq_chann) {
13118 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13119 "3238 Reducing IO channels to match number of "
13120 "MSI-X vectors, requested %d got %d\n",
13121 phba->cfg_irq_chann, vectors);
13122 if (phba->cfg_irq_chann > vectors)
13123 phba->cfg_irq_chann = vectors;
13129 /* free the irq already requested */
13130 for (--index; index >= 0; index--) {
13131 eqhdl = lpfc_get_eq_hdl(index);
13132 lpfc_irq_clear_aff(eqhdl);
13133 free_irq(eqhdl->irq, eqhdl);
13136 /* Unconfigure MSI-X capability structure */
13137 pci_free_irq_vectors(phba->pcidev);
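/*
 * Minimal sketch of the generic kernel API pattern used above (generic
 * pdev/handler, not lpfc-specific). pci_alloc_irq_vectors() may grant fewer
 * vectors than requested, which is why the code above reconciles
 * cfg_irq_chann against the number actually allocated.
 */
static int example_enable_msix(struct pci_dev *pdev, irq_handler_t handler,
			       void *data, int want)
{
	int nvec, i, rc;

	nvec = pci_alloc_irq_vectors(pdev, 1, want,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
				 "example", data);
		if (rc)
			goto out_free;
	}
	return nvec;

out_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), data);
	pci_free_irq_vectors(pdev);
	return rc;
}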
13144 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
13145 * @phba: pointer to lpfc hba data structure.
13147 * This routine is invoked to enable the MSI interrupt mode to device with
13148 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
13149 * called to enable the MSI vector. The device driver is responsible for
13150 * calling request_irq() to register the MSI vector with an interrupt
13151 * handler, which is done in this function.
13155 * other values - error
13158 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
13162 struct lpfc_hba_eq_hdl *eqhdl;
13164 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13165 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
13167 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13168 "0487 PCI enable MSI mode success.\n");
13170 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13171 "0488 PCI enable MSI mode failed (%d)\n", rc);
13172 return rc ? rc : -1;
13175 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13176 0, LPFC_DRIVER_NAME, phba);
13178 pci_free_irq_vectors(phba->pcidev);
13179 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13180 "0490 MSI request_irq failed (%d)\n", rc);
13184 eqhdl = lpfc_get_eq_hdl(0);
13185 rc = pci_irq_vector(phba->pcidev, 0);
13187 pci_free_irq_vectors(phba->pcidev);
13188 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13189 "0496 MSI pci_irq_vec failed (%d)\n", rc);
13194 cpu = cpumask_first(cpu_present_mask);
13195 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
13197 for (index = 0; index < phba->cfg_irq_chann; index++) {
13198 eqhdl = lpfc_get_eq_hdl(index);
13199 eqhdl->idx = index;
13206 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
13207 * @phba: pointer to lpfc hba data structure.
13208 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
13210 * This routine is invoked to enable device interrupt and associate driver's
13211 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
13212 * interface spec. Depending on the interrupt mode configured for the
13213 * driver, it will try to fall back from the configured interrupt mode to
13214 * an interrupt mode which is supported by the platform, kernel, and device in
13216 * MSI-X -> MSI -> IRQ.
13219 * Interrupt mode (2, 1, 0) - successful
13220 * LPFC_INTR_ERROR - error
13223 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13225 uint32_t intr_mode = LPFC_INTR_ERROR;
13228 if (cfg_mode == 2) {
13229 /* Preparation before conf_msi mbox cmd */
13232 /* Now, try to enable MSI-X interrupt mode */
13233 retval = lpfc_sli4_enable_msix(phba);
13235 /* Indicate initialization to MSI-X mode */
13236 phba->intr_type = MSIX;
13242 /* Fallback to MSI if MSI-X initialization failed */
13243 if (cfg_mode >= 1 && phba->intr_type == NONE) {
13244 retval = lpfc_sli4_enable_msi(phba);
13246 /* Indicate initialization to MSI mode */
13247 phba->intr_type = MSI;
13252 /* Fallback to INTx if both MSI-X/MSI initialization failed */
13253 if (phba->intr_type == NONE) {
13254 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13255 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13257 struct lpfc_hba_eq_hdl *eqhdl;
13260 /* Indicate initialization to INTx mode */
13261 phba->intr_type = INTx;
13264 eqhdl = lpfc_get_eq_hdl(0);
13265 retval = pci_irq_vector(phba->pcidev, 0);
13267 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13268 "0502 INTR pci_irq_vec failed (%d)\n",
13270 return LPFC_INTR_ERROR;
13272 eqhdl->irq = retval;
13274 cpu = cpumask_first(cpu_present_mask);
13275 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13277 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13278 eqhdl = lpfc_get_eq_hdl(idx);
13287 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13288 * @phba: pointer to lpfc hba data structure.
13290 * This routine is invoked to disable device interrupt and disassociate
13291 * the driver's interrupt handler(s) from interrupt vector(s) to device
13292 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
13293 * will release the interrupt vector(s) for the message signaled interrupt.
13296 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13298 /* Disable the currently initialized interrupt mode */
13299 if (phba->intr_type == MSIX) {
13301 struct lpfc_hba_eq_hdl *eqhdl;
13303 /* Free up MSI-X multi-message vectors */
13304 for (index = 0; index < phba->cfg_irq_chann; index++) {
13305 eqhdl = lpfc_get_eq_hdl(index);
13306 lpfc_irq_clear_aff(eqhdl);
13307 free_irq(eqhdl->irq, eqhdl);
13310 free_irq(phba->pcidev->irq, phba);
13313 pci_free_irq_vectors(phba->pcidev);
13315 /* Reset interrupt management states */
13316 phba->intr_type = NONE;
13317 phba->sli.slistat.sli_intr = 0;
13321 * lpfc_unset_hba - Unset SLI3 hba device initialization
13322 * @phba: pointer to lpfc hba data structure.
13324 * This routine is invoked to unset the HBA device initialization steps to
13325 * a device with SLI-3 interface spec.
13328 lpfc_unset_hba(struct lpfc_hba *phba)
13330 set_bit(FC_UNLOADING, &phba->pport->load_flag);
13332 kfree(phba->vpi_bmask);
13333 kfree(phba->vpi_ids);
13335 lpfc_stop_hba_timers(phba);
13337 phba->pport->work_port_events = 0;
13339 lpfc_sli_hba_down(phba);
13341 lpfc_sli_brdrestart(phba);
13343 lpfc_sli_disable_intr(phba);
13349 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13350 * @phba: Pointer to HBA context object.
13352 * This function is called in the SLI4 code path to wait for completion
13353 * of device's XRIs exchange busy. It will check the XRI exchange busy
13354 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
13355 * that, it will check the XRI exchange busy on outstanding FCP and ELS
13356 * I/Os every 30 seconds, log an error message, and wait forever. Only
13357 * when all XRI exchange busy activity completes will the driver unload
13358 * proceed with invoking the function reset ioctl mailbox command to the
13359 * CNA and the rest of the driver unload resource release.
13362 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13364 struct lpfc_sli4_hdw_queue *qp;
13367 int io_xri_cmpl = 1;
13368 int nvmet_xri_cmpl = 1;
13369 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13371 /* Driver just aborted IOs during the hba_unset process. Pause
13372 * here to give the HBA time to complete the IO and get entries
13373 * into the abts lists.
13375 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13377 /* Wait for NVME pending IO to flush back to transport. */
13378 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13379 lpfc_nvme_wait_for_io_drain(phba);
13382 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13383 qp = &phba->sli4_hba.hdwq[idx];
13384 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13385 if (!io_xri_cmpl) /* if list is NOT empty */
13391 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13393 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13396 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13397 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13398 if (!nvmet_xri_cmpl)
13399 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13400 "6424 NVMET XRI exchange busy "
13401 "wait time: %d seconds.\n",
13404 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13405 "6100 IO XRI exchange busy "
13406 "wait time: %d seconds.\n",
13409 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13410 "2878 ELS XRI exchange busy "
13411 "wait time: %d seconds.\n",
13413 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13414 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13416 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13417 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13421 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13422 qp = &phba->sli4_hba.hdwq[idx];
13423 io_xri_cmpl = list_empty(
13424 &qp->lpfc_abts_io_buf_list);
13425 if (!io_xri_cmpl) /* if list is NOT empty */
13431 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13432 nvmet_xri_cmpl = list_empty(
13433 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13436 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
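/*
 * Shape of the wait loop above reduced to a sketch: poll at the fast T1
 * interval until the total wait passes TMO, then log once per slow T2
 * interval and keep waiting until every abort list drains. The "all_done"
 * predicate is hypothetical and stands in for the three list_empty() checks.
 */
static void example_xri_busy_wait(struct lpfc_hba *phba,
				  bool (*all_done)(struct lpfc_hba *))
{
	int wait_time = 0;

	while (!all_done(phba)) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			/* log "still busy" here, as the code above does */
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
	}
}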
13442 * lpfc_sli4_hba_unset - Unset the fcoe hba
13443 * @phba: Pointer to HBA context object.
13445 * This function is called in the SLI4 code path to reset the HBA's FCoE
13446 * function. The caller is not required to hold any lock. This routine
13447 * issues PCI function reset mailbox command to reset the FCoE function.
13448 * At the end of the function, it calls lpfc_hba_down_post function to
13449 * free any pending commands.
13452 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13455 LPFC_MBOXQ_t *mboxq;
13456 struct pci_dev *pdev = phba->pcidev;
13458 lpfc_stop_hba_timers(phba);
13459 hrtimer_cancel(&phba->cmf_stats_timer);
13460 hrtimer_cancel(&phba->cmf_timer);
13463 phba->sli4_hba.intr_enable = 0;
13466 * Gracefully wait out the potential current outstanding asynchronous
13467 * mailbox command.
13470 /* First, block any pending async mailbox command from being posted */
13471 spin_lock_irq(&phba->hbalock);
13472 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13473 spin_unlock_irq(&phba->hbalock);
13474 /* Now, try to wait it out if we can */
13475 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13477 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13480 /* Forcefully release the outstanding mailbox command if timed out */
13481 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13482 spin_lock_irq(&phba->hbalock);
13483 mboxq = phba->sli.mbox_active;
13484 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13485 __lpfc_mbox_cmpl_put(phba, mboxq);
13486 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13487 phba->sli.mbox_active = NULL;
13488 spin_unlock_irq(&phba->hbalock);
13491 /* Abort all iocbs associated with the hba */
13492 lpfc_sli_hba_iocb_abort(phba);
13494 if (!pci_channel_offline(phba->pcidev))
13495 /* Wait for completion of device XRI exchange busy */
13496 lpfc_sli4_xri_exchange_busy_wait(phba);
13498 /* per-phba callback de-registration for hotplug event */
13500 lpfc_cpuhp_remove(phba);
13502 /* Disable PCI subsystem interrupt */
13503 lpfc_sli4_disable_intr(phba);
13505 /* Disable SR-IOV if enabled */
13506 if (phba->cfg_sriov_nr_virtfn)
13507 pci_disable_sriov(pdev);
13509 /* Stopping the kthread will trigger work_done one more time */
13510 kthread_stop(phba->worker_thread);
13512 /* Disable FW logging to host memory */
13513 lpfc_ras_stop_fwlog(phba);
13515 /* Reset SLI4 HBA FCoE function */
13516 lpfc_pci_function_reset(phba);
13518 /* release all queue allocated resources. */
13519 lpfc_sli4_queue_destroy(phba);
13521 /* Free RAS DMA memory */
13522 if (phba->ras_fwlog.ras_enabled)
13523 lpfc_sli4_ras_dma_free(phba);
13525 /* Stop the SLI4 device port */
13527 phba->pport->work_port_events = 0;
13531 lpfc_cgn_crc32(uint32_t crc, u8 byte)
13536 for (bit = 0; bit < 8; bit++) {
13537 msb = (crc >> 31) & 1;
13540 if (msb ^ (byte & 1)) {
13541 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13550 lpfc_cgn_reverse_bits(uint32_t wd)
13552 uint32_t result = 0;
13555 for (i = 0; i < 32; i++) {
13557 result |= (1 & (wd >> i));
13563 * The routine corresponds with the algorithm the HBA firmware
13564 * uses to validate the data integrity.
13567 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13571 uint8_t *data = (uint8_t *)ptr;
13573 for (i = 0; i < byteLen; ++i)
13574 crc = lpfc_cgn_crc32(crc, data[i]);
13576 result = ~lpfc_cgn_reverse_bits(crc);
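/*
 * Illustrative sketch (hypothetical helper): recompute the buffer CRC the
 * same way lpfc_init_congestion_buf() does below and compare it with the
 * stored little-endian value. Assumes, as the driver's own usage implies,
 * that recomputing over LPFC_CGN_INFO_SZ bytes with the same seed reproduces
 * the value written to cgn_info_crc.
 */
static bool example_cgn_crc_matches(struct lpfc_cgn_info *cp)
{
	uint32_t crc;

	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
	return cp->cgn_info_crc == cpu_to_le32(crc);
}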
13581 lpfc_init_congestion_buf(struct lpfc_hba *phba)
13583 struct lpfc_cgn_info *cp;
13587 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13588 "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13592 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13594 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13595 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13596 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13597 atomic_set(&phba->cgn_sync_warn_cnt, 0);
13599 atomic_set(&phba->cgn_driver_evt_cnt, 0);
13600 atomic_set(&phba->cgn_latency_evt_cnt, 0);
13601 atomic64_set(&phba->cgn_latency_evt, 0);
13602 phba->cgn_evt_minute = 0;
13604 memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
13605 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13606 cp->cgn_info_version = LPFC_CGN_INFO_V4;
13608 /* cgn parameters */
13609 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13610 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13611 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13612 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13614 lpfc_cgn_update_tstamp(phba, &cp->base_time);
13616 /* Fill in default LUN qdepth */
13618 size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13619 cp->cgn_lunq = cpu_to_le16(size);
13622 /* last used Index initialized to 0xff already */
13624 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13625 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13626 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13627 cp->cgn_info_crc = cpu_to_le32(crc);
13629 phba->cgn_evt_timestamp = jiffies +
13630 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13634 lpfc_init_congestion_stat(struct lpfc_hba *phba)
13636 struct lpfc_cgn_info *cp;
13639 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13640 "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13645 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13646 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
13648 lpfc_cgn_update_tstamp(phba, &cp->stat_start);
13649 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13650 cp->cgn_info_crc = cpu_to_le32(crc);
13654 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13655 * @phba: Pointer to hba context object.
13656 * @reg: flag to determine register or unregister.
13659 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13661 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13662 union lpfc_sli4_cfg_shdr *shdr;
13663 uint32_t shdr_status, shdr_add_status;
13664 LPFC_MBOXQ_t *mboxq;
13670 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13672 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13673 "2641 REG_CONGESTION_BUF mbox allocation fail: "
13674 "HBA state x%x reg %d\n",
13675 phba->pport->port_state, reg);
13679 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13680 sizeof(struct lpfc_sli4_cfg_mhdr));
13681 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13682 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13683 LPFC_SLI4_MBX_EMBED);
13684 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13685 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13687 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13689 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13690 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13691 reg_congestion_buf->addr_lo =
13692 putPaddrLow(phba->cgn_i->phys);
13693 reg_congestion_buf->addr_hi =
13694 putPaddrHigh(phba->cgn_i->phys);
13696 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13697 shdr = (union lpfc_sli4_cfg_shdr *)
13698 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13699 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13700 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13702 mempool_free(mboxq, phba->mbox_mem_pool);
13703 if (shdr_status || shdr_add_status || rc) {
13704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13705 "2642 REG_CONGESTION_BUF mailbox "
13706 "failed with status x%x add_status x%x,"
13707 " mbx status x%x reg %d\n",
13708 shdr_status, shdr_add_status, rc, reg);
13715 lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13717 lpfc_cmf_stop(phba);
13718 return __lpfc_reg_congestion_buf(phba, 0);
13722 lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13724 return __lpfc_reg_congestion_buf(phba, 1);
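/*
 * Usage sketch (hypothetical wrapper): registration and unregistration
 * bracket the congestion-buffer lifetime; note that the unregister path
 * above stops CMF before issuing the mailbox command.
 */
static int example_cgn_buf_lifecycle(struct lpfc_hba *phba)
{
	int rc;

	rc = lpfc_reg_congestion_buf(phba);	/* after cgn_i DMA buffer init */
	if (rc)
		return rc;
	/* ... congestion monitoring runs ... */
	return lpfc_unreg_congestion_buf(phba);
}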
13728 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13729 * @phba: Pointer to HBA context object.
13730 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
13732 * This function is called in the SLI4 code path to read the port's
13733 * sli4 capabilities.
13735 * This function may be called from any context that can block-wait
13736 * for the completion. The expectation is that this routine is called
13737 * typically from probe_one or from the online routine.
13740 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13743 struct lpfc_mqe *mqe = &mboxq->u.mqe;
13744 struct lpfc_pc_sli4_params *sli4_params;
13747 bool exp_wqcq_pages = true;
13748 struct lpfc_sli4_parameters *mbx_sli4_parameters;
13751 * By default, the driver assumes the SLI4 port requires RPI
13752 * header postings. The SLI4_PARAM response will correct this
13753 * assumption.
13755 phba->sli4_hba.rpi_hdrs_in_use = 1;
13757 /* Read the port's SLI4 Config Parameters */
13758 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13759 sizeof(struct lpfc_sli4_cfg_mhdr));
13760 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13761 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13762 length, LPFC_SLI4_MBX_EMBED);
13763 if (!phba->sli4_hba.intr_enable)
13764 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13766 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13767 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13771 sli4_params = &phba->sli4_hba.pc_sli4_params;
13772 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13773 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13774 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13775 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13776 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13777 mbx_sli4_parameters);
13778 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13779 mbx_sli4_parameters);
13780 if (bf_get(cfg_phwq, mbx_sli4_parameters))
13781 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13783 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13784 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13785 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13786 mbx_sli4_parameters);
13787 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13788 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13789 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13790 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13791 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13792 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13793 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13794 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13795 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13796 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13797 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13798 mbx_sli4_parameters);
13799 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13800 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13801 mbx_sli4_parameters);
13802 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13803 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13804 sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters);
13806 /* Check for Extended Pre-Registered SGL support */
13807 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13809 /* Check for firmware nvme support */
13810 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13811 bf_get(cfg_xib, mbx_sli4_parameters));
13814 /* Save this to indicate the Firmware supports NVME */
13815 sli4_params->nvme = 1;
13817 /* Firmware NVME support, check driver FC4 NVME support */
13818 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13819 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13820 "6133 Disabling NVME support: "
13821 "FC4 type not supported: x%x\n",
13822 phba->cfg_enable_fc4_type);
13826 /* No firmware NVME support, check driver FC4 NVME support */
13827 sli4_params->nvme = 0;
13828 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13829 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13830 "6101 Disabling NVME support: Not "
13831 "supported by firmware (%d %d) x%x\n",
13832 bf_get(cfg_nvme, mbx_sli4_parameters),
13833 bf_get(cfg_xib, mbx_sli4_parameters),
13834 phba->cfg_enable_fc4_type);
13836 phba->nvmet_support = 0;
13837 phba->cfg_nvmet_mrq = 0;
13838 phba->cfg_nvme_seg_cnt = 0;
13840 /* If no FC4 type support, move to just SCSI support */
13841 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13843 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13847 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
13848 * accommodate 512K and 1M IOs in a single nvme buf.
13850 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13851 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13853 /* Enable embedded Payload BDE if support is indicated */
13854 if (bf_get(cfg_pbde, mbx_sli4_parameters))
13855 phba->cfg_enable_pbde = 1;
13857 phba->cfg_enable_pbde = 0;
13860 * To support the Suppress Response feature we must satisfy 3 conditions.
13861 * lpfc_suppress_rsp module parameter must be set (default).
13862 * In SLI4-Parameters Descriptor:
13863 * Extended Inline Buffers (XIB) must be supported.
13864 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
13865 * (double negative).
13867 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13868 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13869 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13871 phba->cfg_suppress_rsp = 0;
13873 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13874 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13876 /* Make sure that sge_supp_len can be handled by the driver */
13877 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13878 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13880 rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
13881 if (unlikely(rc)) {
13882 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13883 "6400 Can't set dma maximum segment size\n");
13888 * Check whether the adapter supports an embedded copy of the
13889 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
13890 * to use this option, 128-byte WQEs must be used.
13892 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13893 phba->fcp_embed_io = 1;
13895 phba->fcp_embed_io = 0;
13897 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13898 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
13899 bf_get(cfg_xib, mbx_sli4_parameters),
13900 phba->cfg_enable_pbde,
13901 phba->fcp_embed_io, sli4_params->nvme,
13902 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13904 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13905 LPFC_SLI_INTF_IF_TYPE_2) &&
13906 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13907 LPFC_SLI_INTF_FAMILY_LNCR_A0))
13908 exp_wqcq_pages = false;
13910 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
13911 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
13913 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13914 phba->enab_exp_wqcq_pages = 1;
13916 phba->enab_exp_wqcq_pages = 0;
13918 * Check if the SLI port supports MDS Diagnostics
13920 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
13921 phba->mds_diags_support = 1;
13923 phba->mds_diags_support = 0;
13926 * Check if the SLI port supports NSLER
13928 if (bf_get(cfg_nsler, mbx_sli4_parameters))
13937 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13938 * @pdev: pointer to PCI device
13939 * @pid: pointer to PCI device identifier
13941 * This routine is to be called to attach a device with SLI-3 interface spec
13942 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13943 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
13944 * information of the device and driver to see if the driver states that it
13945 * can support this kind of device. If the match is successful, the driver core
13946 * invokes this routine. If this routine determines it can claim the HBA, it
13947 * does all the initialization that it needs to do to handle the HBA properly.
13950 * 0 - driver can claim the device
13951 * negative value - driver can not claim the device
13954 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
13956 struct lpfc_hba *phba;
13957 struct lpfc_vport *vport = NULL;
13958 struct Scsi_Host *shost = NULL;
13960 uint32_t cfg_mode, intr_mode;
13962 /* Allocate memory for HBA structure */
13963 phba = lpfc_hba_alloc(pdev);
13967 /* Perform generic PCI device enabling operation */
13968 error = lpfc_enable_pci_dev(phba);
13970 goto out_free_phba;
13972 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
13973 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13975 goto out_disable_pci_dev;
13977 /* Set up SLI-3 specific device PCI memory space */
13978 error = lpfc_sli_pci_mem_setup(phba);
13980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13981 "1402 Failed to set up pci memory space.\n");
13982 goto out_disable_pci_dev;
13985 /* Set up SLI-3 specific device driver resources */
13986 error = lpfc_sli_driver_resource_setup(phba);
13988 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13989 "1404 Failed to set up driver resource.\n");
13990 goto out_unset_pci_mem_s3;
13993 /* Initialize and populate the iocb list per host */
13995 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
13997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13998 "1405 Failed to initialize iocb list.\n");
13999 goto out_unset_driver_resource_s3;
14002 /* Set up common device driver resources */
14003 error = lpfc_setup_driver_resource_phase2(phba);
14005 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14006 "1406 Failed to set up driver resource.\n");
14007 goto out_free_iocb_list;
14010 /* Get the default values for Model Name and Description */
14011 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14013 /* Create SCSI host to the physical port */
14014 error = lpfc_create_shost(phba);
14016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14017 "1407 Failed to create scsi host.\n");
14018 goto out_unset_driver_resource;
14021 /* Configure sysfs attributes */
14022 vport = phba->pport;
14023 error = lpfc_alloc_sysfs_attr(vport);
14025 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14026 "1476 Failed to allocate sysfs attr\n");
14027 goto out_destroy_shost;
14030 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14031 /* Now, trying to enable interrupt and bring up the device */
14032 cfg_mode = phba->cfg_use_msi;
14034 /* Put device to a known state before enabling interrupt */
14035 lpfc_stop_port(phba);
14036 /* Configure and enable interrupt */
14037 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
14038 if (intr_mode == LPFC_INTR_ERROR) {
14039 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14040 "0431 Failed to enable interrupt.\n");
14042 goto out_free_sysfs_attr;
14044 /* SLI-3 HBA setup */
14045 if (lpfc_sli_hba_setup(phba)) {
14046 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14047 "1477 Failed to set up hba\n");
14049 goto out_remove_device;
14052 /* Wait 50ms for the interrupts of previous mailbox commands */
14054 /* Check active interrupts on message signaled interrupts */
14055 if (intr_mode == 0 ||
14056 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
14057 /* Log the current active interrupt mode */
14058 phba->intr_mode = intr_mode;
14059 lpfc_log_intr_mode(phba, intr_mode);
14062 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14063 "0447 Configure interrupt mode (%d) "
14064 "failed active interrupt test.\n",
14066 /* Disable the current interrupt mode */
14067 lpfc_sli_disable_intr(phba);
14068 /* Try next level of interrupt mode */
14069 cfg_mode = --intr_mode;
14073 /* Perform post initialization setup */
14074 lpfc_post_init_setup(phba);
14076 /* Check if there are static vports to be created. */
14077 lpfc_create_static_vport(phba);
14082 lpfc_unset_hba(phba);
14083 out_free_sysfs_attr:
14084 lpfc_free_sysfs_attr(vport);
14086 lpfc_destroy_shost(phba);
14087 out_unset_driver_resource:
14088 lpfc_unset_driver_resource_phase2(phba);
14089 out_free_iocb_list:
14090 lpfc_free_iocb_list(phba);
14091 out_unset_driver_resource_s3:
14092 lpfc_sli_driver_resource_unset(phba);
14093 out_unset_pci_mem_s3:
14094 lpfc_sli_pci_mem_unset(phba);
14095 out_disable_pci_dev:
14096 lpfc_disable_pci_dev(phba);
14098 scsi_host_put(shost);
14100 lpfc_hba_free(phba);
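/*
 * The error ladder above follows the standard kernel pattern: unwind labels
 * are ordered inverse to acquisition, so a failure at step N releases only
 * steps N-1..1. Reduced, illustrative sketch over the first two probe steps:
 */
static int example_probe_unwind(struct lpfc_hba *phba)
{
	int error;

	error = lpfc_enable_pci_dev(phba);
	if (error)
		return error;			/* nothing to unwind yet */

	error = lpfc_sli_pci_mem_setup(phba);
	if (error)
		goto out_disable_pci_dev;	/* undo step 1 only */

	return 0;

out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	return error;
}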
14105 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
14106 * @pdev: pointer to PCI device
14108 * This routine is to be called to detach a device with SLI-3 interface
14109 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
14110 * is removed from the PCI bus, it performs all the necessary cleanup for
14111 * the HBA device to be removed from the PCI subsystem properly.
14114 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
14116 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14117 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14118 struct lpfc_vport **vports;
14119 struct lpfc_hba *phba = vport->phba;
14122 set_bit(FC_UNLOADING, &vport->load_flag);
14124 lpfc_free_sysfs_attr(vport);
14126 /* Release all the vports against this physical port */
14127 vports = lpfc_create_vport_work_array(phba);
14128 if (vports != NULL)
14129 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14130 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14132 fc_vport_terminate(vports[i]->fc_vport);
14134 lpfc_destroy_vport_work_array(phba, vports);
14136 /* Remove FC host with the physical port */
14137 fc_remove_host(shost);
14138 scsi_remove_host(shost);
14140 /* Clean up all nodes, mailboxes and IOs. */
14141 lpfc_cleanup(vport);
14144 * Bring down the SLI Layer. This step disable all interrupts,
14145 * clears the rings, discards all mailbox commands, and resets
14149 /* HBA interrupt will be disabled after this call */
14150 lpfc_sli_hba_down(phba);
14151 /* Stop kthread signal shall trigger work_done one more time */
14152 kthread_stop(phba->worker_thread);
14153 /* Final cleanup of txcmplq and reset the HBA */
14154 lpfc_sli_brdrestart(phba);
14156 kfree(phba->vpi_bmask);
14157 kfree(phba->vpi_ids);
14159 lpfc_stop_hba_timers(phba);
14160 spin_lock_irq(&phba->port_list_lock);
14161 list_del_init(&vport->listentry);
14162 spin_unlock_irq(&phba->port_list_lock);
14164 lpfc_debugfs_terminate(vport);
14166 /* Disable SR-IOV if enabled */
14167 if (phba->cfg_sriov_nr_virtfn)
14168 pci_disable_sriov(pdev);
14170 /* Disable interrupt */
14171 lpfc_sli_disable_intr(phba);
14173 scsi_host_put(shost);
14176 * Call scsi_free before mem_free since scsi bufs are released to their
14177 * corresponding pools here.
14179 lpfc_scsi_free(phba);
14180 lpfc_free_iocb_list(phba);
14182 lpfc_mem_free_all(phba);
14184 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
14185 phba->hbqslimp.virt, phba->hbqslimp.phys);
14187 /* Free resources associated with SLI2 interface */
14188 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
14189 phba->slim2p.virt, phba->slim2p.phys);
14191 /* unmap adapter SLIM and Control Registers */
14192 iounmap(phba->ctrl_regs_memmap_p);
14193 iounmap(phba->slim_memmap_p);
14195 lpfc_hba_free(phba);
14197 pci_release_mem_regions(pdev);
14198 pci_disable_device(pdev);
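/*
 * A rough sketch of the teardown ordering above, assuming the usual
 * probe-mirror rule: resources are released in reverse order of their
 * setup in lpfc_pci_probe_one_s3() -- sysfs attrs, vports, SCSI host,
 * SLI layer, timers, IRQs, DMA regions, mapped BARs, and finally the
 * PCI device itself -- so no stage can touch a resource freed earlier.
 */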
/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) on a device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements of a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) delivered to the suspend() method are treated
 * as SUSPEND, and the driver fully reinitializes its device during the
 * resume() method call. Consequently, the driver sets the device to the
 * PCI_D3hot state in PCI config space instead of setting it according to the
 * @msg provided by the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	return 0;
}
/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) on a device with SLI-3 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that the driver
 * implements only the minimum PM requirements of a power-aware driver: all
 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) delivered to the
 * suspend() method are treated as SUSPEND, and the driver fully
 * reinitializes its device during the resume() method call. Consequently,
 * the device is set to PCI_D0 directly in PCI config space before its state
 * is restored.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);
	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recovery.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through the HBA, abort all I/Os on
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for permanently disabling
 * the PCI slot. It blocks the SCSI transport layer traffic and flushes the
 * FCP pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);
	lpfc_sli4_prep_dev_for_reset(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_io_rings(phba);
}
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device and
 * enable its interrupt, but it will just put the HBA into an offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);
}
/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}
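/*
 * Worked example for the tiers above: a port reporting max_xri = 1024
 * falls into the "<= 1024" tier and reserves 100 ELS/CT IOCBs, while a
 * non-SLI-4 port reserves none. The tier constants are the driver's
 * sizing policy, not a hardware requirement.
 */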
/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT + NVMET IOCBs to reserve
 **/
int
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

	if (phba->nvmet_support)
		max_xri += LPFC_NVMET_BUF_POST;
	return max_xri;
}
static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
			      uint32_t magic_number, uint32_t ftype,
			      uint32_t fid, uint32_t fsize,
			      const struct firmware *fw)
{
	int rc;
	u8 sli_family;

	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
	/* Three cases: (1) FW was not supported on the detected adapter.
	 * (2) FW update has been locked out administratively.
	 * (3) Some other error during FW update.
	 * In each case, an unmaskable message is written to the console
	 * for admin diagnosis.
	 */
	if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
	     magic_number != MAGIC_NUMBER_G6) ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
	     magic_number != MAGIC_NUMBER_G7) ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
	     magic_number != MAGIC_NUMBER_G7P)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3030 This firmware version is not supported on"
				" this HBA model. Device:%x Magic:%x Type:%x "
				"ID:%x Size %d %zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EINVAL;
	} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3021 Firmware downloads have been prohibited "
				"by a system configuration setting on "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EACCES;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3022 FW Download failed. Add Status x%x "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				offset, phba->pcidev->device, magic_number,
				ftype, fid, fsize, fw->size);
		rc = -EIO;
	}
	return rc;
}
/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure (passed as void *).
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
			     "3023 Updating Firmware, Current Version:%s "
			     "New Version:%s\n",
			     fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc) {
				rc = lpfc_log_write_firmware_error(phba, offset,
								   magic_number,
								   ftype, fid,
								   fsize, fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
			     "3029 Skipped Firmware update, Current "
			     "Version:%s New Version:%s\n",
			     fwrev, image->revision);

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	if (rc < 0)
		lpfc_log_msg(phba, KERN_ERR, LOG_INIT | LOG_SLI,
			     "3062 Firmware update error, status %d.\n", rc);
	else
		lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
			     "3024 Firmware update success: size %d.\n", rc);
}
/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: which firmware to update.
 *
 * This routine is called to perform Linux generic firmware upgrade on device
 * that supports such feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	char file_name[ELX_FW_NAME_SIZE] = {0};
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	scnprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
					      file_name, &phba->pcidev->dev,
					      GFP_KERNEL, (void *)phba,
					      lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
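/*
 * Usage note: INT_FW_UPGRADE takes the asynchronous path --
 * request_firmware_nowait() returns immediately and the firmware
 * loader later invokes lpfc_write_firmware() as a callback (fw may be
 * NULL there, hence its sanity check). RUN_FW_UPGRADE blocks in
 * request_firmware() and calls lpfc_write_firmware() directly.
 */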
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem for a device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 * device-specific information of the device and the driver to see whether
 * the driver can support this kind of device. If the match is successful,
 * the driver core invokes this routine. If this routine determines it can
 * claim the HBA, it does all the initialization that it needs to do to
 * handle the HBA properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	INIT_LIST_HEAD(&phba->poll_list);

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	phba->pport = NULL;
	lpfc_stop_port(phba);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);

	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_unset_driver_resource;
	}
	/* Default to single EQ for non-MSI-X */
	if (phba->intr_type != MSIX) {
		phba->cfg_irq_chann = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
	}
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_disable_intr;
	}
	vport = phba->pport;
	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Configure sysfs attributes */
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* NVME support in FW earlier in the driver load corrects the
	 * FC4 type making a check for nvme_support unnecessary.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Create NVME binding with nvme_fc_transport. This
			 * ensures the vport is initialized. If the localport
			 * create fails, it should not unload the driver to
			 * support field issues.
			 */
			error = lpfc_nvme_create_localport(vport);
			if (error) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6004 NVME registration "
						"failed, error x%x\n",
						error);
			}
		}
	}

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);

	return 0;

out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem for a device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	set_bit(FC_UNLOADING, &vport->load_flag);
	if (phba->cgn_i)
		lpfc_unreg_congestion_buf(phba);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform ndlp cleanup on the physical port. The nvme and nvmet
	 * localports are destroyed after to cleanup all transport memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	/* De-allocate multi-XRI pools */
	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_io_free(phba);
	lpfc_free_iocb_list(phba);
	lpfc_sli4_hba_unset(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);
}
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements of a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) delivered to the suspend() method are treated
 * as SUSPEND, and the driver fully reinitializes its device during the
 * resume() method call. Consequently, the driver sets the device to the
 * PCI_D3hot state in PCI config space instead of setting it according to the
 * @msg provided by the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	return 0;
}
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that the driver
 * implements only the minimum PM requirements of a power-aware driver: all
 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) delivered to the
 * suspend() method are treated as SUSPEND, and the driver fully
 * reinitializes its device during the resume() method call. Consequently,
 * the device is set to PCI_D0 directly in PCI config space before its state
 * is restored.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recovery.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through the HBA, abort all I/Os on
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	int offline = pci_channel_offline(phba->pcidev);

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset offline"
			" %d\n", offline);

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* HBA_PCI_ERR was set in io_error_detect */
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	lpfc_sli4_queue_destroy(phba);
	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for permanently disabling
 * the PCI slot. It blocks the SCSI transport layer traffic and flushes the
 * FCP pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding I/Os */
	lpfc_sli_flush_io_rings(phba);
}
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	bool hba_pci_err;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
		/* Fatal error, prepare for slot reset */
		if (!hba_pci_err)
			lpfc_sli4_prep_dev_for_reset(phba);
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2832 Already handling PCI error "
					"state: x%x\n", state);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		set_bit(HBA_PCI_ERR, &phba->bit_flags);
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2825 Unknown PCI error state: x%x\n", state);
		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
		if (!hba_pci_err)
			lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after the PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device and
 * enable its interrupt, but it will just put the HBA into an offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;
	bool hba_pci_err;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
	if (!hba_pci_err)
		dev_info(&pdev->dev,
			 "hba_pci_err was not set, recovering slot reset.\n");
	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}
}
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device and the driver
 * to see whether the driver can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
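/*
 * The dispatch above keys off the SLI_INTF register rather than the
 * PCI device ID: any function that reports a valid SLI-4 interface
 * revision goes to the s4 probe, and everything else falls through
 * to the s3 probe.
 */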
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	if (phba->link_state == LPFC_HBA_ERROR &&
	    phba->hba_flag & HBA_IOQ_FLUSH)
		return PCI_ERS_RESULT_NEED_RESET;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the Flash Optimized Fabric configuration flag is set.
 * Otherwise, the enable oas flag is cleared and the pool created for OAS
 * device data is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}
}
/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if RAS is supported by the adapter and, if so,
 * whether the function through which RAS support is enabled matches the
 * configured function.
 **/
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
	/* if ASIC_GEN_NUM >= 0xC */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_6) ||
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_FAMILY_G6)) {
		phba->ras_fwlog.ras_hwsupport = true;
		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
		    phba->cfg_ras_fwlog_buffsize)
			phba->ras_fwlog.ras_enabled = true;
		else
			phba->ras_fwlog.ras_enabled = false;
	} else {
		phba->ras_fwlog.ras_hwsupport = false;
	}
}
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};
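/*
 * Recovery flow sketch (the standard PCI error-handling sequence, as
 * wired up above): the AER/EEH core calls .error_detected first; if
 * the driver answers PCI_ERS_RESULT_NEED_RESET, the core resets the
 * slot and calls .slot_reset, and on PCI_ERS_RESULT_RECOVERED it
 * finishes with .resume so traffic can restart.
 */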
static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
			 lpfc_pci_suspend_one,
			 lpfc_pci_resume_one);
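/*
 * SIMPLE_DEV_PM_OPS points all six system-sleep callbacks
 * (suspend/resume, freeze/thaw, poweroff/restore) at the two
 * dispatchers above, which matches the driver's stated policy of
 * treating every PM message as a plain SUSPEND.
 */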
static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.driver.pm	= &lpfc_pci_pm_ops_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - FC attach transport failed
 *   all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	pr_info(LPFC_MODULE_DESC "\n");
	pr_info(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	error = -ENOMEM;
	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		goto unregister;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		goto unregister;
	}
	lpfc_wqe_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_present_cpu = num_present_cpus();

	lpfc_pldv_detect = false;

	error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					"lpfc/sli4:online",
					lpfc_cpu_online, lpfc_cpu_offline);
	if (error < 0)
		goto cpuhp_failure;
	lpfc_cpuhp_state = error;

	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto unwind;

	return error;

unwind:
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
unregister:
	misc_deregister(&lpfc_mgmt_dev);

	return error;
}
void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
	unsigned int start_idx;
	unsigned int dbg_cnt;
	unsigned int temp_idx;
	int i;
	int j = 0;
	unsigned long rem_nsec;

	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
		return;

	start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
	dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
	if (!dbg_cnt)
		goto out;
	temp_idx = start_idx;
	if (dbg_cnt >= DBG_LOG_SZ) {
		dbg_cnt = DBG_LOG_SZ;
		temp_idx -= 1;
	} else {
		if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
			temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
		} else {
			if (start_idx < dbg_cnt)
				start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
			else
				start_idx -= dbg_cnt;
		}
	}
	dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
		 start_idx, temp_idx, dbg_cnt);

	for (i = 0; i < dbg_cnt; i++) {
		if ((start_idx + i) < DBG_LOG_SZ)
			temp_idx = (start_idx + i) % DBG_LOG_SZ;
		else
			temp_idx = j++;
		rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
		dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
			 temp_idx,
			 (unsigned long)phba->dbg_log[temp_idx].t_ns,
			 rem_nsec / 1000,
			 phba->dbg_log[temp_idx].log);
	}
out:
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
}
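/*
 * Worked example, assuming DBG_LOG_SZ is 256: with dbg_log_idx = 10
 * and dbg_log_cnt = 20, start_idx becomes 256 - (20 - 10) = 246 and
 * the loop prints slots 246..255 followed by 0..9, i.e. the 20 most
 * recent messages in oldest-first order.
 */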
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
	unsigned int idx;
	va_list args;
	int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
	struct va_format vaf;

	va_start(args, fmt);
	if (unlikely(dbg_dmping)) {
		vaf.fmt = fmt;
		vaf.va = &args;
		dev_info(&phba->pcidev->dev, "%pV", &vaf);
		va_end(args);
		return;
	}
	idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
		DBG_LOG_SZ;

	atomic_inc(&phba->dbg_log_cnt);

	vscnprintf(phba->dbg_log[idx].log,
		   sizeof(phba->dbg_log[idx].log), fmt, args);
	va_end(args);

	phba->dbg_log[idx].t_ns = local_clock();
}
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	idr_destroy(&lpfc_hba_index);
}
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);