/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

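/*
 * Illustrative sketch (not built): the completion handler above formats the
 * packed prog_id word as a dotted version string. This sketch repeats just
 * the two-branch formatting decision with plain integer parameters, so it
 * does not depend on the real struct prog_id bit layout from lpfc_hw.h.
 */
#if 0
static void example_format_optionrom(char *buf, int ver, int rev, int lev,
				     int dist, int num)
{
	/* Same policy as lpfc_dump_wakeup_param_cmpl: a dist type of 3
	 * with num 0 omits the distribution suffix entirely.
	 */
	char dist_char[] = "nabx";

	if (dist == 3 && num == 0)
		sprintf(buf, "%d.%d%d", ver, rev, lev);	  /* e.g. "5.02" */
	else
		sprintf(buf, "%d.%d%d%c%d", ver, rev, lev,
			dist_char[dist], num);		  /* e.g. "5.02a1" */
}
#endif
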
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
		&& !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
			"1302 Invalid speed for this board: "
			"Reset link speed to auto: x%x\n",
			phba->cfg_link_speed);
			phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

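/*
 * Illustrative sketch (not built): the serial number fallback in
 * lpfc_config_port_post above maps each WWNN nibble to a character,
 * 0-9 to '0'-'9' (0x30 + j) and 10-15 to 'a'-'f' (0x61 + j - 10).
 * This is the same mapping as a standalone helper.
 */
#if 0
static char example_nibble_to_char(unsigned int j)
{
	/* 0x30 is ASCII '0' and 0x61 is ASCII 'a', so 0x5 -> '5'
	 * and 0xc -> 'c', matching the WWNN-derived serial number.
	 */
	return (j <= 9) ? (char)(0x30 + j) : (char)(0x61 + (j - 10));
}
#endif
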
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

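/*
 * Illustrative sketch (not built), assuming a caller that implements the
 * delayed link-up path the kernel-doc above describes: when the
 * lpfc_suppress_link_up module parameter deferred INIT_LINK during
 * config-port-post, a later caller can bring the link up through this
 * helper. The exact trigger points in the driver are not shown here.
 */
#if 0
static void example_delayed_link_up(struct lpfc_hba *phba)
{
	/* With MBX_POLL the helper frees the mailbox itself; with
	 * MBX_NOWAIT it is released by lpfc_sli_def_mbox_cmpl instead
	 * (see lpfc_hba_init_link above).
	 */
	if (phba->cfg_suppress_link_up != LPFC_INITIALIZE_LINK)
		lpfc_hba_init_link(phba, MBX_POLL);
}
#endif
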
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

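/*
 * Illustrative sketch (not built): lpfc_hba_down_post only dereferences a
 * per-HBA function pointer. A hedged sketch of how such a pointer would be
 * bound to the SLI-revision-specific implementation; the driver's real
 * binding happens in its API jump-table setup elsewhere in this file.
 */
#if 0
static void example_bind_down_post(struct lpfc_hba *phba, int sli_rev)
{
	/* Select the HBA-reset cleanup hook once, at init time, so the
	 * hot path above is a single indirect call.
	 */
	if (sli_rev == LPFC_SLI_REV4)
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
	else
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
}
#endif
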
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;
	uint32_t tmo_posted;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE;
	if (!tmo_posted)
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and mark the
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions are detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If the lpfc heart-beat mailbox
 * command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding,
 * the HBA shall be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing: last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}

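/*
 * Illustrative sketch (not built): the handler above reduces to a timer
 * re-arm policy. This condensed helper shows just that decision, assuming
 * jiffies-based timestamps; the real handler also issues the heartbeat
 * mailbox and reclaims idle ELS buffers.
 */
#if 0
static unsigned long example_next_hb_timeout(unsigned long last_completion,
					     unsigned long now,
					     int hb_outstanding)
{
	/* Recent I/O completions count as proof of life: re-arm the short
	 * interval and skip the heartbeat mailbox. Once a heartbeat is
	 * outstanding, wait the longer mailbox timeout instead.
	 */
	if (!hb_outstanding &&
	    time_after(last_completion + LPFC_HB_MBOX_INTERVAL * HZ, now))
		return now + HZ * LPFC_HB_MBOX_INTERVAL;
	return now + HZ * LPFC_HB_MBOX_TIMEOUT;
}
#endif
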
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when an HBA hardware
 * error other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it has triggered an error attention. That could
	 * cause I/Os to be dropped by the firmware. Error out the iocbs (I/O)
	 * on the txcmplq and let the SCSI layer retry them after
	 * re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered an error attention
		 * with HS_FFER6. That could cause I/Os to be dropped by the
		 * firmware. Error out the iocbs (I/O) on the txcmplq and let
		 * the SCSI layer retry them after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet; just treat it as an adapter hardware failure.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
			phba->work_status[0], phba->work_status[1]);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_sli4_offline_eratt(phba);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle an HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}

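/*
 * Illustrative sketch (not built): a worked example of the descriptor
 * stream lpfc_parse_vpd walks, using a hand-built buffer. The tag bytes
 * are the standard PCI VPD tags the parser switches on: 0x82 (identifier
 * string), 0x90 (VPD-R keyword list), 0x78 (end tag). The payload values
 * here are made up for the example.
 */
#if 0
static void example_parse_vpd(struct lpfc_hba *phba)
{
	/* 0x82: identifier, little-endian 16-bit length 4, payload "LPFC".
	 * 0x90: VPD-R list, length 7; one "SN" keyword whose 4-byte value
	 *       is "1234"; the parser copies it into phba->SerialNumber.
	 * 0x78: end tag terminating the descriptor list.
	 */
	uint8_t vpd[] = {
		0x82, 0x04, 0x00, 'L', 'P', 'F', 'C',
		0x90, 0x07, 0x00, 'S', 'N', 0x04, '1', '2', '3', '4',
		0x78,
	};

	lpfc_parse_vpd(phba, vpd, sizeof(vpd));
	/* phba->SerialNumber now holds "1234" */
}
#endif
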
/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars. It
 * shall be returned with the model name, maximum speed, and the host bus type.
 * The @mdp passed into this function points to an array of 80 chars. When the
 * function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000",  "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
		oneConnect = 1;
		m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
		oneConnect = 1;
		m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	/* oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator, Port %s",
				m.name, m.function,
				phba->Port);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}

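/*
 * Illustrative sketch (not built): the two snprintf branches above yield
 * strings of these shapes. The model data used here is sample input for
 * the format, not tied to any particular device ID.
 */
#if 0
static void example_model_desc(char *descp)
{
	/* Non-OneConnect branch: name, speed, GE/Gb, bus, function,
	 * e.g. "Emulex LPe12000 8Gb PCIe Fibre Channel Adapter".
	 */
	snprintf(descp, 255, "Emulex %s %d%s %s %s",
		 "LPe12000", 8, "Gb", "PCIe", "Fibre Channel Adapter");
}
#endif
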
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}

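/*
 * Illustrative sketch (not built): each QUE_RING_BUF64 IOCB above carries
 * up to two buffer descriptors, so posting cnt buffers consumes roughly
 * cnt/2 ring entries, assuming no allocation failures along the way.
 */
#if 0
static int example_iocbs_needed(int cnt)
{
	/* lpfc_post_buffer pairs buffers two per IOCB, ending with a
	 * single-BDE IOCB when cnt is odd: 5 buffers -> 3 IOCBs.
	 */
	return (cnt + 1) / 2;
}
#endif
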
/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

2045 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2048 * lpfc_sha_init - Set up initial array of hash table entries
2049 * @HashResultPointer: pointer to an array as hash table.
2051 * This routine sets up the initial values to the array of hash table entries
2055 lpfc_sha_init(uint32_t * HashResultPointer)
2057 HashResultPointer[0] = 0x67452301;
2058 HashResultPointer[1] = 0xEFCDAB89;
2059 HashResultPointer[2] = 0x98BADCFE;
2060 HashResultPointer[3] = 0x10325476;
2061 HashResultPointer[4] = 0xC3D2E1F0;
2065 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2066 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by @HashResultPointer
 * with the values from the working hash table pointed to by @HashWorkingPointer.
 * The results are put back into the initial hash table, returned through
 * the @HashResultPointer as the result hash table.
2075 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2079 uint32_t A, B, C, D, E;
		HashWorkingPointer[t] =
		    S(1, HashWorkingPointer[t - 3] ^
			 HashWorkingPointer[t - 8] ^
			 HashWorkingPointer[t - 14] ^
			 HashWorkingPointer[t - 16]);
2087 } while (++t <= 79);
2089 A = HashResultPointer[0];
2090 B = HashResultPointer[1];
2091 C = HashResultPointer[2];
2092 D = HashResultPointer[3];
2093 E = HashResultPointer[4];
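	/*
	 * Run the 80 SHA-1 rounds; the round function and additive
	 * constant change every 20 rounds, per the standard algorithm.
	 */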
2097 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2098 } else if (t < 40) {
2099 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2100 } else if (t < 60) {
2101 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2103 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2105 TEMP += S(5, A) + E + HashWorkingPointer[t];
2111 } while (++t <= 79);
2113 HashResultPointer[0] += A;
2114 HashResultPointer[1] += B;
2115 HashResultPointer[2] += C;
2116 HashResultPointer[3] += D;
2117 HashResultPointer[4] += E;
2122 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2123 * @RandomChallenge: pointer to the entry of host challenge random number array.
2124 * @HashWorking: pointer to the entry of the working hash array.
 * This routine calculates the working hash array referred to by @HashWorking
 * from the challenge random numbers associated with the host, referred to by
 * @RandomChallenge. The result is put into the entry of the working hash
2129 * array and returned by reference through @HashWorking.
2132 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2134 *HashWorking = (*RandomChallenge ^ *HashWorking);
2138 * lpfc_hba_init - Perform special handling for LC HBA initialization
2139 * @phba: pointer to lpfc hba data structure.
2140 * @hbainit: pointer to an array of unsigned 32-bit integers.
2142 * This routine performs the special handling for LC HBA initialization.
2145 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2148 uint32_t *HashWorking;
2149 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2151 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
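	/* Seed both ends of the 80-word working array with the WWNN words */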
2155 HashWorking[0] = HashWorking[78] = *pwwnn++;
2156 HashWorking[1] = HashWorking[79] = *pwwnn;
2158 for (t = 0; t < 7; t++)
2159 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2161 lpfc_sha_init(hbainit);
2162 lpfc_sha_iterate(hbainit, HashWorking);
2167 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2168 * @vport: pointer to a virtual N_Port data structure.
2170 * This routine performs the necessary cleanups before deleting the @vport.
2171 * It invokes the discovery state machine to perform necessary state
2172 * transitions and to release the ndlps associated with the @vport. Note,
2173 * the physical port is treated as @vport 0.
2176 lpfc_cleanup(struct lpfc_vport *vport)
2178 struct lpfc_hba *phba = vport->phba;
2179 struct lpfc_nodelist *ndlp, *next_ndlp;
2182 if (phba->link_state > LPFC_LINK_DOWN)
2183 lpfc_port_link_failure(vport);
2185 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2186 if (!NLP_CHK_NODE_ACT(ndlp)) {
2187 ndlp = lpfc_enable_node(vport, ndlp,
2188 NLP_STE_UNUSED_NODE);
2191 spin_lock_irq(&phba->ndlp_lock);
2192 NLP_SET_FREE_REQ(ndlp);
2193 spin_unlock_irq(&phba->ndlp_lock);
2194 /* Trigger the release of the ndlp memory */
2198 spin_lock_irq(&phba->ndlp_lock);
2199 if (NLP_CHK_FREE_REQ(ndlp)) {
2200 /* The ndlp should not be in memory free mode already */
2201 spin_unlock_irq(&phba->ndlp_lock);
2204 /* Indicate request for freeing ndlp memory */
2205 NLP_SET_FREE_REQ(ndlp);
2206 spin_unlock_irq(&phba->ndlp_lock);
2208 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2209 ndlp->nlp_DID == Fabric_DID) {
2210 /* Just free up ndlp with Fabric_DID for vports */
2215 if (ndlp->nlp_type & NLP_FABRIC)
2216 lpfc_disc_state_machine(vport, ndlp, NULL,
2217 NLP_EVT_DEVICE_RECOVERY);
2219 lpfc_disc_state_machine(vport, ndlp, NULL,
2224 /* At this point, ALL ndlp's should be gone
2225 * because of the previous NLP_EVT_DEVICE_RM.
	 * Let's wait for this to happen, if needed.
2228 while (!list_empty(&vport->fc_nodes)) {
2230 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2231 "0233 Nodelist not empty\n");
2232 list_for_each_entry_safe(ndlp, next_ndlp,
2233 &vport->fc_nodes, nlp_listp) {
2234 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2236 "0282 did:x%x ndlp:x%p "
2237 "usgmap:x%x refcnt:%d\n",
2238 ndlp->nlp_DID, (void *)ndlp,
					 ndlp->nlp_usg_map,
					 atomic_read(&ndlp->kref.refcount));
2246 /* Wait for any activity on ndlps to settle */
2252 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2253 * @vport: pointer to a virtual N_Port data structure.
2255 * This routine stops all the timers associated with a @vport. This function
2256 * is invoked before disabling or deleting a @vport. Note that the physical
2257 * port is treated as @vport 0.
2260 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2262 del_timer_sync(&vport->els_tmofunc);
2263 del_timer_sync(&vport->fc_fdmitmo);
2264 lpfc_can_disctmo(vport);
2269 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2270 * @phba: pointer to lpfc hba data structure.
2272 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the hbalock.
2276 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2278 /* Clear pending FCF rediscovery wait flag */
2279 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2281 /* Now, try to stop the timer */
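	/*
	 * del_timer() rather than del_timer_sync() is used here: the caller
	 * holds the hbalock, which the timer handler also takes, so waiting
	 * for the handler to finish could deadlock.
	 */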
2282 del_timer(&phba->fcf.redisc_wait);
2286 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2287 * @phba: pointer to lpfc hba data structure.
2289 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the
 * hbalock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
2295 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2297 spin_lock_irq(&phba->hbalock);
2298 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2299 /* FCF rediscovery timer already fired or stopped */
2300 spin_unlock_irq(&phba->hbalock);
2303 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2304 /* Clear failover in progress flags */
2305 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2306 spin_unlock_irq(&phba->hbalock);
2310 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2311 * @phba: pointer to lpfc hba data structure.
 * This routine stops all the timers associated with an HBA. This function is
 * invoked before either putting an HBA offline or unloading the driver.
2317 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2319 lpfc_stop_vport_timers(phba->pport);
2320 del_timer_sync(&phba->sli.mbox_tmo);
2321 del_timer_sync(&phba->fabric_block_timer);
2322 del_timer_sync(&phba->eratt_poll);
2323 del_timer_sync(&phba->hb_tmofunc);
2324 phba->hb_outstanding = 0;
2326 switch (phba->pci_dev_grp) {
2327 case LPFC_PCI_DEV_LP:
2328 /* Stop any LightPulse device specific driver timers */
2329 del_timer_sync(&phba->fcp_poll_timer);
2331 case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
2333 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2336 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2337 "0297 Invalid device group (x%x)\n",
 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2346 * @phba: pointer to lpfc hba data structure.
 * This routine marks an HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all user space access to
 * the HBA, whether through the sysfs interface or the libdfc interface, will
 * be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
2355 lpfc_block_mgmt_io(struct lpfc_hba * phba)
2357 unsigned long iflag;
2358 uint8_t actcmd = MBX_HEARTBEAT;
2359 unsigned long timeout;
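	/* Default to the heartbeat command's timeout if no mailbox is active */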
2362 spin_lock_irqsave(&phba->hbalock, iflag);
2363 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2364 if (phba->sli.mbox_active)
2365 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2366 spin_unlock_irqrestore(&phba->hbalock, iflag);
2367 /* Determine how long we might wait for the active mailbox
2368 * command to be gracefully completed by firmware.
	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
			jiffies;
	/* Wait for the outstanding mailbox command to complete */
2373 while (phba->sli.mbox_active) {
2374 /* Check active mailbox complete status every 2ms */
2376 if (time_after(jiffies, timeout)) {
2377 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2378 "2813 Mgmt IO is Blocked %x "
2379 "- mbox cmd %x still active\n",
2380 phba->sli.sli_flag, actcmd);
 * lpfc_online - Initialize and bring an HBA online
2388 * @phba: pointer to lpfc hba data structure.
 * This routine initializes the HBA and brings it online. During this
2391 * process, the management interface is blocked to prevent user space access
2392 * to the HBA interfering with the driver initialization.
2399 lpfc_online(struct lpfc_hba *phba)
2401 struct lpfc_vport *vport;
2402 struct lpfc_vport **vports;
2407 vport = phba->pport;
2409 if (!(vport->fc_flag & FC_OFFLINE_MODE))
2412 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2413 "0458 Bring Adapter online\n");
2415 lpfc_block_mgmt_io(phba);
2417 if (!lpfc_sli_queue_setup(phba)) {
2418 lpfc_unblock_mgmt_io(phba);
2422 if (phba->sli_rev == LPFC_SLI_REV4) {
2423 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2424 lpfc_unblock_mgmt_io(phba);
2428 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2429 lpfc_unblock_mgmt_io(phba);
2434 vports = lpfc_create_vport_work_array(phba);
2436 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2437 struct Scsi_Host *shost;
2438 shost = lpfc_shost_from_vport(vports[i]);
2439 spin_lock_irq(shost->host_lock);
2440 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2441 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2442 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2443 if (phba->sli_rev == LPFC_SLI_REV4)
2444 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2445 spin_unlock_irq(shost->host_lock);
2447 lpfc_destroy_vport_work_array(phba, vports);
2449 lpfc_unblock_mgmt_io(phba);
 * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
2455 * @phba: pointer to lpfc hba data structure.
 * This routine marks an HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all user space
 * access to the HBA, whether through the sysfs interface or the libdfc
 * interface, will be allowed. The HBA is set to block the management interface
 * when the driver prepares the HBA interface for online or offline and then
 * set to unblock the management interface afterwards.
2465 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2467 unsigned long iflag;
2469 spin_lock_irqsave(&phba->hbalock, iflag);
2470 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2471 spin_unlock_irqrestore(&phba->hbalock, iflag);
 * lpfc_offline_prep - Prepare an HBA to be brought offline
2476 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to prepare an HBA to be brought offline. It performs
2479 * unregistration login to all the nodes on all vports and flushes the mailbox
2480 * queue to make it ready to be brought offline.
2483 lpfc_offline_prep(struct lpfc_hba * phba)
2485 struct lpfc_vport *vport = phba->pport;
2486 struct lpfc_nodelist *ndlp, *next_ndlp;
2487 struct lpfc_vport **vports;
2488 struct Scsi_Host *shost;
2491 if (vport->fc_flag & FC_OFFLINE_MODE)
2494 lpfc_block_mgmt_io(phba);
2496 lpfc_linkdown(phba);
2498 /* Issue an unreg_login to all nodes on all vports */
2499 vports = lpfc_create_vport_work_array(phba);
2500 if (vports != NULL) {
2501 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2502 if (vports[i]->load_flag & FC_UNLOADING)
2504 shost = lpfc_shost_from_vport(vports[i]);
2505 spin_lock_irq(shost->host_lock);
2506 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2507 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2508 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2509 spin_unlock_irq(shost->host_lock);
2511 shost = lpfc_shost_from_vport(vports[i]);
2512 list_for_each_entry_safe(ndlp, next_ndlp,
2513 &vports[i]->fc_nodes,
2515 if (!NLP_CHK_NODE_ACT(ndlp))
2517 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2519 if (ndlp->nlp_type & NLP_FABRIC) {
2520 lpfc_disc_state_machine(vports[i], ndlp,
2521 NULL, NLP_EVT_DEVICE_RECOVERY);
2522 lpfc_disc_state_machine(vports[i], ndlp,
2523 NULL, NLP_EVT_DEVICE_RM);
2525 spin_lock_irq(shost->host_lock);
2526 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2527 spin_unlock_irq(shost->host_lock);
2528 lpfc_unreg_rpi(vports[i], ndlp);
2532 lpfc_destroy_vport_work_array(phba, vports);
2534 lpfc_sli_mbox_sys_shutdown(phba);
 * lpfc_offline - Bring an HBA offline
2539 * @phba: pointer to lpfc hba data structure.
 * This routine actually brings an HBA offline. It stops all the timers
2542 * associated with the HBA, brings down the SLI layer, and eventually
2543 * marks the HBA as in offline state for the upper layer protocol.
2546 lpfc_offline(struct lpfc_hba *phba)
2548 struct Scsi_Host *shost;
2549 struct lpfc_vport **vports;
2552 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2555 /* stop port and all timers associated with this hba */
2556 lpfc_stop_port(phba);
2557 vports = lpfc_create_vport_work_array(phba);
2559 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2560 lpfc_stop_vport_timers(vports[i]);
2561 lpfc_destroy_vport_work_array(phba, vports);
2562 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2563 "0460 Bring Adapter offline\n");
2564 /* Bring down the SLI Layer and cleanup. The HBA is offline
2566 lpfc_sli_hba_down(phba);
2567 spin_lock_irq(&phba->hbalock);
2569 spin_unlock_irq(&phba->hbalock);
2570 vports = lpfc_create_vport_work_array(phba);
2572 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2573 shost = lpfc_shost_from_vport(vports[i]);
2574 spin_lock_irq(shost->host_lock);
2575 vports[i]->work_port_events = 0;
2576 vports[i]->fc_flag |= FC_OFFLINE_MODE;
2577 spin_unlock_irq(shost->host_lock);
2579 lpfc_destroy_vport_work_array(phba, vports);
2583 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2584 * @phba: pointer to lpfc hba data structure.
2586 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * list back to the kernel. It is called from lpfc_pci_remove_one to free
2588 * the internal resources before the device is removed from the system.
2591 * 0 - successful (for now, it always returns 0)
2594 lpfc_scsi_free(struct lpfc_hba *phba)
2596 struct lpfc_scsi_buf *sb, *sb_next;
2597 struct lpfc_iocbq *io, *io_next;
2599 spin_lock_irq(&phba->hbalock);
2600 /* Release all the lpfc_scsi_bufs maintained by this host. */
2601 spin_lock(&phba->scsi_buf_list_lock);
2602 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
2607 phba->total_scsi_bufs--;
2609 spin_unlock(&phba->scsi_buf_list_lock);
2611 /* Release all the lpfc_iocbq entries maintained by this host. */
2612 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
2615 phba->total_iocbq_bufs--;
2617 spin_unlock_irq(&phba->hbalock);
2622 * lpfc_create_port - Create an FC port
2623 * @phba: pointer to lpfc hba data structure.
2624 * @instance: a unique integer ID to this FC port.
2625 * @dev: pointer to the device data structure.
 * This routine creates an FC port for the upper layer protocol. The FC port
2628 * can be created on top of either a physical port or a virtual port provided
2629 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates the FC port created before adding the shost into the SCSI
 * layer.
2634 * @vport - pointer to the virtual N_Port data structure.
2635 * NULL - port create failed.
2638 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2640 struct lpfc_vport *vport;
2641 struct Scsi_Host *shost;
2644 if (dev != &phba->pcidev->dev)
2645 shost = scsi_host_alloc(&lpfc_vport_template,
2646 sizeof(struct lpfc_vport));
2648 shost = scsi_host_alloc(&lpfc_template,
2649 sizeof(struct lpfc_vport));
2653 vport = (struct lpfc_vport *) shost->hostdata;
2655 vport->load_flag |= FC_LOADING;
2656 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2657 vport->fc_rscn_flush = 0;
2659 lpfc_get_vport_cfgparam(vport);
2660 shost->unique_id = instance;
2661 shost->max_id = LPFC_MAX_TARGET;
2662 shost->max_lun = vport->cfg_max_luns;
2663 shost->this_id = -1;
2664 shost->max_cmd_len = 16;
2665 if (phba->sli_rev == LPFC_SLI_REV4) {
2666 shost->dma_boundary =
2667 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2668 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2672 * Set initial can_queue value since 0 is no longer supported and
2673 * scsi_add_host will fail. This will be adjusted later based on the
2674 * max xri value determined in hba setup.
2676 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2677 if (dev != &phba->pcidev->dev) {
2678 shost->transportt = lpfc_vport_transport_template;
2679 vport->port_type = LPFC_NPIV_PORT;
2681 shost->transportt = lpfc_transport_template;
2682 vport->port_type = LPFC_PHYSICAL_PORT;
2685 /* Initialize all internally managed lists. */
2686 INIT_LIST_HEAD(&vport->fc_nodes);
2687 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2688 spin_lock_init(&vport->work_port_lock);
2690 init_timer(&vport->fc_disctmo);
2691 vport->fc_disctmo.function = lpfc_disc_timeout;
2692 vport->fc_disctmo.data = (unsigned long)vport;
2694 init_timer(&vport->fc_fdmitmo);
2695 vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2696 vport->fc_fdmitmo.data = (unsigned long)vport;
2698 init_timer(&vport->els_tmofunc);
2699 vport->els_tmofunc.function = lpfc_els_timeout;
2700 vport->els_tmofunc.data = (unsigned long)vport;
2701 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2705 spin_lock_irq(&phba->hbalock);
2706 list_add_tail(&vport->listentry, &phba->port_list);
2707 spin_unlock_irq(&phba->hbalock);
2711 scsi_host_put(shost);
2717 * destroy_port - destroy an FC port
2718 * @vport: pointer to an lpfc virtual N_Port data structure.
 * This routine destroys an FC port from the upper layer protocol. All the
2721 * resources associated with the port are released.
2724 destroy_port(struct lpfc_vport *vport)
2726 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2727 struct lpfc_hba *phba = vport->phba;
2729 lpfc_debugfs_terminate(vport);
2730 fc_remove_host(shost);
2731 scsi_remove_host(shost);
2733 spin_lock_irq(&phba->hbalock);
2734 list_del_init(&vport->listentry);
2735 spin_unlock_irq(&phba->hbalock);
2737 lpfc_cleanup(vport);
2742 * lpfc_get_instance - Get a unique integer ID
 * This routine allocates a unique integer ID from the lpfc_hba_index pool. It
2745 * uses the kernel idr facility to perform the task.
2748 * instance - a unique integer ID allocated as the new instance.
2749 * -1 - lpfc get instance failed.
2752 lpfc_get_instance(void)
2756 /* Assign an unused number */
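	/*
	 * Classic two-step IDR allocation: idr_pre_get() preloads memory so
	 * that idr_get_new() can then hand out an unused ID.
	 */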
2757 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2759 if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2765 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2766 * @shost: pointer to SCSI host data structure.
2767 * @time: elapsed time of the scan in jiffies.
2769 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the host scan is finished.
2772 * Note: there is no scan_start function as adapter initialization will have
2773 * asynchronously kicked off the link initialization.
2776 * 0 - SCSI host scan is not over yet.
2777 * 1 - SCSI host scan is over.
2779 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2781 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2782 struct lpfc_hba *phba = vport->phba;
2785 spin_lock_irq(shost->host_lock);
2787 if (vport->load_flag & FC_UNLOADING) {
2791 if (time >= 30 * HZ) {
2792 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2793 "0461 Scanning longer than 30 "
2794 "seconds. Continuing initialization\n");
2798 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2799 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2800 "0465 Link down longer than 15 "
2801 "seconds. Continuing initialization\n");
2806 if (vport->port_state != LPFC_VPORT_READY)
2808 if (vport->num_disc_nodes || vport->fc_prli_sent)
2810 if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2812 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2818 spin_unlock_irq(shost->host_lock);
2823 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2824 * @shost: pointer to SCSI host data structure.
2826 * This routine initializes a given SCSI host attributes on a FC port. The
2827 * SCSI host can be either on top of a physical port or a virtual port.
2829 void lpfc_host_attrib_init(struct Scsi_Host *shost)
2831 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2832 struct lpfc_hba *phba = vport->phba;
 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2837 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2838 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2839 fc_host_supported_classes(shost) = FC_COS_CLASS3;
2841 memset(fc_host_supported_fc4s(shost), 0,
2842 sizeof(fc_host_supported_fc4s(shost)));
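	/*
	 * Bytes 2 and 7 of the 32-byte FC-4 TYPE bitmap flag support for
	 * SCSI-FCP (TYPE 0x08) and CT (TYPE 0x20) respectively.
	 */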
2843 fc_host_supported_fc4s(shost)[2] = 1;
2844 fc_host_supported_fc4s(shost)[7] = 1;
2846 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2847 sizeof fc_host_symbolic_name(shost));
2849 fc_host_supported_speeds(shost) = 0;
2850 if (phba->lmt & LMT_10Gb)
2851 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2852 if (phba->lmt & LMT_8Gb)
2853 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2854 if (phba->lmt & LMT_4Gb)
2855 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2856 if (phba->lmt & LMT_2Gb)
2857 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2858 if (phba->lmt & LMT_1Gb)
2859 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
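	/*
	 * The maximum frame size is the 12-bit buffer-to-buffer receive
	 * data field size from the service parameters: the low nibble of
	 * bbRcvSizeMsb supplies the high 4 bits and bbRcvSizeLsb the low 8.
	 */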
2861 fc_host_maxframe_size(shost) =
2862 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2863 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2865 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
2867 /* This value is also unchanging */
2868 memset(fc_host_active_fc4s(shost), 0,
2869 sizeof(fc_host_active_fc4s(shost)));
2870 fc_host_active_fc4s(shost)[2] = 1;
2871 fc_host_active_fc4s(shost)[7] = 1;
2873 fc_host_max_npiv_vports(shost) = phba->max_vpi;
2874 spin_lock_irq(shost->host_lock);
2875 vport->load_flag &= ~FC_LOADING;
2876 spin_unlock_irq(shost->host_lock);
2880 * lpfc_stop_port_s3 - Stop SLI3 device port
2881 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to stop an SLI3 device port. It stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
2888 lpfc_stop_port_s3(struct lpfc_hba *phba)
2890 /* Clear all interrupt enable conditions */
2891 writel(0, phba->HCregaddr);
2892 readl(phba->HCregaddr); /* flush */
2893 /* Clear all pending interrupts */
2894 writel(0xffffffff, phba->HAregaddr);
2895 readl(phba->HAregaddr); /* flush */
2897 /* Reset some HBA SLI setup states */
2898 lpfc_stop_hba_timers(phba);
2899 phba->pport->work_port_events = 0;
2903 * lpfc_stop_port_s4 - Stop SLI4 device port
2904 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to stop an SLI4 device port. It stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
2911 lpfc_stop_port_s4(struct lpfc_hba *phba)
2913 /* Reset some HBA SLI4 setup states */
2914 lpfc_stop_hba_timers(phba);
2915 phba->pport->work_port_events = 0;
2916 phba->sli4_hba.intr_enable = 0;
2920 * lpfc_stop_port - Wrapper function for stopping hba port
2921 * @phba: Pointer to HBA context object.
 * This routine wraps the actual SLI3 or SLI4 HBA stop-port routine, invoking
 * it through the API jump table function pointer in the lpfc_hba struct.
2927 lpfc_stop_port(struct lpfc_hba *phba)
2929 phba->lpfc_stop_port(phba);
2933 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2934 * @phba: Pointer to hba for which this call is being executed.
2936 * This routine starts the timer waiting for the FCF rediscovery to complete.
2939 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2941 unsigned long fcf_redisc_wait_tmo =
2942 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2943 /* Start fcf rediscovery wait period timer */
2944 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2945 spin_lock_irq(&phba->hbalock);
2946 /* Allow action to new fcf asynchronous event */
2947 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2948 /* Mark the FCF rediscovery pending state */
2949 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2950 spin_unlock_irq(&phba->hbalock);
2954 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: pointer to the lpfc hba data structure, cast to an unsigned long.
 * This routine is invoked when the wait for FCF table rediscovery times
 * out. If new FCF record(s) have been discovered during the wait period,
 * a new FCF event is added to the FCOE async event list, and the worker
 * thread is woken up to process it from the worker thread context.
2964 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2966 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2968 /* Don't send FCF rediscovery event if timer cancelled */
2969 spin_lock_irq(&phba->hbalock);
2970 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2971 spin_unlock_irq(&phba->hbalock);
2974 /* Clear FCF rediscovery timer pending flag */
2975 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2976 /* FCF rediscovery event to worker thread */
2977 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2978 spin_unlock_irq(&phba->hbalock);
2979 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2980 "2776 FCF rediscover quiescent timer expired\n");
2981 /* wake up worker thread */
2982 lpfc_worker_wake_up(phba);
2986 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2987 * @phba: pointer to lpfc hba data structure.
 * This function uses the QUERY_FW_CFG mailbox command to determine if the
 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
 * was successful and the firmware supports FCoE. Any other return indicates
 * an error. It is assumed that this function will be called before interrupts
 * are enabled.
2996 lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2999 LPFC_MBOXQ_t *mboxq;
3000 struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
3002 uint32_t shdr_status, shdr_add_status;
3004 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3006 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3007 "2621 Failed to allocate mbox for "
3008 "query firmware config cmd\n");
3011 query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
3012 length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
3013 sizeof(struct lpfc_sli4_cfg_mhdr));
3014 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
3015 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
3016 length, LPFC_SLI4_MBX_EMBED);
3017 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3018 /* The IOCTL status is embedded in the mailbox subheader. */
3019 shdr_status = bf_get(lpfc_mbox_hdr_status,
3020 &query_fw_cfg->header.cfg_shdr.response);
3021 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
3022 &query_fw_cfg->header.cfg_shdr.response);
3023 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
3024 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3025 "2622 Query Firmware Config failed "
3026 "mbx status x%x, status x%x add_status x%x\n",
3027 rc, shdr_status, shdr_add_status);
3030 if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
3031 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3032 "2623 FCoE Function not supported by firmware. "
3033 "Function mode = %08x\n",
3034 query_fw_cfg->function_mode);
3037 if (rc != MBX_TIMEOUT)
3038 mempool_free(mboxq, phba->mbox_mem_pool);
3043 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3044 * @phba: pointer to lpfc hba data structure.
3045 * @acqe_link: pointer to the async link completion queue entry.
3047 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
3051 * Return: Link-attention status in terms of base driver's coding.
3054 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3055 struct lpfc_acqe_link *acqe_link)
3057 uint16_t latt_fault;
3059 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3060 case LPFC_ASYNC_LINK_FAULT_NONE:
3061 case LPFC_ASYNC_LINK_FAULT_LOCAL:
3062 case LPFC_ASYNC_LINK_FAULT_REMOTE:
3066 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3067 "0398 Invalid link fault code: x%x\n",
3068 bf_get(lpfc_acqe_link_fault, acqe_link));
3069 latt_fault = MBXERR_ERROR;
3076 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3077 * @phba: pointer to lpfc hba data structure.
3078 * @acqe_link: pointer to the async link completion queue entry.
3080 * This routine is to parse the SLI4 link attention type and translate it
3081 * into the base driver's link attention type coding.
3083 * Return: Link attention type in terms of base driver's coding.
3086 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3087 struct lpfc_acqe_link *acqe_link)
3091 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3092 case LPFC_ASYNC_LINK_STATUS_DOWN:
3093 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3094 att_type = LPFC_ATT_LINK_DOWN;
3096 case LPFC_ASYNC_LINK_STATUS_UP:
3097 /* Ignore physical link up events - wait for logical link up */
3098 att_type = LPFC_ATT_RESERVED;
3100 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3101 att_type = LPFC_ATT_LINK_UP;
3104 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3105 "0399 Invalid link attention type: x%x\n",
3106 bf_get(lpfc_acqe_link_status, acqe_link));
3107 att_type = LPFC_ATT_RESERVED;
3114 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3115 * @phba: pointer to lpfc hba data structure.
3116 * @acqe_link: pointer to the async link completion queue entry.
3118 * This routine is to parse the SLI4 link-attention link speed and translate
3119 * it into the base driver's link-attention link speed coding.
3121 * Return: Link-attention link speed in terms of base driver's coding.
3124 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3125 struct lpfc_acqe_link *acqe_link)
3129 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3130 case LPFC_ASYNC_LINK_SPEED_ZERO:
3131 case LPFC_ASYNC_LINK_SPEED_10MBPS:
3132 case LPFC_ASYNC_LINK_SPEED_100MBPS:
3133 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3135 case LPFC_ASYNC_LINK_SPEED_1GBPS:
3136 link_speed = LPFC_LINK_SPEED_1GHZ;
3138 case LPFC_ASYNC_LINK_SPEED_10GBPS:
3139 link_speed = LPFC_LINK_SPEED_10GHZ;
3142 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3143 "0483 Invalid link-attention link speed: x%x\n",
3144 bf_get(lpfc_acqe_link_speed, acqe_link));
3145 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3152 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3153 * @phba: pointer to lpfc hba data structure.
3154 * @acqe_link: pointer to the async link completion queue entry.
3156 * This routine is to handle the SLI4 asynchronous FCoE link event.
3159 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3160 struct lpfc_acqe_link *acqe_link)
3162 struct lpfc_dmabuf *mp;
3165 struct lpfc_mbx_read_top *la;
3169 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3170 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3172 phba->fcoe_eventtag = acqe_link->event_tag;
3173 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3175 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3176 "0395 The mboxq allocation failed\n");
3179 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3181 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3182 "0396 The lpfc_dmabuf allocation failed\n");
3185 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3187 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3188 "0397 The mbuf allocation failed\n");
3189 goto out_free_dmabuf;
3192 /* Cleanup any outstanding ELS commands */
3193 lpfc_els_flush_all_cmd(phba);
3195 /* Block ELS IOCBs until we have done process link event */
3196 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3198 /* Update link event statistics */
3199 phba->sli.slistat.link_event++;
3201 /* Create lpfc_handle_latt mailbox command from link ACQE */
3202 lpfc_read_topology(phba, pmb, mp);
3203 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3204 pmb->vport = phba->pport;
3206 /* Keep the link status for extra SLI4 state machine reference */
3207 phba->sli4_hba.link_state.speed =
3208 bf_get(lpfc_acqe_link_speed, acqe_link);
3209 phba->sli4_hba.link_state.duplex =
3210 bf_get(lpfc_acqe_link_duplex, acqe_link);
3211 phba->sli4_hba.link_state.status =
3212 bf_get(lpfc_acqe_link_status, acqe_link);
3213 phba->sli4_hba.link_state.type =
3214 bf_get(lpfc_acqe_link_type, acqe_link);
3215 phba->sli4_hba.link_state.number =
3216 bf_get(lpfc_acqe_link_number, acqe_link);
3217 phba->sli4_hba.link_state.fault =
3218 bf_get(lpfc_acqe_link_fault, acqe_link);
3219 phba->sli4_hba.link_state.logical_speed =
3220 bf_get(lpfc_acqe_logical_link_speed, acqe_link);
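	/* The logical link speed is reported by the port in units of 10Mbps */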
3221 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3222 "2900 Async FCoE Link event - Speed:%dGBit duplex:x%x "
3223 "LA Type:x%x Port Type:%d Port Number:%d Logical "
3224 "speed:%dMbps Fault:%d\n",
3225 phba->sli4_hba.link_state.speed,
3226 phba->sli4_hba.link_state.topology,
3227 phba->sli4_hba.link_state.status,
3228 phba->sli4_hba.link_state.type,
3229 phba->sli4_hba.link_state.number,
3230 phba->sli4_hba.link_state.logical_speed * 10,
3231 phba->sli4_hba.link_state.fault);
3233 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3234 * topology info. Note: Optional for non FC-AL ports.
3236 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3237 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3238 if (rc == MBX_NOT_FINISHED)
3239 goto out_free_dmabuf;
3243 * For FCoE Mode: fill in all the topology information we need and call
3244 * the READ_TOPOLOGY completion routine to continue without actually
3245 * sending the READ_TOPOLOGY mailbox command to the port.
3247 /* Parse and translate status field */
3249 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3251 /* Parse and translate link attention fields */
3252 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3253 la->eventTag = acqe_link->event_tag;
3254 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3255 bf_set(lpfc_mbx_read_top_link_spd, la,
3256 lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
	/* Fake the following irrelevant fields */
3259 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3260 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3261 bf_set(lpfc_mbx_read_top_il, la, 0);
3262 bf_set(lpfc_mbx_read_top_pb, la, 0);
3263 bf_set(lpfc_mbx_read_top_fa, la, 0);
3264 bf_set(lpfc_mbx_read_top_mm, la, 0);
3266 /* Invoke the lpfc_handle_latt mailbox command callback function */
3267 lpfc_mbx_cmpl_read_topology(phba, pmb);
3274 mempool_free(pmb, phba->mbox_mem_pool);
3278 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3279 * @phba: pointer to lpfc hba data structure.
3280 * @acqe_fc: pointer to the async fc completion queue entry.
3282 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3283 * that the event was received and then issue a read_topology mailbox command so
3284 * that the rest of the driver will treat it the same as SLI3.
3287 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3289 struct lpfc_dmabuf *mp;
3293 if (bf_get(lpfc_trailer_type, acqe_fc) !=
3294 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3295 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3296 "2895 Non FC link Event detected.(%d)\n",
3297 bf_get(lpfc_trailer_type, acqe_fc));
3300 /* Keep the link status for extra SLI4 state machine reference */
3301 phba->sli4_hba.link_state.speed =
3302 bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
3303 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3304 phba->sli4_hba.link_state.topology =
3305 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3306 phba->sli4_hba.link_state.status =
3307 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3308 phba->sli4_hba.link_state.type =
3309 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3310 phba->sli4_hba.link_state.number =
3311 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3312 phba->sli4_hba.link_state.fault =
3313 bf_get(lpfc_acqe_link_fault, acqe_fc);
3314 phba->sli4_hba.link_state.logical_speed =
3315 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
3316 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3317 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
3318 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3319 "%dMbps Fault:%d\n",
3320 phba->sli4_hba.link_state.speed,
3321 phba->sli4_hba.link_state.topology,
3322 phba->sli4_hba.link_state.status,
3323 phba->sli4_hba.link_state.type,
3324 phba->sli4_hba.link_state.number,
3325 phba->sli4_hba.link_state.logical_speed * 10,
3326 phba->sli4_hba.link_state.fault);
3327 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3329 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3330 "2897 The mboxq allocation failed\n");
3333 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3335 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3336 "2898 The lpfc_dmabuf allocation failed\n");
3339 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3341 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3342 "2899 The mbuf allocation failed\n");
3343 goto out_free_dmabuf;
3346 /* Cleanup any outstanding ELS commands */
3347 lpfc_els_flush_all_cmd(phba);
3349 /* Block ELS IOCBs until we have done process link event */
3350 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3352 /* Update link event statistics */
3353 phba->sli.slistat.link_event++;
3355 /* Create lpfc_handle_latt mailbox command from link ACQE */
3356 lpfc_read_topology(phba, pmb, mp);
3357 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3358 pmb->vport = phba->pport;
3360 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3361 if (rc == MBX_NOT_FINISHED)
3362 goto out_free_dmabuf;
3368 mempool_free(pmb, phba->mbox_mem_pool);
3372 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
3373 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
3376 * This routine is to handle the SLI4 asynchronous SLI events.
3379 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3381 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3382 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
3383 "x%08x SLI Event Type:%d",
3384 acqe_sli->event_data1, acqe_sli->event_data2,
3385 bf_get(lpfc_trailer_type, acqe_sli));
3390 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3391 * @vport: pointer to vport data structure.
3393 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3394 * response to a CVL event.
3396 * Return the pointer to the ndlp with the vport if successful, otherwise
3399 static struct lpfc_nodelist *
3400 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3402 struct lpfc_nodelist *ndlp;
3403 struct Scsi_Host *shost;
3404 struct lpfc_hba *phba;
3411 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3413 /* Cannot find existing Fabric ndlp, so allocate a new one */
3414 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3417 lpfc_nlp_init(vport, ndlp, Fabric_DID);
3418 /* Set the node type */
3419 ndlp->nlp_type |= NLP_FABRIC;
3420 /* Put ndlp onto node list */
3421 lpfc_enqueue_node(vport, ndlp);
3422 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3423 /* re-setup ndlp without removing from node list */
3424 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3428 if ((phba->pport->port_state < LPFC_FLOGI) &&
3429 (phba->pport->port_state != LPFC_VPORT_FAILED))
3431 /* If virtual link is not yet instantiated ignore CVL */
3432 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3433 && (vport->port_state != LPFC_VPORT_FAILED))
3435 shost = lpfc_shost_from_vport(vport);
3438 lpfc_linkdown_port(vport);
3439 lpfc_cleanup_pending_mbox(vport);
3440 spin_lock_irq(shost->host_lock);
3441 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3442 spin_unlock_irq(shost->host_lock);
3448 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3449 * @vport: pointer to lpfc hba data structure.
3451 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3452 * response to a FCF dead event.
3455 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3457 struct lpfc_vport **vports;
3460 vports = lpfc_create_vport_work_array(phba);
3462 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3463 lpfc_sli4_perform_vport_cvl(vports[i]);
3464 lpfc_destroy_vport_work_array(phba, vports);
3468 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
3469 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
3475 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3476 struct lpfc_acqe_fip *acqe_fip)
3478 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
3480 struct lpfc_vport *vport;
3481 struct lpfc_nodelist *ndlp;
3482 struct Scsi_Host *shost;
3483 int active_vlink_present;
3484 struct lpfc_vport **vports;
3487 phba->fc_eventTag = acqe_fip->event_tag;
3488 phba->fcoe_eventtag = acqe_fip->event_tag;
3489 switch (event_type) {
3490 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
3491 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
3492 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
3493 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3495 "2546 New FCF event, evt_tag:x%x, "
3497 acqe_fip->event_tag,
3500 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3502 "2788 FCF param modified event, "
3503 "evt_tag:x%x, index:x%x\n",
3504 acqe_fip->event_tag,
3506 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3508 * During period of FCF discovery, read the FCF
3509 * table record indexed by the event to update
3510 * FCF roundrobin failover eligible FCF bmask.
3512 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3514 "2779 Read FCF (x%x) for updating "
3515 "roundrobin FCF failover bmask\n",
3517 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
3520 /* If the FCF discovery is in progress, do nothing. */
3521 spin_lock_irq(&phba->hbalock);
3522 if (phba->hba_flag & FCF_TS_INPROG) {
3523 spin_unlock_irq(&phba->hbalock);
3526 /* If fast FCF failover rescan event is pending, do nothing */
3527 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3528 spin_unlock_irq(&phba->hbalock);
3532 /* If the FCF has been in discovered state, do nothing. */
3533 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3534 spin_unlock_irq(&phba->hbalock);
3537 spin_unlock_irq(&phba->hbalock);
3539 /* Otherwise, scan the entire FCF table and re-discover SAN */
3540 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3541 "2770 Start FCF table scan per async FCF "
3542 "event, evt_tag:x%x, index:x%x\n",
3543 acqe_fip->event_tag, acqe_fip->index);
3544 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3545 LPFC_FCOE_FCF_GET_FIRST);
3547 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3548 "2547 Issue FCF scan read FCF mailbox "
3549 "command failed (x%x)\n", rc);
3552 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
3553 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3554 "2548 FCF Table full count 0x%x tag 0x%x\n",
3555 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
3556 acqe_fip->event_tag);
3559 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
3560 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3561 "2549 FCF (x%x) disconnected from network, "
3562 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
3564 * If we are in the middle of FCF failover process, clear
3565 * the corresponding FCF bit in the roundrobin bitmap.
3567 spin_lock_irq(&phba->hbalock);
3568 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3569 spin_unlock_irq(&phba->hbalock);
3570 /* Update FLOGI FCF failover eligible FCF bmask */
3571 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
3574 spin_unlock_irq(&phba->hbalock);
3576 /* If the event is not for currently used fcf do nothing */
3577 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
3581 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery, in case the current FCF is no
		 * longer valid, as we are not already in the middle of the
		 * FCF failover process.
3586 spin_lock_irq(&phba->hbalock);
3587 /* Mark the fast failover process in progress */
3588 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3589 spin_unlock_irq(&phba->hbalock);
3591 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3592 "2771 Start FCF fast failover process due to "
3593 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3594 "\n", acqe_fip->event_tag, acqe_fip->index);
3595 rc = lpfc_sli4_redisc_fcf_table(phba);
3597 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					"2772 Issue FCF rediscover mailbox "
3600 "command failed, fail through to FCF "
3602 spin_lock_irq(&phba->hbalock);
3603 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3604 spin_unlock_irq(&phba->hbalock);
3606 * Last resort will fail over by treating this
3607 * as a link down to FCF registration.
3609 lpfc_sli4_fcf_dead_failthrough(phba);
3611 /* Reset FCF roundrobin bmask for new discovery */
3612 memset(phba->fcf.fcf_rr_bmask, 0,
3613 sizeof(*phba->fcf.fcf_rr_bmask));
3615 * Handling fast FCF failover to a DEAD FCF event is
		 * considered equivalent to receiving CVL on all vports.
3618 lpfc_sli4_perform_all_vport_cvl(phba);
3621 case LPFC_FIP_EVENT_TYPE_CVL:
3622 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3623 "2718 Clear Virtual Link Received for VPI 0x%x"
3624 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3625 vport = lpfc_find_vport_by_vpid(phba,
3626 acqe_fip->index - phba->vpi_base);
3627 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3630 active_vlink_present = 0;
3632 vports = lpfc_create_vport_work_array(phba);
3634 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3636 if ((!(vports[i]->fc_flag &
3637 FC_VPORT_CVL_RCVD)) &&
3638 (vports[i]->port_state > LPFC_FDISC)) {
3639 active_vlink_present = 1;
3643 lpfc_destroy_vport_work_array(phba, vports);
3646 if (active_vlink_present) {
3648 * If there are other active VLinks present,
3649 * re-instantiate the Vlink using FDISC.
3651 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3652 shost = lpfc_shost_from_vport(vport);
3653 spin_lock_irq(shost->host_lock);
3654 ndlp->nlp_flag |= NLP_DELAY_TMO;
3655 spin_unlock_irq(shost->host_lock);
3656 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3657 vport->port_state = LPFC_FDISC;
			 * Otherwise, we request the port to rediscover
			 * the entire FCF table for a fast recovery,
			 * in case the current FCF is no longer valid,
			 * if we are not already in the FCF failover
			 * process.
3666 spin_lock_irq(&phba->hbalock);
3667 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3668 spin_unlock_irq(&phba->hbalock);
3671 /* Mark the fast failover process in progress */
3672 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3673 spin_unlock_irq(&phba->hbalock);
3674 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3676 "2773 Start FCF failover per CVL, "
3677 "evt_tag:x%x\n", acqe_fip->event_tag);
3678 rc = lpfc_sli4_redisc_fcf_table(phba);
3680 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3682 "2774 Issue FCF rediscover "
						"mailbox command failed, "
3684 "through to CVL event\n");
3685 spin_lock_irq(&phba->hbalock);
3686 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3687 spin_unlock_irq(&phba->hbalock);
				 * Last resort will be a retry on the
				 * currently registered FCF entry.
3692 lpfc_retry_pport_discovery(phba);
3695 * Reset FCF roundrobin bmask for new
3698 memset(phba->fcf.fcf_rr_bmask, 0,
3699 sizeof(*phba->fcf.fcf_rr_bmask));
3703 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3704 "0288 Unknown FCoE event type 0x%x event tag "
3705 "0x%x\n", event_type, acqe_fip->event_tag);
3711 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3712 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async DCBX completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous DCBX event.
3718 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3719 struct lpfc_acqe_dcbx *acqe_dcbx)
3721 phba->fc_eventTag = acqe_dcbx->event_tag;
3722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
3728 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3729 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The port
 * reports the logical link speed in units of 10Mbps.
3737 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3738 struct lpfc_acqe_grp5 *acqe_grp5)
3740 uint16_t prev_ll_spd;
3742 phba->fc_eventTag = acqe_grp5->event_tag;
3743 phba->fcoe_eventtag = acqe_grp5->event_tag;
3744 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3745 phba->sli4_hba.link_state.logical_speed =
3746 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3747 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3748 "2789 GRP5 Async Event: Updating logical link speed "
3749 "from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3750 (phba->sli4_hba.link_state.logical_speed*10));
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3755 * @phba: pointer to lpfc hba data structure.
3757 * This routine is invoked by the worker thread to process all the pending
3758 * SLI4 asynchronous events.
3760 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3762 struct lpfc_cq_event *cq_event;
3764 /* First, declare the async event has been handled */
3765 spin_lock_irq(&phba->hbalock);
3766 phba->hba_flag &= ~ASYNC_EVENT;
3767 spin_unlock_irq(&phba->hbalock);
3768 /* Now, handle all the async events */
3769 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3770 /* Get the first event from the head of the event queue */
3771 spin_lock_irq(&phba->hbalock);
3772 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3773 cq_event, struct lpfc_cq_event, list);
3774 spin_unlock_irq(&phba->hbalock);
3775 /* Process the asynchronous event */
3776 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3777 case LPFC_TRAILER_CODE_LINK:
3778 lpfc_sli4_async_link_evt(phba,
3779 &cq_event->cqe.acqe_link);
3781 case LPFC_TRAILER_CODE_FCOE:
3782 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3784 case LPFC_TRAILER_CODE_DCBX:
3785 lpfc_sli4_async_dcbx_evt(phba,
3786 &cq_event->cqe.acqe_dcbx);
3788 case LPFC_TRAILER_CODE_GRP5:
3789 lpfc_sli4_async_grp5_evt(phba,
3790 &cq_event->cqe.acqe_grp5);
3792 case LPFC_TRAILER_CODE_FC:
3793 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3795 case LPFC_TRAILER_CODE_SLI:
3796 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3799 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"1804 Invalid asynchronous event code: "
3801 "x%x\n", bf_get(lpfc_trailer_code,
3802 &cq_event->cqe.mcqe_cmpl));
3805 /* Free the completion event processed to the free pool */
3806 lpfc_sli4_cq_event_release(phba, cq_event);
3811 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3812 * @phba: pointer to lpfc hba data structure.
3814 * This routine is invoked by the worker thread to process FCF table
3815 * rediscovery pending completion event.
3817 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3821 spin_lock_irq(&phba->hbalock);
3822 /* Clear FCF rediscovery timeout event */
3823 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3824 /* Clear driver fast failover FCF record flag */
3825 phba->fcf.failover_rec.flag = 0;
3826 /* Set state for FCF fast failover */
3827 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3828 spin_unlock_irq(&phba->hbalock);
3830 /* Scan FCF table from the first entry to re-discover SAN */
3831 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3832 "2777 Start post-quiescent FCF table scan\n");
3833 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3835 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3836 "2747 Issue FCF scan read FCF mailbox "
3837 "command failed 0x%x\n", rc);
3841 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3842 * @phba: pointer to lpfc hba data structure.
3843 * @dev_grp: The HBA PCI-Device group number.
3845 * This routine is invoked to set up the per HBA PCI-Device group function
3846 * API jump table entries.
3848 * Return: 0 if success, otherwise -ENODEV
3851 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3855 /* Set up lpfc PCI-device group */
3856 phba->pci_dev_grp = dev_grp;
3858 /* The LPFC_PCI_DEV_OC uses SLI4 */
3859 if (dev_grp == LPFC_PCI_DEV_OC)
3860 phba->sli_rev = LPFC_SLI_REV4;
3862 /* Set up device INIT API function jump table */
3863 rc = lpfc_init_api_table_setup(phba, dev_grp);
3866 /* Set up SCSI API function jump table */
3867 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3870 /* Set up SLI API function jump table */
3871 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3874 /* Set up MBOX API function jump table */
3875 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3883 * lpfc_log_intr_mode - Log the active interrupt mode
3884 * @phba: pointer to lpfc hba data structure.
3885 * @intr_mode: active interrupt mode adopted.
 * This routine is invoked to log the currently active interrupt mode of
 * the device.
3890 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3892 switch (intr_mode) {
3894 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3895 "0470 Enable INTx interrupt mode.\n");
3898 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3899 "0481 Enabled MSI interrupt mode.\n");
3902 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3903 "0480 Enabled MSI-X interrupt mode.\n");
3906 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3907 "0482 Illegal interrupt mode.\n");
3914 * lpfc_enable_pci_dev - Enable a generic PCI device.
3915 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
3922 * other values - error
3925 lpfc_enable_pci_dev(struct lpfc_hba *phba)
3927 struct pci_dev *pdev;
3930 /* Obtain PCI device reference */
3934 pdev = phba->pcidev;
3935 /* Select PCI BARs */
3936 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3937 /* Enable PCI device */
3938 if (pci_enable_device_mem(pdev))
3940 /* Request PCI resource for the device */
3941 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3942 goto out_disable_device;
3943 /* Set up device as PCI master and save state for EEH */
3944 pci_set_master(pdev);
3945 pci_try_set_mwi(pdev);
3946 pci_save_state(pdev);
3951 pci_disable_device(pdev);
3957 * lpfc_disable_pci_dev - Disable a generic PCI device.
3958 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
3964 lpfc_disable_pci_dev(struct lpfc_hba *phba)
3966 struct pci_dev *pdev;
3969 /* Obtain PCI device reference */
3973 pdev = phba->pcidev;
3974 /* Select PCI BARs */
3975 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3976 /* Release PCI resource and disable PCI device */
3977 pci_release_selected_regions(pdev, bars);
3978 pci_disable_device(pdev);
3979 /* Null out PCI private reference to driver */
3980 pci_set_drvdata(pdev, NULL);
3986 * lpfc_reset_hba - Reset a hba
3987 * @phba: pointer to lpfc hba data structure.
3989 * This routine is invoked to reset a hba device. It brings the HBA
3990 * offline, performs a board restart, and then brings the board back
3991 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * any outstanding mailbox commands.
3995 lpfc_reset_hba(struct lpfc_hba *phba)
3997 /* If resets are disabled then set error state and return. */
3998 if (!phba->cfg_enable_hba_reset) {
3999 phba->link_state = LPFC_HBA_ERROR;
4002 lpfc_offline_prep(phba);
4004 lpfc_sli_brdrestart(phba);
4006 lpfc_unblock_mgmt_io(phba);
4010 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4011 * @phba: pointer to lpfc hba data structure.
4013 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it is attached to.
4018 * other values - error
4021 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4023 struct lpfc_sli *psli;
4026 * Initialize timers used by driver
4029 /* Heartbeat timer */
4030 init_timer(&phba->hb_tmofunc);
4031 phba->hb_tmofunc.function = lpfc_hb_timeout;
4032 phba->hb_tmofunc.data = (unsigned long)phba;
4035 /* MBOX heartbeat timer */
4036 init_timer(&psli->mbox_tmo);
4037 psli->mbox_tmo.function = lpfc_mbox_timeout;
4038 psli->mbox_tmo.data = (unsigned long) phba;
4039 /* FCP polling mode timer */
4040 init_timer(&phba->fcp_poll_timer);
4041 phba->fcp_poll_timer.function = lpfc_poll_timeout;
4042 phba->fcp_poll_timer.data = (unsigned long) phba;
4043 /* Fabric block timer */
4044 init_timer(&phba->fabric_block_timer);
4045 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4046 phba->fabric_block_timer.data = (unsigned long) phba;
4047 /* EA polling mode timer */
4048 init_timer(&phba->eratt_poll);
4049 phba->eratt_poll.function = lpfc_poll_eratt;
4050 phba->eratt_poll.data = (unsigned long) phba;
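/*
 * Editorial note: each init_timer()/.function/.data triple above is the
 * classic open-coded timer setup; on kernels that provide setup_timer()
 * the same initialization can be written in one call, e.g. (sketch):
 *
 *	setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
 *		    (unsigned long)phba);
 */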
4052 /* Host attention work mask setup */
4053 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4054 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4056 /* Get all the module params for configuring this host */
4057 lpfc_get_cfgparam(phba);
4058 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4059 phba->menlo_flag |= HBA_MENLO_SUPPORT;
4060 /* check for menlo minimum sg count */
4061 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4062 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4066 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4067 * used to create the sg_dma_buf_pool must be dynamically calculated.
4068 * 2 segments are added since the IOCB needs a command and response bde.
4070 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4071 sizeof(struct fcp_rsp) +
4072 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
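/*
 * Worked illustration (hypothetical count): with cfg_sg_seg_cnt == 64 the
 * pool buffer must hold the FCP command, the FCP response, and 66 BDEs,
 * i.e. 64 data segments plus the command and response entries:
 *
 *	sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)
 *			  + 66 * sizeof(struct ulp_bde64);
 */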
4074 if (phba->cfg_enable_bg) {
4075 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4076 phba->cfg_sg_dma_buf_size +=
4077 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4080 /* Also reinitialize the host templates with new values. */
4081 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4082 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4084 phba->max_vpi = LPFC_MAX_VPI;
4085 /* This will be set to correct value after config_port mbox */
4086 phba->max_vports = 0;
4089 * Initialize the SLI Layer to run with lpfc HBAs.
4091 lpfc_sli_setup(phba);
4092 lpfc_sli_queue_setup(phba);
4094 /* Allocate device driver memory */
4095 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4102 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4103 * @phba: pointer to lpfc hba data structure.
4105 * This routine is invoked to unset the driver internal resources set up
4106 * specific for supporting the SLI-3 HBA device it is attached to.
4109 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4111 /* Free device driver memory allocated */
4112 lpfc_mem_free_all(phba);
4118 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4119 * @phba: pointer to lpfc hba data structure.
4121 * This routine is invoked to set up the driver internal resources specific to
4122 * support the SLI-4 HBA device it is attached to.
4126 * other values - error
4129 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4131 struct lpfc_sli *psli;
4132 LPFC_MBOXQ_t *mboxq;
4133 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4134 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4135 struct lpfc_mqe *mqe;
4136 int longs, sli_family;
4138 /* Before proceeding, wait for POST done and device ready */
4139 rc = lpfc_sli4_post_status_check(phba);
4144 * Initialize timers used by driver
4147 /* Heartbeat timer */
4148 init_timer(&phba->hb_tmofunc);
4149 phba->hb_tmofunc.function = lpfc_hb_timeout;
4150 phba->hb_tmofunc.data = (unsigned long)phba;
4151 init_timer(&phba->rrq_tmr);
4152 phba->rrq_tmr.function = lpfc_rrq_timeout;
4153 phba->rrq_tmr.data = (unsigned long)phba;
4156 /* MBOX heartbeat timer */
4157 init_timer(&psli->mbox_tmo);
4158 psli->mbox_tmo.function = lpfc_mbox_timeout;
4159 psli->mbox_tmo.data = (unsigned long) phba;
4160 /* Fabric block timer */
4161 init_timer(&phba->fabric_block_timer);
4162 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4163 phba->fabric_block_timer.data = (unsigned long) phba;
4164 /* EA polling mode timer */
4165 init_timer(&phba->eratt_poll);
4166 phba->eratt_poll.function = lpfc_poll_eratt;
4167 phba->eratt_poll.data = (unsigned long) phba;
4168 /* FCF rediscover timer */
4169 init_timer(&phba->fcf.redisc_wait);
4170 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4171 phba->fcf.redisc_wait.data = (unsigned long)phba;
4174 * We need to do a READ_CONFIG mailbox command here before
4175 * calling lpfc_get_cfgparam. For VFs this will report the
4176 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4177 * All of the resources allocated
4178 * for this Port are tied to these values.
4180 /* Get all the module params for configuring this host */
4181 lpfc_get_cfgparam(phba);
4182 phba->max_vpi = LPFC_MAX_VPI;
4183 /* This will be set to correct value after the read_config mbox */
4184 phba->max_vports = 0;
4186 /* Program the default value of vlan_id and fc_map */
4187 phba->valid_vlan = 0;
4188 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4189 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4190 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
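/*
 * Editorial note: LPFC_FCOE_FCF_MAP0/1/2 hold the default FC-MAP value
 * 0x0EFC00 defined for FCoE, so fabric-provided MAC addresses take the
 * form 0E:FC:00:xx:xx:xx until the fabric supplies a different map.
 */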
4193 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4194 * used to create the sg_dma_buf_pool must be dynamically calculated.
4195 * 2 segments are added since the IOCB needs a command and response bde.
4196 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4197 * sgl sizes that are a power of 2 are used.
4199 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4200 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4202 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4203 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4204 switch (sli_family) {
4205 case LPFC_SLI_INTF_FAMILY_BE2:
4206 case LPFC_SLI_INTF_FAMILY_BE3:
4207 /* There is a single hint for BE - 2 pages per BPL. */
4208 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4209 LPFC_SLI_INTF_SLI_HINT1_1)
4210 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4212 case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4213 case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4217 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4218 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4219 dma_buf_size = dma_buf_size << 1)
4220 ;
4221 if (dma_buf_size == max_buf_size)
4222 phba->cfg_sg_seg_cnt = (dma_buf_size -
4223 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4224 (2 * sizeof(struct sli4_sge))) /
4225 sizeof(struct sli4_sge);
4226 phba->cfg_sg_dma_buf_size = dma_buf_size;
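/*
 * Worked illustration (hypothetical sizes): if buf_size came to 2500
 * bytes and the minimum buffer size were 1024, the loop above doubles
 * 1024 -> 2048 -> 4096 and stops at the first power of 2 that covers
 * buf_size (subject to max_buf_size), which keeps an SGL buffer from
 * straddling a 4K page boundary.
 */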
4228 /* Initialize buffer queue management fields */
4229 hbq_count = lpfc_sli_hbq_count();
4230 for (i = 0; i < hbq_count; ++i)
4231 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4232 INIT_LIST_HEAD(&phba->rb_pend_list);
4233 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4234 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4237 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4239 /* Initialize the Abort scsi buffer list used by driver */
4240 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4241 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4242 /* This abort list used by worker thread */
4243 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4246 * Initialize driver internal slow-path work queues
4249 /* Driver internal slow-path CQ Event pool */
4250 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4251 /* Response IOCB work queue list */
4252 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4253 /* Asynchronous event CQ Event work queue list */
4254 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4255 /* Fast-path XRI aborted CQ Event work queue list */
4256 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4257 /* Slow-path XRI aborted CQ Event work queue list */
4258 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4259 /* Receive queue CQ Event work queue list */
4260 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4262 /* Initialize the driver internal SLI layer lists. */
4263 lpfc_sli_setup(phba);
4264 lpfc_sli_queue_setup(phba);
4266 /* Allocate device driver memory */
4267 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4271 /* Create the bootstrap mailbox command */
4272 rc = lpfc_create_bootstrap_mbox(phba);
4276 /* Set up the host's endian order with the device. */
4277 rc = lpfc_setup_endian_order(phba);
4279 goto out_free_bsmbx;
4281 rc = lpfc_sli4_fw_cfg_check(phba);
4283 goto out_free_bsmbx;
4285 /* Set up the hba's configuration parameters. */
4286 rc = lpfc_sli4_read_config(phba);
4288 goto out_free_bsmbx;
4290 /* Perform a function reset */
4291 rc = lpfc_pci_function_reset(phba);
4293 goto out_free_bsmbx;
4295 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4296 GFP_KERNEL);
4297 if (!mboxq) {
4298 rc = -ENOMEM;
4299 goto out_free_bsmbx;
4302 /* Get the Supported Pages. It is always available. */
4303 lpfc_supported_pages(mboxq);
4304 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4305 if (unlikely(rc)) {
4306 rc = -EIO;
4307 mempool_free(mboxq, phba->mbox_mem_pool);
4308 goto out_free_bsmbx;
4309 }
4311 mqe = &mboxq->u.mqe;
4312 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4313 LPFC_MAX_SUPPORTED_PAGES);
4314 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4315 switch (pn_page[i]) {
4316 case LPFC_SLI4_PARAMETERS:
4317 phba->sli4_hba.pc_sli4_params.supported = 1;
4318 break;
4319 default:
4320 break;
4321 }
4324 /* Read the port's SLI4 Parameters capabilities if supported. */
4325 if (phba->sli4_hba.pc_sli4_params.supported)
4326 rc = lpfc_pc_sli4_params_get(phba, mboxq);
4327 mempool_free(mboxq, phba->mbox_mem_pool);
4328 if (rc) {
4329 rc = -EIO;
4330 goto out_free_bsmbx;
4331 }
4332 /* Create all the SLI4 queues */
4333 rc = lpfc_sli4_queue_create(phba);
4335 goto out_free_bsmbx;
4337 /* Create driver internal CQE event pool */
4338 rc = lpfc_sli4_cq_event_pool_create(phba);
4340 goto out_destroy_queue;
4342 /* Initialize and populate the iocb list per host */
4343 rc = lpfc_init_sgl_list(phba);
4345 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4346 "1400 Failed to initialize sgl list.\n");
4347 goto out_destroy_cq_event_pool;
4349 rc = lpfc_init_active_sgl_array(phba);
4351 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4352 "1430 Failed to initialize sgl list.\n");
4353 goto out_free_sgl_list;
4356 rc = lpfc_sli4_init_rpi_hdrs(phba);
4358 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4359 "1432 Failed to initialize rpi headers.\n");
4360 goto out_free_active_sgl;
4363 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
4364 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4365 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4367 if (!phba->fcf.fcf_rr_bmask) {
4368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4369 "2759 Failed allocate memory for FCF round "
4370 "robin failover bmask\n");
4371 goto out_remove_rpi_hdrs;
4374 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4375 phba->cfg_fcp_eq_count), GFP_KERNEL);
4376 if (!phba->sli4_hba.fcp_eq_hdl) {
4377 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4378 "2572 Failed allocate memory for fast-path "
4379 "per-EQ handle array\n");
4380 goto out_free_fcf_rr_bmask;
4383 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4384 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4385 if (!phba->sli4_hba.msix_entries) {
4386 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4387 "2573 Failed allocate memory for msi-x "
4388 "interrupt vector entries\n");
4389 goto out_free_fcp_eq_hdl;
4394 out_free_fcp_eq_hdl:
4395 kfree(phba->sli4_hba.fcp_eq_hdl);
4396 out_free_fcf_rr_bmask:
4397 kfree(phba->fcf.fcf_rr_bmask);
4398 out_remove_rpi_hdrs:
4399 lpfc_sli4_remove_rpi_hdrs(phba);
4400 out_free_active_sgl:
4401 lpfc_free_active_sgl(phba);
4403 lpfc_free_sgl_list(phba);
4404 out_destroy_cq_event_pool:
4405 lpfc_sli4_cq_event_pool_destroy(phba);
4407 lpfc_sli4_queue_destroy(phba);
4409 lpfc_destroy_bootstrap_mbox(phba);
4411 lpfc_mem_free(phba);
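/*
 * Editorial note: the label ladder above is the standard kernel
 * goto-unwind idiom; each label releases exactly what was acquired
 * before the failing step, in reverse (LIFO) order, e.g. (sketch):
 *
 *	rc = acquire_a();
 *	if (rc)
 *		goto out;
 *	rc = acquire_b();
 *	if (rc)
 *		goto undo_a;
 *	return 0;
 * undo_a:
 *	release_a();
 * out:
 *	return rc;
 */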
4416 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4417 * @phba: pointer to lpfc hba data structure.
4419 * This routine is invoked to unset the driver internal resources set up
4420 * specific for supporting the SLI-4 HBA device it is attached to.
4423 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4425 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4427 /* Free memory allocated for msi-x interrupt vector entries */
4428 kfree(phba->sli4_hba.msix_entries);
4430 /* Free memory allocated for fast-path work queue handles */
4431 kfree(phba->sli4_hba.fcp_eq_hdl);
4433 /* Free the allocated rpi headers. */
4434 lpfc_sli4_remove_rpi_hdrs(phba);
4435 lpfc_sli4_remove_rpis(phba);
4437 /* Free eligible FCF index bmask */
4438 kfree(phba->fcf.fcf_rr_bmask);
4440 /* Free the ELS sgl list */
4441 lpfc_free_active_sgl(phba);
4442 lpfc_free_sgl_list(phba);
4444 /* Free the SCSI sgl management array */
4445 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4447 /* Free the SLI4 queues */
4448 lpfc_sli4_queue_destroy(phba);
4450 /* Free the completion queue EQ event pool */
4451 lpfc_sli4_cq_event_release_all(phba);
4452 lpfc_sli4_cq_event_pool_destroy(phba);
4454 /* Free the bsmbx region. */
4455 lpfc_destroy_bootstrap_mbox(phba);
4457 /* Free the SLI Layer memory with SLI4 HBAs */
4458 lpfc_mem_free_all(phba);
4460 /* Free the current connect table */
4461 list_for_each_entry_safe(conn_entry, next_conn_entry,
4462 &phba->fcf_conn_rec_list, list) {
4463 list_del_init(&conn_entry->list);
4464 kfree(conn_entry);
4471 * lpfc_init_api_table_setup - Set up init api function jump table
4472 * @phba: The hba struct for which this call is being executed.
4473 * @dev_grp: The HBA PCI-Device group number.
4475 * This routine sets up the device INIT interface API function jump table
4478 * Returns: 0 - success, -ENODEV - failure.
4481 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4483 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4484 phba->lpfc_hba_down_link = lpfc_hba_down_link;
4485 switch (dev_grp) {
4486 case LPFC_PCI_DEV_LP:
4487 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4488 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4489 phba->lpfc_stop_port = lpfc_stop_port_s3;
4490 break;
4491 case LPFC_PCI_DEV_OC:
4492 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4493 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4494 phba->lpfc_stop_port = lpfc_stop_port_s4;
4495 break;
4496 default:
4497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4498 "1431 Invalid HBA PCI-device group: 0x%x\n",
4499 dev_grp);
4500 return -ENODEV;
4501 }
4502 return 0;
4503 }
4507 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4508 * @phba: pointer to lpfc hba data structure.
4510 * This routine is invoked to set up the driver internal resources before the
4511 * device specific resource setup to support the HBA device it is attached to.
4515 * other values - error
4518 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4521 * Driver resources common to all SLI revisions
4523 atomic_set(&phba->fast_event_count, 0);
4524 spin_lock_init(&phba->hbalock);
4526 /* Initialize ndlp management spinlock */
4527 spin_lock_init(&phba->ndlp_lock);
4529 INIT_LIST_HEAD(&phba->port_list);
4530 INIT_LIST_HEAD(&phba->work_list);
4531 init_waitqueue_head(&phba->wait_4_mlo_m_q);
4533 /* Initialize the wait queue head for the kernel thread */
4534 init_waitqueue_head(&phba->work_waitq);
4536 /* Initialize the scsi buffer list used by driver for scsi IO */
4537 spin_lock_init(&phba->scsi_buf_list_lock);
4538 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4540 /* Initialize the fabric iocb list */
4541 INIT_LIST_HEAD(&phba->fabric_iocb_list);
4543 /* Initialize list to save ELS buffers */
4544 INIT_LIST_HEAD(&phba->elsbuf);
4546 /* Initialize FCF connection rec list */
4547 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4553 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4554 * @phba: pointer to lpfc hba data structure.
4556 * This routine is invoked to set up the driver internal resources after the
4557 * device specific resource setup to support the HBA device it is attached to.
4561 * other values - error
4564 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4568 /* Startup the kernel thread for this host adapter. */
4569 phba->worker_thread = kthread_run(lpfc_do_work, phba,
4570 "lpfc_worker_%d", phba->brd_no);
4571 if (IS_ERR(phba->worker_thread)) {
4572 error = PTR_ERR(phba->worker_thread);
4580 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4581 * @phba: pointer to lpfc hba data structure.
4583 * This routine is invoked to unset the driver internal resources set up after
4584 * the device specific resource setup for supporting the HBA device it
4585 * is attached to.
4588 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4590 /* Stop kernel worker thread */
4591 kthread_stop(phba->worker_thread);
4595 * lpfc_free_iocb_list - Free iocb list.
4596 * @phba: pointer to lpfc hba data structure.
4598 * This routine is invoked to free the driver's IOCB list and memory.
4601 lpfc_free_iocb_list(struct lpfc_hba *phba)
4603 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4605 spin_lock_irq(&phba->hbalock);
4606 list_for_each_entry_safe(iocbq_entry, iocbq_next,
4607 &phba->lpfc_iocb_list, list) {
4608 list_del(&iocbq_entry->list);
4609 kfree(iocbq_entry);
4610 phba->total_iocbq_bufs--;
4612 spin_unlock_irq(&phba->hbalock);
4618 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4619 * @phba: pointer to lpfc hba data structure.
4621 * This routine is invoked to allocate and initialize the driver's IOCB
4622 * list and set up the IOCB tag array accordingly.
4626 * other values - error
4629 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4631 struct lpfc_iocbq *iocbq_entry = NULL;
4635 /* Initialize and populate the iocb list per host. */
4636 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4637 for (i = 0; i < iocb_count; i++) {
4638 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4639 if (iocbq_entry == NULL) {
4640 printk(KERN_ERR "%s: only allocated %d iocbs of "
4641 "expected %d count. Unloading driver.\n",
4642 __func__, i, LPFC_IOCB_LIST_CNT);
4643 goto out_free_iocbq;
4646 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4647 if (iotag == 0) {
4648 kfree(iocbq_entry);
4649 printk(KERN_ERR "%s: failed to allocate IOTAG. "
4650 "Unloading driver.\n", __func__);
4651 goto out_free_iocbq;
4652 }
4653 iocbq_entry->sli4_xritag = NO_XRI;
4655 spin_lock_irq(&phba->hbalock);
4656 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4657 phba->total_iocbq_bufs++;
4658 spin_unlock_irq(&phba->hbalock);
4664 lpfc_free_iocb_list(phba);
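/*
 * Editorial note: lpfc_free_iocb_list() iterates with
 * list_for_each_entry_safe(), which is required whenever entries are
 * unlinked while walking the list; the "_safe" form caches the next
 * pointer before the current entry is deleted and freed, e.g. (sketch):
 *
 *	list_for_each_entry_safe(pos, n, &head, list) {
 *		list_del(&pos->list);
 *		kfree(pos);
 *	}
 */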
4670 * lpfc_free_sgl_list - Free sgl list.
4671 * @phba: pointer to lpfc hba data structure.
4673 * This routine is invoked to free the driver's sgl list and memory.
4676 lpfc_free_sgl_list(struct lpfc_hba *phba)
4678 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4679 LIST_HEAD(sglq_list);
4681 spin_lock_irq(&phba->hbalock);
4682 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4683 spin_unlock_irq(&phba->hbalock);
4685 list_for_each_entry_safe(sglq_entry, sglq_next,
4686 &sglq_list, list) {
4687 list_del(&sglq_entry->list);
4688 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4689 kfree(sglq_entry);
4690 phba->sli4_hba.total_sglq_bufs--;
4692 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4696 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4697 * @phba: pointer to lpfc hba data structure.
4699 * This routine is invoked to allocate the driver's active sgl memory.
4700 * This array will hold the sglq_entry's for active IOs.
4703 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4706 size = sizeof(struct lpfc_sglq *);
4707 size *= phba->sli4_hba.max_cfg_param.max_xri;
4709 phba->sli4_hba.lpfc_sglq_active_list =
4710 kzalloc(size, GFP_KERNEL);
4711 if (!phba->sli4_hba.lpfc_sglq_active_list)
4717 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4718 * @phba: pointer to lpfc hba data structure.
4720 * This routine is invoked to walk through the array of active sglq entries
4721 * and free all of the resources.
4722 * This is just a placeholder for now.
4725 lpfc_free_active_sgl(struct lpfc_hba *phba)
4727 kfree(phba->sli4_hba.lpfc_sglq_active_list);
4731 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4732 * @phba: pointer to lpfc hba data structure.
4734 * This routine is invoked to allocate and initialize the driver's sgl
4735 * list and set up the sgl xritag tag array accordingly.
4739 * other values - error
4742 lpfc_init_sgl_list(struct lpfc_hba *phba)
4744 struct lpfc_sglq *sglq_entry = NULL;
4748 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4749 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4750 "2400 lpfc_init_sgl_list els %d.\n",
4752 /* Initialize and populate the sglq list per host/VF. */
4753 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4754 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4756 /* Sanity check on XRI management */
4757 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4759 "2562 No room left for SCSI XRI allocation: "
4760 "max_xri=%d, els_xri=%d\n",
4761 phba->sli4_hba.max_cfg_param.max_xri,
4766 /* Allocate memory for the ELS XRI management array */
4767 phba->sli4_hba.lpfc_els_sgl_array =
4768 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4771 if (!phba->sli4_hba.lpfc_els_sgl_array) {
4772 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4773 "2401 Failed to allocate memory for ELS "
4774 "XRI management array of size %d.\n",
4779 /* Keep the SCSI XRI into the XRI management array */
4780 phba->sli4_hba.scsi_xri_max =
4781 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4782 phba->sli4_hba.scsi_xri_cnt = 0;
4784 phba->sli4_hba.lpfc_scsi_psb_array =
4785 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4786 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4788 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4789 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4790 "2563 Failed to allocate memory for SCSI "
4791 "XRI management array of size %d.\n",
4792 phba->sli4_hba.scsi_xri_max);
4793 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4797 for (i = 0; i < els_xri_cnt; i++) {
4798 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4799 if (sglq_entry == NULL) {
4800 printk(KERN_ERR "%s: only allocated %d sgls of "
4801 "expected %d count. Unloading driver.\n",
4802 __func__, i, els_xri_cnt);
4806 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4807 if (sglq_entry->sli4_xritag == NO_XRI) {
4809 printk(KERN_ERR "%s: failed to allocate XRI.\n"
4810 "Unloading driver.\n", __func__);
4813 sglq_entry->buff_type = GEN_BUFF_TYPE;
4814 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4815 if (sglq_entry->virt == NULL) {
4817 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4818 "Unloading driver.\n", __func__);
4821 sglq_entry->sgl = sglq_entry->virt;
4822 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4824 /* The list order is used by later block SGL registration */
4825 spin_lock_irq(&phba->hbalock);
4826 sglq_entry->state = SGL_FREED;
4827 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4828 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4829 phba->sli4_hba.total_sglq_bufs++;
4830 spin_unlock_irq(&phba->hbalock);
4835 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4836 lpfc_free_sgl_list(phba);
4841 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4842 * @phba: pointer to lpfc hba data structure.
4844 * This routine is invoked to post rpi header templates to the
4845 * HBA consistent with the SLI-4 interface spec. This routine
4846 * posts a PAGE_SIZE memory region to the port to hold up to
4847 * PAGE_SIZE / 64 rpi context headers.
4848 * No locks are held here because this is an initialization routine
4849 * called only from probe or lpfc_online when interrupts are not
4850 * enabled and the driver is reinitializing the device.
4854 * -ENOMEM - No available memory
4855 * -EIO - The mailbox failed to complete successfully.
4858 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4863 struct lpfc_rpi_hdr *rpi_hdr;
4865 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4868 * Provision an rpi bitmask range for discovery. The total count
4869 * is the difference between max and base + 1.
4871 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4872 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4874 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4875 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4877 if (!phba->sli4_hba.rpi_bmask)
4880 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4882 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4883 "0391 Error during rpi post operation\n");
4884 lpfc_sli4_remove_rpis(phba);
4892 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4893 * @phba: pointer to lpfc hba data structure.
4895 * This routine is invoked to allocate a single 4KB memory region to
4896 * support rpis and store them in the phba. This single region
4897 * provides support for up to 64 rpis. The region is used globally
4901 * A valid rpi hdr on success.
4902 * A NULL pointer on any failure.
4904 struct lpfc_rpi_hdr *
4905 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4907 uint16_t rpi_limit, curr_rpi_range;
4908 struct lpfc_dmabuf *dmabuf;
4909 struct lpfc_rpi_hdr *rpi_hdr;
4911 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4912 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4914 spin_lock_irq(&phba->hbalock);
4915 curr_rpi_range = phba->sli4_hba.next_rpi;
4916 spin_unlock_irq(&phba->hbalock);
4919 * The port has a limited number of rpis. The increment here
4920 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4921 * and to allow the full max_rpi range per port.
4923 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4927 * First allocate the protocol header region for the port. The
4928 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4930 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4934 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4935 LPFC_HDR_TEMPLATE_SIZE,
4938 if (!dmabuf->virt) {
4940 goto err_free_dmabuf;
4943 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4944 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4946 goto err_free_coherent;
4949 /* Save the rpi header data for cleanup later. */
4950 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4952 goto err_free_coherent;
4954 rpi_hdr->dmabuf = dmabuf;
4955 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4956 rpi_hdr->page_count = 1;
4957 spin_lock_irq(&phba->hbalock);
4958 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4959 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4962 * The next_rpi stores the next modulo-64 rpi value to post
4963 * in any subsequent rpi memory region postings.
4965 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4966 spin_unlock_irq(&phba->hbalock);
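/*
 * Editorial illustration (assuming LPFC_RPI_HDR_COUNT is 64): successive
 * calls to this routine hand out start_rpi values of base, base + 64,
 * base + 128, and so on, each backed by one 4KB header page, until the
 * increment would pass rpi_limit.
 */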
4970 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4971 dmabuf->virt, dmabuf->phys);
4978 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4979 * @phba: pointer to lpfc hba data structure.
4981 * This routine is invoked to remove all memory resources allocated
4982 * to support rpis. This routine presumes the caller has released all
4983 * rpis consumed by fabric or port logins and is prepared to have
4984 * the header pages removed.
4987 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4989 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4991 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4992 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4993 list_del(&rpi_hdr->list);
4994 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4995 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4996 kfree(rpi_hdr->dmabuf);
5000 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5001 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
5005 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5006 * @pdev: pointer to pci device data structure.
5008 * This routine is invoked to allocate the driver hba data structure for an
5009 * HBA device. If the allocation is successful, the phba reference to the
5010 * PCI device data structure is set.
5013 * pointer to @phba - successful
5016 static struct lpfc_hba *
5017 lpfc_hba_alloc(struct pci_dev *pdev)
5019 struct lpfc_hba *phba;
5021 /* Allocate memory for HBA structure */
5022 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5024 dev_err(&pdev->dev, "failed to allocate hba struct\n");
5028 /* Set reference to PCI device in HBA structure */
5029 phba->pcidev = pdev;
5031 /* Assign an unused board number */
5032 phba->brd_no = lpfc_get_instance();
5033 if (phba->brd_no < 0) {
5034 kfree(phba);
5035 return NULL;
5036 }
5038 spin_lock_init(&phba->ct_ev_lock);
5039 INIT_LIST_HEAD(&phba->ct_ev_waiters);
5045 * lpfc_hba_free - Free driver hba data structure with a device.
5046 * @phba: pointer to lpfc hba data structure.
5048 * This routine is invoked to free the driver hba data structure with an
5052 lpfc_hba_free(struct lpfc_hba *phba)
5054 /* Release the driver assigned board number */
5055 idr_remove(&lpfc_hba_index, phba->brd_no);
5056 kfree(phba);
5062 * lpfc_create_shost - Create hba physical port with associated scsi host.
5063 * @phba: pointer to lpfc hba data structure.
5065 * This routine is invoked to create HBA physical port and associate a SCSI
5070 * other values - error
5073 lpfc_create_shost(struct lpfc_hba *phba)
5075 struct lpfc_vport *vport;
5076 struct Scsi_Host *shost;
5078 /* Initialize HBA FC structure */
5079 phba->fc_edtov = FF_DEF_EDTOV;
5080 phba->fc_ratov = FF_DEF_RATOV;
5081 phba->fc_altov = FF_DEF_ALTOV;
5082 phba->fc_arbtov = FF_DEF_ARBTOV;
5084 atomic_set(&phba->sdev_cnt, 0);
5085 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5089 shost = lpfc_shost_from_vport(vport);
5090 phba->pport = vport;
5091 lpfc_debugfs_initialize(vport);
5092 /* Put reference to SCSI host to driver's device private data */
5093 pci_set_drvdata(phba->pcidev, shost);
5099 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5100 * @phba: pointer to lpfc hba data structure.
5102 * This routine is invoked to destroy HBA physical port and the associated
5106 lpfc_destroy_shost(struct lpfc_hba *phba)
5108 struct lpfc_vport *vport = phba->pport;
5110 /* Destroy physical port that associated with the SCSI host */
5111 destroy_port(vport);
5117 * lpfc_setup_bg - Setup Block guard structures and debug areas.
5118 * @phba: pointer to lpfc hba data structure.
5119 * @shost: the shost to be used to detect Block guard settings.
5121 * This routine sets up the local Block guard protocol settings for @shost.
5122 * This routine also allocates memory for debugging bg buffers.
5125 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5128 if (lpfc_prot_mask && lpfc_prot_guard) {
5129 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5130 "1478 Registering BlockGuard with the "
5132 scsi_host_set_prot(shost, lpfc_prot_mask);
5133 scsi_host_set_guard(shost, lpfc_prot_guard);
5135 if (!_dump_buf_data) {
5137 spin_lock_init(&_dump_buf_lock);
5138 _dump_buf_data =
5139 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5140 if (_dump_buf_data) {
5141 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5142 "9043 BLKGRD: allocated %d pages for "
5143 "_dump_buf_data at 0x%p\n",
5144 (1 << pagecnt), _dump_buf_data);
5145 _dump_buf_data_order = pagecnt;
5146 memset(_dump_buf_data, 0,
5147 ((1 << PAGE_SHIFT) << pagecnt));
5152 if (!_dump_buf_data_order)
5153 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5154 "9044 BLKGRD: ERROR unable to allocate "
5155 "memory for hexdump\n");
5157 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5158 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
5159 "\n", _dump_buf_data);
5160 if (!_dump_buf_dif) {
5162 _dump_buf_dif =
5163 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5164 if (_dump_buf_dif) {
5165 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5166 "9046 BLKGRD: allocated %d pages for "
5167 "_dump_buf_dif at 0x%p\n",
5168 (1 << pagecnt), _dump_buf_dif);
5169 _dump_buf_dif_order = pagecnt;
5170 memset(_dump_buf_dif, 0,
5171 ((1 << PAGE_SHIFT) << pagecnt));
5176 if (!_dump_buf_dif_order)
5177 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5178 "9047 BLKGRD: ERROR unable to allocate "
5179 "memory for hexdump\n");
5181 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5182 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5187 * lpfc_post_init_setup - Perform necessary device post initialization setup.
5188 * @phba: pointer to lpfc hba data structure.
5190 * This routine is invoked to perform all the necessary post initialization
5191 * setup for the device.
5194 lpfc_post_init_setup(struct lpfc_hba *phba)
5196 struct Scsi_Host *shost;
5197 struct lpfc_adapter_event_header adapter_event;
5199 /* Get the default values for Model Name and Description */
5200 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5203 * hba setup may have changed the hba_queue_depth so we need to
5204 * adjust the value of can_queue.
5206 shost = pci_get_drvdata(phba->pcidev);
5207 shost->can_queue = phba->cfg_hba_queue_depth - 10;
5208 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5209 lpfc_setup_bg(phba, shost);
5211 lpfc_host_attrib_init(shost);
5213 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5214 spin_lock_irq(shost->host_lock);
5215 lpfc_poll_start_timer(phba);
5216 spin_unlock_irq(shost->host_lock);
5219 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5220 "0428 Perform SCSI scan\n");
5221 /* Send board arrival event to upper layer */
5222 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5223 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5224 fc_host_post_vendor_event(shost, fc_get_event_number(),
5225 sizeof(adapter_event),
5226 (char *) &adapter_event,
5232 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5233 * @phba: pointer to lpfc hba data structure.
5235 * This routine is invoked to set up the PCI device memory space for device
5236 * with SLI-3 interface spec.
5240 * other values - error
5243 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5245 struct pci_dev *pdev;
5246 unsigned long bar0map_len, bar2map_len;
5249 int error = -ENODEV;
5251 /* Obtain PCI device reference */
5255 pdev = phba->pcidev;
5257 /* Set the device DMA mask size */
5258 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5259 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
5260 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5261 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
5266 /* Get the bus address of Bar0 and Bar2 and the number of bytes
5267 * required by each mapping.
5269 phba->pci_bar0_map = pci_resource_start(pdev, 0);
5270 bar0map_len = pci_resource_len(pdev, 0);
5272 phba->pci_bar2_map = pci_resource_start(pdev, 2);
5273 bar2map_len = pci_resource_len(pdev, 2);
5275 /* Map HBA SLIM to a kernel virtual address. */
5276 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5277 if (!phba->slim_memmap_p) {
5278 dev_printk(KERN_ERR, &pdev->dev,
5279 "ioremap failed for SLIM memory.\n");
5283 /* Map HBA Control Registers to a kernel virtual address. */
5284 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5285 if (!phba->ctrl_regs_memmap_p) {
5286 dev_printk(KERN_ERR, &pdev->dev,
5287 "ioremap failed for HBA control registers.\n");
5288 goto out_iounmap_slim;
5291 /* Allocate memory for SLI-2 structures */
5292 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5296 if (!phba->slim2p.virt)
5299 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5300 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5301 phba->mbox_ext = (phba->slim2p.virt +
5302 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5303 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5304 phba->IOCBs = (phba->slim2p.virt +
5305 offsetof(struct lpfc_sli2_slim, IOCBs));
5307 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5308 lpfc_sli_hbq_size(),
5309 &phba->hbqslimp.phys,
5311 if (!phba->hbqslimp.virt)
5314 hbq_count = lpfc_sli_hbq_count();
5315 ptr = phba->hbqslimp.virt;
5316 for (i = 0; i < hbq_count; ++i) {
5317 phba->hbqs[i].hbq_virt = ptr;
5318 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5319 ptr += (lpfc_hbq_defs[i]->entry_count *
5320 sizeof(struct lpfc_hbq_entry));
5322 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5323 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5325 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5327 INIT_LIST_HEAD(&phba->rb_pend_list);
5329 phba->MBslimaddr = phba->slim_memmap_p;
5330 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5331 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5332 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5333 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5338 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5339 phba->slim2p.virt, phba->slim2p.phys);
5341 iounmap(phba->ctrl_regs_memmap_p);
5343 iounmap(phba->slim_memmap_p);
5349 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5350 * @phba: pointer to lpfc hba data structure.
5352 * This routine is invoked to unset the PCI device memory space for device
5353 * with SLI-3 interface spec.
5356 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5358 struct pci_dev *pdev;
5360 /* Obtain PCI device reference */
5364 pdev = phba->pcidev;
5366 /* Free coherent DMA memory allocated */
5367 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5368 phba->hbqslimp.virt, phba->hbqslimp.phys);
5369 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5370 phba->slim2p.virt, phba->slim2p.phys);
5372 /* I/O memory unmap */
5373 iounmap(phba->ctrl_regs_memmap_p);
5374 iounmap(phba->slim_memmap_p);
5380 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5381 * @phba: pointer to lpfc hba data structure.
5383 * This routine is invoked to wait for the SLI4 device Power On Self Test (POST)
5384 * to complete and to check its status.
5386 * Return 0 if successful, otherwise -ENODEV.
5389 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5391 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
5392 int i, port_error = -ENODEV;
5394 if (!phba->sli4_hba.STAregaddr)
5395 return -ENODEV;
5397 /* Wait up to 30 seconds for the SLI Port POST done and ready */
5398 for (i = 0; i < 3000; i++) {
5399 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
5400 /* Encounter fatal POST error, break out */
5401 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
5402 port_error = -ENODEV;
5403 break;
5404 }
5405 if (LPFC_POST_STAGE_ARMFW_READY ==
5406 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
5407 port_error = 0;
5408 break;
5409 }
5410 msleep(10);
5411 }
5414 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5415 "1408 Failure HBA POST Status: sta_reg=0x%x, "
5416 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
5417 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
5418 bf_get(lpfc_hst_state_perr, &sta_reg),
5419 bf_get(lpfc_hst_state_sfi, &sta_reg),
5420 bf_get(lpfc_hst_state_nip, &sta_reg),
5421 bf_get(lpfc_hst_state_ipc, &sta_reg),
5422 bf_get(lpfc_hst_state_xrom, &sta_reg),
5423 bf_get(lpfc_hst_state_dl, &sta_reg),
5424 bf_get(lpfc_hst_state_port_status, &sta_reg));
5426 /* Log device information */
5427 phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
5428 if (bf_get(lpfc_sli_intf_valid,
5429 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
5430 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5431 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
5432 "IFType=0x%x, SLIHint_1=0x%x, SLIHint_2=0x%x, "
5434 bf_get(lpfc_sli_intf_sli_family,
5435 &phba->sli4_hba.sli_intf),
5436 bf_get(lpfc_sli_intf_slirev,
5437 &phba->sli4_hba.sli_intf),
5438 bf_get(lpfc_sli_intf_if_type,
5439 &phba->sli4_hba.sli_intf),
5440 bf_get(lpfc_sli_intf_sli_hint1,
5441 &phba->sli4_hba.sli_intf),
5442 bf_get(lpfc_sli_intf_sli_hint2,
5443 &phba->sli4_hba.sli_intf),
5444 bf_get(lpfc_sli_intf_func_type,
5445 &phba->sli4_hba.sli_intf));
5448 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
5449 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
5450 /* With an unrecoverable error, log the error message and return error */
5451 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
5452 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
5453 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5454 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5455 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5456 "1422 HBA Unrecoverable error: "
5457 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
5458 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
5459 uerrlo_reg.word0, uerrhi_reg.word0,
5460 phba->sli4_hba.ue_mask_lo,
5461 phba->sli4_hba.ue_mask_hi);
5469 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5470 * @phba: pointer to lpfc hba data structure.
5472 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5476 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
5478 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5479 LPFC_UERR_STATUS_LO;
5480 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5481 LPFC_UERR_STATUS_HI;
5482 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
5484 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
5486 phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
5491 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5492 * @phba: pointer to lpfc hba data structure.
5494 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5498 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5501 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5503 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5505 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5507 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5513 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5514 * @phba: pointer to lpfc hba data structure.
5515 * @vf: virtual function number
5517 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5518 * based on the given virtual function number, @vf.
5520 * Return 0 if successful, otherwise -ENODEV.
5523 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5525 if (vf > LPFC_VIR_FUNC_MAX)
5528 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5529 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5530 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5531 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5532 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5533 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5534 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5535 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5536 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5537 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
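/*
 * Editorial illustration (hypothetical layout): with vf == 0 every
 * doorbell above lands in the first doorbell page, and each higher vf
 * adds one LPFC_VFR_PAGE_SIZE stride, e.g.:
 *
 *	RQDBregaddr = drbl_regs_memmap_p
 *		      + vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL;
 */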
5542 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5543 * @phba: pointer to lpfc hba data structure.
5545 * This routine is invoked to create the bootstrap mailbox
5546 * region consistent with the SLI-4 interface spec. This
5547 * routine allocates all memory necessary to communicate
5548 * mailbox commands to the port and sets up all alignment
5549 * needs. No locks are expected to be held when calling
5554 * -ENOMEM - could not allocate memory.
5557 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5560 struct lpfc_dmabuf *dmabuf;
5561 struct dma_address *dma_address;
5565 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5570 * The bootstrap mailbox region is comprised of 2 parts
5571 * plus an alignment restriction of 16 bytes.
5573 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5574 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5578 if (!dmabuf->virt) {
5582 memset(dmabuf->virt, 0, bmbx_size);
5585 * Initialize the bootstrap mailbox pointers now so that the register
5586 * operations are simple later. The mailbox dma address is required
5587 * to be 16-byte aligned. Also align the virtual memory as each
5588 * mailbox is copied into the bmbx mailbox region before issuing the
5589 * command to the port.
5591 phba->sli4_hba.bmbx.dmabuf = dmabuf;
5592 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5594 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5595 LPFC_ALIGN_16_BYTE);
5596 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5597 LPFC_ALIGN_16_BYTE);
5600 * Set the high and low physical addresses now. The SLI4 alignment
5601 * requirement is 16 bytes and the mailbox is posted to the port
5602 * as two 30-bit addresses. The other data is a bit marking whether
5603 * the 30-bit address is the high or low address.
5604 * Upcast bmbx aphys to 64bits so shift instruction compiles
5605 * clean on 32 bit machines.
5607 dma_address = &phba->sli4_hba.bmbx.dma_address;
5608 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5609 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5610 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5611 LPFC_BMBX_BIT1_ADDR_HI);
5613 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5614 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5615 LPFC_BMBX_BIT1_ADDR_LO);
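/*
 * Worked illustration (hypothetical address): for an aligned aphys of
 * 0x0000001234567890, addr_lo carries bits 4..33 ((aphys >> 4) &
 * 0x3fffffff, shifted left by 2 with the low-address marker bit) and
 * addr_hi carries bits 34..63, so the port reassembles the full
 * 16-byte-aligned 64-bit address from the two 30-bit fields.
 */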
5620 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5621 * @phba: pointer to lpfc hba data structure.
5623 * This routine is invoked to teardown the bootstrap mailbox
5624 * region and release all host resources. This routine requires
5625 * the caller to ensure all mailbox commands recovered, no
5626 * additional mailbox comands are sent, and interrupts are disabled
5627 * before calling this routine.
5631 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5633 dma_free_coherent(&phba->pcidev->dev,
5634 phba->sli4_hba.bmbx.bmbx_size,
5635 phba->sli4_hba.bmbx.dmabuf->virt,
5636 phba->sli4_hba.bmbx.dmabuf->phys);
5638 kfree(phba->sli4_hba.bmbx.dmabuf);
5639 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5643 * lpfc_sli4_read_config - Get the config parameters.
5644 * @phba: pointer to lpfc hba data structure.
5646 * This routine is invoked to read the configuration parameters from the HBA.
5647 * The configuration parameters are used to set the base and maximum values
5648 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5649 * allocation for the port.
5653 * -ENOMEM - No available memory
5654 * -EIO - The mailbox failed to complete successfully.
5657 lpfc_sli4_read_config(struct lpfc_hba *phba)
5660 struct lpfc_mbx_read_config *rd_config;
5663 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5665 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5666 "2011 Unable to allocate memory for issuing "
5667 "SLI_CONFIG_SPECIAL mailbox command\n");
5671 lpfc_read_config(phba, pmb);
5673 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5674 if (rc != MBX_SUCCESS) {
5675 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5676 "2012 Mailbox failed , mbxCmd x%x "
5677 "READ_CONFIG, mbxStatus x%x\n",
5678 bf_get(lpfc_mqe_command, &pmb->u.mqe),
5679 bf_get(lpfc_mqe_status, &pmb->u.mqe));
5682 rd_config = &pmb->u.mqe.un.rd_config;
5683 phba->sli4_hba.max_cfg_param.max_xri =
5684 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5685 phba->sli4_hba.max_cfg_param.xri_base =
5686 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5687 phba->sli4_hba.max_cfg_param.max_vpi =
5688 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5689 phba->sli4_hba.max_cfg_param.vpi_base =
5690 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5691 phba->sli4_hba.max_cfg_param.max_rpi =
5692 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5693 phba->sli4_hba.max_cfg_param.rpi_base =
5694 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5695 phba->sli4_hba.max_cfg_param.max_vfi =
5696 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5697 phba->sli4_hba.max_cfg_param.vfi_base =
5698 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5699 phba->sli4_hba.max_cfg_param.max_fcfi =
5700 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5701 phba->sli4_hba.max_cfg_param.fcfi_base =
5702 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5703 phba->sli4_hba.max_cfg_param.max_eq =
5704 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5705 phba->sli4_hba.max_cfg_param.max_rq =
5706 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5707 phba->sli4_hba.max_cfg_param.max_wq =
5708 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5709 phba->sli4_hba.max_cfg_param.max_cq =
5710 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5711 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5712 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5713 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5714 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5715 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5716 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5717 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5718 phba->max_vports = phba->max_vpi;
5719 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5720 "2003 cfg params XRI(B:%d M:%d), "
5724 "FCFI(B:%d M:%d)\n",
5725 phba->sli4_hba.max_cfg_param.xri_base,
5726 phba->sli4_hba.max_cfg_param.max_xri,
5727 phba->sli4_hba.max_cfg_param.vpi_base,
5728 phba->sli4_hba.max_cfg_param.max_vpi,
5729 phba->sli4_hba.max_cfg_param.vfi_base,
5730 phba->sli4_hba.max_cfg_param.max_vfi,
5731 phba->sli4_hba.max_cfg_param.rpi_base,
5732 phba->sli4_hba.max_cfg_param.max_rpi,
5733 phba->sli4_hba.max_cfg_param.fcfi_base,
5734 phba->sli4_hba.max_cfg_param.max_fcfi);
5736 mempool_free(pmb, phba->mbox_mem_pool);
5738 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5739 if (phba->cfg_hba_queue_depth >
5740 (phba->sli4_hba.max_cfg_param.max_xri -
5741 lpfc_sli4_get_els_iocb_cnt(phba)))
5742 phba->cfg_hba_queue_depth =
5743 phba->sli4_hba.max_cfg_param.max_xri -
5744 lpfc_sli4_get_els_iocb_cnt(phba);
5749 * lpfc_dev_endian_order_setup - Notify the port of the host's endian order.
5750 * @phba: pointer to lpfc hba data structure.
5752 * This routine is invoked to setup the host-side endian order to the
5753 * HBA consistent with the SLI-4 interface spec.
5757 * -ENOMEM - No available memory
5758 * -EIO - The mailbox failed to complete successfully.
5761 lpfc_setup_endian_order(struct lpfc_hba *phba)
5763 LPFC_MBOXQ_t *mboxq;
5765 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5766 HOST_ENDIAN_HIGH_WORD1};
5768 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5770 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5771 "0492 Unable to allocate memory for issuing "
5772 "SLI_CONFIG_SPECIAL mailbox command\n");
5777 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
5778 * words to contain special data values and no other data.
5780 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5781 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5782 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5783 if (rc != MBX_SUCCESS) {
5784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5785 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
5786 "status x%x\n", rc);
5787 rc = -EIO;
5788 }
5791 mempool_free(mboxq, phba->mbox_mem_pool);
5796 * lpfc_sli4_queue_create - Create all the SLI4 queues
5797 * @phba: pointer to lpfc hba data structure.
5799 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5800 * operation. For each SLI4 queue type, the parameters such as queue entry
5801 * count (queue depth) shall be taken from the module parameter. For now,
5802 * we just use some constant number as a placeholder.
5806 * -ENOMEM - No available memory
5807 * -EIO - The mailbox failed to complete successfully.
5810 lpfc_sli4_queue_create(struct lpfc_hba *phba)
5812 struct lpfc_queue *qdesc;
5813 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5814 int cfg_fcp_wq_count;
5815 int cfg_fcp_eq_count;
5818 * Sanity check for configured queue parameters against the run-time
5822 /* Sanity check on FCP fast-path WQ parameters */
5823 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5824 if (cfg_fcp_wq_count >
5825 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5826 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5827 LPFC_SP_WQN_DEF;
5828 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5829 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5830 "2581 Not enough WQs (%d) from "
5831 "the pci function for supporting "
5833 phba->sli4_hba.max_cfg_param.max_wq,
5834 phba->cfg_fcp_wq_count);
5837 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5838 "2582 Not enough WQs (%d) from the pci "
5839 "function for supporting the requested "
5840 "FCP WQs (%d), the actual FCP WQs can "
5841 "be supported: %d\n",
5842 phba->sli4_hba.max_cfg_param.max_wq,
5843 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5845 /* The actual number of FCP work queues adopted */
5846 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5848 /* Sanity check on FCP fast-path EQ parameters */
5849 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5850 if (cfg_fcp_eq_count >
5851 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5852 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5853 LPFC_SP_EQN_DEF;
5854 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5856 "2574 Not enough EQs (%d) from the "
5857 "pci function for supporting FCP "
5859 phba->sli4_hba.max_cfg_param.max_eq,
5860 phba->cfg_fcp_eq_count);
5863 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5864 "2575 Not enough EQs (%d) from the pci "
5865 "function for supporting the requested "
5866 "FCP EQs (%d), the actual FCP EQs can "
5867 "be supported: %d\n",
5868 phba->sli4_hba.max_cfg_param.max_eq,
5869 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5871 /* It does not make sense to have more EQs than WQs */
5872 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5873 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5874 "2593 The FCP EQ count(%d) cannot be greater "
5875 "than the FCP WQ count(%d), limiting the "
5876 "FCP EQ count to %d\n", cfg_fcp_eq_count,
5877 phba->cfg_fcp_wq_count,
5878 phba->cfg_fcp_wq_count);
5879 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5881 /* The actual number of FCP event queues adopted */
5882 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5883 /* The overall number of event queues used */
5884 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
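/*
 * Editorial illustration (assuming LPFC_SP_EQN_DEF is 1): with four
 * fast-path FCP EQs configured, cfg_eqn becomes 5: one EQ per FCP
 * channel plus the single slow-path EQ allocated below.
 */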
5887 * Create Event Queues (EQs)
5890 /* Get EQ depth from module parameter, fake the default for now */
5891 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5892 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5894 /* Create slow path event queue */
5895 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5896 phba->sli4_hba.eq_ecount);
5898 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5899 "0496 Failed allocate slow-path EQ\n");
5902 phba->sli4_hba.sp_eq = qdesc;
5904 /* Create fast-path FCP Event Queue(s) */
5905 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5906 phba->cfg_fcp_eq_count), GFP_KERNEL);
5907 if (!phba->sli4_hba.fp_eq) {
5908 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5909 "2576 Failed allocate memory for fast-path "
5910 "EQ record array\n");
5911 goto out_free_sp_eq;
5913 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5914 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5915 phba->sli4_hba.eq_ecount);
5917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5918 "0497 Failed allocate fast-path EQ\n");
5919 goto out_free_fp_eq;
5921 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5925 * Create Complete Queues (CQs)
5928 /* Get CQ depth from module parameter, fake the default for now */
5929 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5930 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5932 /* Create slow-path Mailbox Command Complete Queue */
5933 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5934 phba->sli4_hba.cq_ecount);
5936 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5937 "0500 Failed allocate slow-path mailbox CQ\n");
5938 goto out_free_fp_eq;
5940 phba->sli4_hba.mbx_cq = qdesc;
5942 /* Create slow-path ELS Complete Queue */
5943 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5944 phba->sli4_hba.cq_ecount);
5946 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5947 "0501 Failed allocate slow-path ELS CQ\n");
5948 goto out_free_mbx_cq;
5950 phba->sli4_hba.els_cq = qdesc;
5953 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5954 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5955 phba->cfg_fcp_eq_count), GFP_KERNEL);
5956 if (!phba->sli4_hba.fcp_cq) {
5957 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5958 "2577 Failed allocate memory for fast-path "
5959 "CQ record array\n");
5960 goto out_free_els_cq;
5962 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5963 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5964 phba->sli4_hba.cq_ecount);
5966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5967 "0499 Failed allocate fast-path FCP "
5968 "CQ (%d)\n", fcp_cqidx);
5969 goto out_free_fcp_cq;
5971 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5974 /* Create Mailbox Command Queue */
5975 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5976 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5978 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5979 phba->sli4_hba.mq_ecount);
5980 if (!qdesc) {
5981 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5982 "0505 Failed allocate slow-path MQ\n");
5983 goto out_free_fcp_cq;
5984 }
5985 phba->sli4_hba.mbx_wq = qdesc;
5987 /*
5988 * Create all the Work Queues (WQs)
5989 */
5990 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5991 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5993 /* Create slow-path ELS Work Queue */
5994 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5995 phba->sli4_hba.wq_ecount);
5996 if (!qdesc) {
5997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5998 "0504 Failed allocate slow-path ELS WQ\n");
5999 goto out_free_mbx_wq;
6000 }
6001 phba->sli4_hba.els_wq = qdesc;
6003 /* Create fast-path FCP Work Queue(s) */
6004 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6005 phba->cfg_fcp_wq_count), GFP_KERNEL);
6006 if (!phba->sli4_hba.fcp_wq) {
6007 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6008 "2578 Failed allocate memory for fast-path "
6009 "WQ record array\n");
6010 goto out_free_els_wq;
6011 }
6012 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6013 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6014 phba->sli4_hba.wq_ecount);
6015 if (!qdesc) {
6016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6017 "0503 Failed allocate fast-path FCP "
6018 "WQ (%d)\n", fcp_wqidx);
6019 goto out_free_fcp_wq;
6020 }
6021 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6022 }
6024 /*
6025 * Create Receive Queue (RQ)
6026 */
6027 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6028 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6030 /* Create Receive Queue for header */
6031 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6032 phba->sli4_hba.rq_ecount);
6033 if (!qdesc) {
6034 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6035 "0506 Failed allocate receive HRQ\n");
6036 goto out_free_fcp_wq;
6037 }
6038 phba->sli4_hba.hdr_rq = qdesc;
6040 /* Create Receive Queue for data */
6041 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6042 phba->sli4_hba.rq_ecount);
6043 if (!qdesc) {
6044 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6045 "0507 Failed allocate receive DRQ\n");
6046 goto out_free_hdr_rq;
6047 }
6048 phba->sli4_hba.dat_rq = qdesc;
6050 return 0;
6052 out_free_hdr_rq:
6053 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6054 phba->sli4_hba.hdr_rq = NULL;
6055 out_free_fcp_wq:
6056 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6057 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6058 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6059 }
6060 kfree(phba->sli4_hba.fcp_wq);
6061 out_free_els_wq:
6062 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6063 phba->sli4_hba.els_wq = NULL;
6064 out_free_mbx_wq:
6065 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6066 phba->sli4_hba.mbx_wq = NULL;
6067 out_free_fcp_cq:
6068 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6069 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6070 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6071 }
6072 kfree(phba->sli4_hba.fcp_cq);
6073 out_free_els_cq:
6074 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6075 phba->sli4_hba.els_cq = NULL;
6076 out_free_mbx_cq:
6077 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6078 phba->sli4_hba.mbx_cq = NULL;
6079 out_free_fp_eq:
6080 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6081 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6082 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6083 }
6084 kfree(phba->sli4_hba.fp_eq);
6085 out_free_sp_eq:
6086 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6087 phba->sli4_hba.sp_eq = NULL;
6088 out_error:
6089 return -ENOMEM;
6090 }
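/*
 * Minimal usage sketch (illustrative, not driver code): the create and
 * destroy routines are paired, and a failure inside queue creation
 * unwinds through the labels above before returning:
 *
 *	if (lpfc_sli4_queue_create(phba))
 *		return -ENOMEM;		// partial allocations already freed
 *	...
 *	lpfc_sli4_queue_destroy(phba);	// full teardown on unload
 */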
6092 /**
6093 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
6094 * @phba: pointer to lpfc hba data structure.
6095 *
6096 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
6097 * operation.
6098 *
6099 * Return codes
6100 * 0 - successful
6101 * -ENOMEM - No available memory
6102 * -EIO - The mailbox failed to complete successfully.
6103 **/
6104 static void
6105 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6106 {
6107 int fcp_qidx;
6109 /* Release mailbox command work queue */
6110 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6111 phba->sli4_hba.mbx_wq = NULL;
6113 /* Release ELS work queue */
6114 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6115 phba->sli4_hba.els_wq = NULL;
6117 /* Release FCP work queue */
6118 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6119 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6120 kfree(phba->sli4_hba.fcp_wq);
6121 phba->sli4_hba.fcp_wq = NULL;
6123 /* Release unsolicited receive queue */
6124 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6125 phba->sli4_hba.hdr_rq = NULL;
6126 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
6127 phba->sli4_hba.dat_rq = NULL;
6129 /* Release ELS complete queue */
6130 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6131 phba->sli4_hba.els_cq = NULL;
6133 /* Release mailbox command complete queue */
6134 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6135 phba->sli4_hba.mbx_cq = NULL;
6137 /* Release FCP response complete queue */
6138 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6139 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6140 kfree(phba->sli4_hba.fcp_cq);
6141 phba->sli4_hba.fcp_cq = NULL;
6143 /* Release fast-path event queue */
6144 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6145 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6146 kfree(phba->sli4_hba.fp_eq);
6147 phba->sli4_hba.fp_eq = NULL;
6149 /* Release slow-path event queue */
6150 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6151 phba->sli4_hba.sp_eq = NULL;
6152 }
6156 /**
6157 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
6158 * @phba: pointer to lpfc hba data structure.
6159 *
6160 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
6161 * operation.
6162 *
6163 * Return codes
6164 * 0 - successful
6165 * -ENOMEM - No available memory
6166 * -EIO - The mailbox failed to complete successfully.
6167 **/
6168 int
6169 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6170 {
6171 int rc = -ENOMEM;
6172 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6173 int fcp_cq_index = 0;
6175 /*
6176 * Set up Event Queues (EQs)
6177 */
6179 /* Set up slow-path event queue */
6180 if (!phba->sli4_hba.sp_eq) {
6181 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6182 "0520 Slow-path EQ not allocated\n");
6183 goto out_error;
6184 }
6185 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6186 LPFC_SP_DEF_IMAX);
6187 if (rc) {
6188 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6189 "0521 Failed setup of slow-path EQ: "
6190 "rc = 0x%x\n", rc);
6191 goto out_error;
6192 }
6193 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6194 "2583 Slow-path EQ setup: queue-id=%d\n",
6195 phba->sli4_hba.sp_eq->queue_id);
6197 /* Set up fast-path event queue */
6198 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6199 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6201 "0522 Fast-path EQ (%d) not "
6202 "allocated\n", fcp_eqidx);
6203 goto out_destroy_fp_eq;
6205 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6206 phba->cfg_fcp_imax);
6208 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6209 "0523 Failed setup of fast-path EQ "
6210 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
6211 goto out_destroy_fp_eq;
6213 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6214 "2584 Fast-path EQ setup: "
6215 "queue[%d]-id=%d\n", fcp_eqidx,
6216 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6219 /*
6220 * Set up Complete Queues (CQs)
6221 */
6223 /* Set up slow-path MBOX Complete Queue as the first CQ */
6224 if (!phba->sli4_hba.mbx_cq) {
6225 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6226 "0528 Mailbox CQ not allocated\n");
6227 goto out_destroy_fp_eq;
6228 }
6229 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6230 LPFC_MCQ, LPFC_MBOX);
6231 if (rc) {
6232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6233 "0529 Failed setup of slow-path mailbox CQ: "
6234 "rc = 0x%x\n", rc);
6235 goto out_destroy_fp_eq;
6236 }
6237 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6238 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6239 phba->sli4_hba.mbx_cq->queue_id,
6240 phba->sli4_hba.sp_eq->queue_id);
6242 /* Set up slow-path ELS Complete Queue */
6243 if (!phba->sli4_hba.els_cq) {
6244 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6245 "0530 ELS CQ not allocated\n");
6246 goto out_destroy_mbx_cq;
6247 }
6248 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6249 LPFC_WCQ, LPFC_ELS);
6250 if (rc) {
6251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6252 "0531 Failed setup of slow-path ELS CQ: "
6253 "rc = 0x%x\n", rc);
6254 goto out_destroy_mbx_cq;
6255 }
6256 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6257 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6258 phba->sli4_hba.els_cq->queue_id,
6259 phba->sli4_hba.sp_eq->queue_id);
6261 /* Set up fast-path FCP Response Complete Queue */
6262 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6263 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6264 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6265 "0526 Fast-path FCP CQ (%d) not "
6266 "allocated\n", fcp_cqidx);
6267 goto out_destroy_fcp_cq;
6268 }
6269 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
6270 phba->sli4_hba.fp_eq[fcp_cqidx],
6271 LPFC_WCQ, LPFC_FCP);
6272 if (rc) {
6273 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6274 "0527 Failed setup of fast-path FCP "
6275 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6276 goto out_destroy_fcp_cq;
6277 }
6278 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6279 "2588 FCP CQ setup: cq[%d]-id=%d, "
6280 "parent eq[%d]-id=%d\n",
6281 fcp_cqidx,
6282 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6283 fcp_cqidx,
6284 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
6285 }
6287 /*
6288 * Set up all the Work Queues (WQs)
6289 */
6291 /* Set up Mailbox Command Queue */
6292 if (!phba->sli4_hba.mbx_wq) {
6293 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6294 "0538 Slow-path MQ not allocated\n");
6295 goto out_destroy_fcp_cq;
6296 }
6297 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6298 phba->sli4_hba.mbx_cq, LPFC_MBOX);
6299 if (rc) {
6300 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6301 "0539 Failed setup of slow-path MQ: "
6302 "rc = 0x%x\n", rc);
6303 goto out_destroy_fcp_cq;
6304 }
6305 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6306 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6307 phba->sli4_hba.mbx_wq->queue_id,
6308 phba->sli4_hba.mbx_cq->queue_id);
6310 /* Set up slow-path ELS Work Queue */
6311 if (!phba->sli4_hba.els_wq) {
6312 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6313 "0536 Slow-path ELS WQ not allocated\n");
6314 goto out_destroy_mbx_wq;
6315 }
6316 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6317 phba->sli4_hba.els_cq, LPFC_ELS);
6318 if (rc) {
6319 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6320 "0537 Failed setup of slow-path ELS WQ: "
6321 "rc = 0x%x\n", rc);
6322 goto out_destroy_mbx_wq;
6323 }
6324 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6325 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6326 phba->sli4_hba.els_wq->queue_id,
6327 phba->sli4_hba.els_cq->queue_id);
6329 /* Set up fast-path FCP Work Queue */
6330 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6331 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6332 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6333 "0534 Fast-path FCP WQ (%d) not "
6334 "allocated\n", fcp_wqidx);
6335 goto out_destroy_fcp_wq;
6336 }
6337 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6338 phba->sli4_hba.fcp_cq[fcp_cq_index],
6339 LPFC_FCP);
6340 if (rc) {
6341 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6342 "0535 Failed setup of fast-path FCP "
6343 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6344 goto out_destroy_fcp_wq;
6345 }
6346 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6347 "2591 FCP WQ setup: wq[%d]-id=%d, "
6348 "parent cq[%d]-id=%d\n",
6349 fcp_wqidx,
6350 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6351 fcp_cq_index,
6352 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6353 /* Round robin FCP Work Queue's Completion Queue assignment */
6354 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
6355 }
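/*
 * Round-robin sketch (illustrative numbers): with cfg_fcp_wq_count = 4
 * and cfg_fcp_eq_count = 2, the assignment above binds WQ0->CQ0,
 * WQ1->CQ1, WQ2->CQ0, WQ3->CQ1:
 *
 *	fcp_cq_index = (fcp_cq_index + 1) % phba->cfg_fcp_eq_count;
 */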
6357 /*
6358 * Create Receive Queue (RQ)
6359 */
6360 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6362 "0540 Receive Queue not allocated\n");
6363 goto out_destroy_fcp_wq;
6364 }
6365 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6366 phba->sli4_hba.els_cq, LPFC_USOL);
6367 if (rc) {
6368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6369 "0541 Failed setup of Receive Queue: "
6370 "rc = 0x%x\n", rc);
6371 goto out_destroy_fcp_wq;
6372 }
6373 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6374 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6375 "parent cq-id=%d\n",
6376 phba->sli4_hba.hdr_rq->queue_id,
6377 phba->sli4_hba.dat_rq->queue_id,
6378 phba->sli4_hba.els_cq->queue_id);
6379 return 0;
6381 out_destroy_fcp_wq:
6382 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6383 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6384 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6385 out_destroy_mbx_wq:
6386 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6387 out_destroy_fcp_cq:
6388 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6389 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6390 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6391 out_destroy_mbx_cq:
6392 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6393 out_destroy_fp_eq:
6394 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6395 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6396 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6397 out_error:
6398 return rc;
6399 }
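/*
 * Queue parenting set up above, as a sketch (names are the fields used
 * in this file):
 *
 *	sp_eq ----- mbx_cq ----- mbx_wq (MQ)
 *	       \--- els_cq ----- els_wq, hdr_rq/dat_rq
 *	fp_eq[i] -- fcp_cq[i] -- fcp_wq[j] (round robin)
 */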
6401 /**
6402 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6403 * @phba: pointer to lpfc hba data structure.
6404 *
6405 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
6406 * operation.
6407 *
6408 * Return codes
6409 * 0 - successful
6410 * -ENOMEM - No available memory
6411 * -EIO - The mailbox failed to complete successfully.
6412 **/
6413 void
6414 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6415 {
6416 int fcp_qidx;
6418 /* Unset mailbox command work queue */
6419 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6420 /* Unset ELS work queue */
6421 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6422 /* Unset unsolicited receive queue */
6423 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6424 /* Unset FCP work queue */
6425 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6426 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6427 /* Unset mailbox command complete queue */
6428 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6429 /* Unset ELS complete queue */
6430 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6431 /* Unset FCP response complete queue */
6432 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6433 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6434 /* Unset fast-path event queue */
6435 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6436 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6437 /* Unset slow-path event queue */
6438 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6439 }
6441 /**
6442 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6443 * @phba: pointer to lpfc hba data structure.
6444 *
6445 * This routine is invoked to allocate and set up a pool of completion queue
6446 * events. The body of the completion queue event is a completion queue entry
6447 * (CQE). For now, this pool is used for the interrupt service routine to queue
6448 * the following HBA completion queue events for the worker thread to process:
6449 * - Mailbox asynchronous events
6450 * - Receive queue completion unsolicited events
6451 * Later, this can be used for all the slow-path events.
6452 *
6453 * Return codes
6454 * 0 - successful
6455 * -ENOMEM - No available memory
6456 **/
6457 static int
6458 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6459 {
6460 struct lpfc_cq_event *cq_event;
6461 int i;
6463 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6464 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6465 if (!cq_event)
6466 goto out_pool_create_fail;
6467 list_add_tail(&cq_event->list,
6468 &phba->sli4_hba.sp_cqe_event_pool);
6469 }
6470 return 0;
6472 out_pool_create_fail:
6473 lpfc_sli4_cq_event_pool_destroy(phba);
6474 return -ENOMEM;
6475 }
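/*
 * Sizing sketch: the pool above holds four events per CQ entry, so with
 * cq_ecount left at its default (LPFC_CQE_DEF_COUNT, set during queue
 * creation) the loop preallocates 4 * LPFC_CQE_DEF_COUNT lpfc_cq_event
 * structures.
 */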
6477 /**
6478 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6479 * @phba: pointer to lpfc hba data structure.
6481 * This routine is invoked to free the pool of completion queue events at
6482 * driver unload time. Note that it is the responsibility of the driver
6483 * cleanup routine to free all the outstanding completion-queue events
6484 * allocated from this pool back into the pool before invoking this routine
6485 * to destroy the pool.
6486 **/
6487 static void
6488 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6489 {
6490 struct lpfc_cq_event *cq_event, *next_cq_event;
6492 list_for_each_entry_safe(cq_event, next_cq_event,
6493 &phba->sli4_hba.sp_cqe_event_pool, list) {
6494 list_del(&cq_event->list);
6495 kfree(cq_event);
6496 }
6497 }
6499 /**
6500 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6501 * @phba: pointer to lpfc hba data structure.
6503 * This routine is the lock free version of the API invoked to allocate a
6504 * completion-queue event from the free pool.
6506 * Return: Pointer to the newly allocated completion-queue event if successful
6507 * NULL otherwise.
6508 **/
6509 struct lpfc_cq_event *
6510 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6511 {
6512 struct lpfc_cq_event *cq_event = NULL;
6514 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6515 struct lpfc_cq_event, list);
6516 return cq_event;
6517 }
6519 /**
6520 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6521 * @phba: pointer to lpfc hba data structure.
6523 * This routine is the lock version of the API invoked to allocate a
6524 * completion-queue event from the free pool.
6526 * Return: Pointer to the newly allocated completion-queue event if successful
6527 * NULL otherwise.
6528 **/
6529 struct lpfc_cq_event *
6530 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6531 {
6532 struct lpfc_cq_event *cq_event;
6533 unsigned long iflags;
6535 spin_lock_irqsave(&phba->hbalock, iflags);
6536 cq_event = __lpfc_sli4_cq_event_alloc(phba);
6537 spin_unlock_irqrestore(&phba->hbalock, iflags);
6538 return cq_event;
6539 }
6541 /**
6542 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6543 * @phba: pointer to lpfc hba data structure.
6544 * @cq_event: pointer to the completion queue event to be freed.
6546 * This routine is the lock free version of the API invoked to release a
6547 * completion-queue event back into the free pool.
6548 **/
6549 void
6550 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6551 struct lpfc_cq_event *cq_event)
6552 {
6553 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6554 }
6556 /**
6557 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6558 * @phba: pointer to lpfc hba data structure.
6559 * @cq_event: pointer to the completion queue event to be freed.
6561 * This routine is the lock version of the API invoked to release a
6562 * completion-queue event back into the free pool.
6563 **/
6564 void
6565 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6566 struct lpfc_cq_event *cq_event)
6567 {
6568 unsigned long iflags;
6569 spin_lock_irqsave(&phba->hbalock, iflags);
6570 __lpfc_sli4_cq_event_release(phba, cq_event);
6571 spin_unlock_irqrestore(&phba->hbalock, iflags);
6572 }
6574 /**
6575 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6576 * @phba: pointer to lpfc hba data structure.
6578 * This routine is invoked to free all the pending completion-queue events
6579 * back into the free pool for device reset.
6580 **/
6581 static void
6582 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6583 {
6584 LIST_HEAD(cqelist);
6585 struct lpfc_cq_event *cqe;
6586 unsigned long iflags;
6588 /* Retrieve all the pending WCQEs from pending WCQE lists */
6589 spin_lock_irqsave(&phba->hbalock, iflags);
6590 /* Pending FCP XRI abort events */
6591 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6592 &cqelist);
6593 /* Pending ELS XRI abort events */
6594 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6595 &cqelist);
6596 /* Pending async events */
6597 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6598 &cqelist);
6599 spin_unlock_irqrestore(&phba->hbalock, iflags);
6601 while (!list_empty(&cqelist)) {
6602 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6603 lpfc_sli4_cq_event_release(phba, cqe);
6604 }
6605 }
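/*
 * Usage sketch (assumed caller context, illustrative only): the ISR
 * takes an event from the pool and the worker thread returns it:
 *
 *	struct lpfc_cq_event *ev = lpfc_sli4_cq_event_alloc(phba);
 *	if (ev) {
 *		// queue ev to a slow-path work list, process it, then:
 *		lpfc_sli4_cq_event_release(phba, ev);
 *	}
 */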
6607 /**
6608 * lpfc_pci_function_reset - Reset pci function.
6609 * @phba: pointer to lpfc hba data structure.
6610 *
6611 * This routine is invoked to request a PCI function reset. It destroys
6612 * all resources assigned to the PCI function which originates this request.
6613 *
6614 * Return codes
6615 * 0 - successful
6616 * -ENOMEM - No available memory
6617 * -EIO - The mailbox failed to complete successfully.
6618 **/
6619 int
6620 lpfc_pci_function_reset(struct lpfc_hba *phba)
6621 {
6622 LPFC_MBOXQ_t *mboxq;
6623 uint32_t rc = 0;
6624 uint32_t shdr_status, shdr_add_status;
6625 union lpfc_sli4_cfg_shdr *shdr;
6627 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6628 if (!mboxq) {
6629 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6630 "0494 Unable to allocate memory for issuing "
6631 "SLI_FUNCTION_RESET mailbox command\n");
6632 return -ENOMEM;
6633 }
6635 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
6636 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6637 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6638 LPFC_SLI4_MBX_EMBED);
6639 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6640 shdr = (union lpfc_sli4_cfg_shdr *)
6641 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6642 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6643 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6644 if (rc != MBX_TIMEOUT)
6645 mempool_free(mboxq, phba->mbox_mem_pool);
6646 if (shdr_status || shdr_add_status || rc) {
6647 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6648 "0495 SLI_FUNCTION_RESET mailbox failed with "
6649 "status x%x add_status x%x, mbx status x%x\n",
6650 shdr_status, shdr_add_status, rc);
6651 rc = -ENXIO;
6652 }
6653 return rc;
6654 }
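/*
 * Sketch of the completion-status idiom used above and by the other
 * SLI4_CONFIG commands in this file: the mailbox return code and both
 * embedded cfg_shdr status words must all be clean:
 *
 *	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 *	if (shdr_status || shdr_add_status || rc)
 *		rc = -ENXIO;	// any non-zero status fails the command
 */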
6656 /**
6657 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
6658 * @phba: pointer to lpfc hba data structure.
6659 * @cnt: number of nop mailbox commands to send.
6661 * This routine is invoked to send a number @cnt of NOP mailbox commands and
6662 * wait for each command to complete.
6663 *
6664 * Return: the number of NOP mailbox commands completed.
6665 **/
6666 int
6667 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6668 {
6669 LPFC_MBOXQ_t *mboxq;
6670 int length, cmdsent;
6671 uint32_t mbox_tmo;
6672 uint32_t rc = 0;
6673 uint32_t shdr_status, shdr_add_status;
6674 union lpfc_sli4_cfg_shdr *shdr;
6676 if (cnt == 0) {
6677 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6678 "2518 Requested to send 0 NOP mailbox cmd\n");
6679 return cnt;
6680 }
6682 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6683 if (!mboxq) {
6684 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6685 "2519 Unable to allocate memory for issuing "
6686 "NOP mailbox command\n");
6687 return -ENOMEM;
6688 }
6690 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
6691 length = (sizeof(struct lpfc_mbx_nop) -
6692 sizeof(struct lpfc_sli4_cfg_mhdr));
6693 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6694 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
6696 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6697 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
6698 if (!phba->sli4_hba.intr_enable)
6699 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6700 else
6701 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6702 if (rc == MBX_TIMEOUT)
6703 break;
6704 /* Check return status */
6705 shdr = (union lpfc_sli4_cfg_shdr *)
6706 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6707 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6708 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6709 &shdr->response);
6710 if (shdr_status || shdr_add_status || rc) {
6711 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6712 "2520 NOP mailbox command failed "
6713 "status x%x add_status x%x mbx "
6714 "status x%x\n", shdr_status,
6715 shdr_add_status, rc);
6716 break;
6717 }
6718 }
6720 if (rc != MBX_TIMEOUT)
6721 mempool_free(mboxq, phba->mbox_mem_pool);
6723 return cmdsent;
6724 }
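/*
 * Usage sketch (illustrative; LPFC_ACT_INTR_CNT is the burst size the
 * SLI4 probe path is assumed to use): send a burst of NOPs and require
 * that every one of them completed:
 *
 *	if (lpfc_sli4_send_nop_mbox_cmds(phba, LPFC_ACT_INTR_CNT) !=
 *	    LPFC_ACT_INTR_CNT)
 *		intr_test_failed = true;	// hypothetical flag
 */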
6726 /**
6727 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6728 * @phba: pointer to lpfc hba data structure.
6730 * This routine is invoked to set up the PCI device memory space for device
6731 * with SLI-4 interface spec.
6733 * Return codes
6734 * 0 - successful
6735 * other values - error
6736 **/
6737 static int
6738 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6739 {
6740 struct pci_dev *pdev;
6741 unsigned long bar0map_len, bar1map_len, bar2map_len;
6742 int error = -ENODEV;
6744 /* Obtain PCI device reference */
6745 if (!phba->pcidev)
6746 return error;
6747 else
6748 pdev = phba->pcidev;
6750 /* Set the device DMA mask size */
6751 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6752 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6753 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6754 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6755 return error;
6756 }
6757 }
6759 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
6760 * number of bytes required by each mapping. They are actually
6761 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
6762 */
6763 if (pci_resource_start(pdev, 0)) {
6764 phba->pci_bar0_map = pci_resource_start(pdev, 0);
6765 bar0map_len = pci_resource_len(pdev, 0);
6766 } else {
6767 phba->pci_bar0_map = pci_resource_start(pdev, 1);
6768 bar0map_len = pci_resource_len(pdev, 1);
6769 }
6770 phba->pci_bar1_map = pci_resource_start(pdev, 2);
6771 bar1map_len = pci_resource_len(pdev, 2);
6773 phba->pci_bar2_map = pci_resource_start(pdev, 4);
6774 bar2map_len = pci_resource_len(pdev, 4);
6776 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
6777 phba->sli4_hba.conf_regs_memmap_p =
6778 ioremap(phba->pci_bar0_map, bar0map_len);
6779 if (!phba->sli4_hba.conf_regs_memmap_p) {
6780 dev_printk(KERN_ERR, &pdev->dev,
6781 "ioremap failed for SLI4 PCI config registers.\n");
6785 /* Map SLI4 HBA Control Register base to a kernel virtual address. */
6786 phba->sli4_hba.ctrl_regs_memmap_p =
6787 ioremap(phba->pci_bar1_map, bar1map_len);
6788 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6789 dev_printk(KERN_ERR, &pdev->dev,
6790 "ioremap failed for SLI4 HBA control registers.\n");
6791 goto out_iounmap_conf;
6792 }
6794 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
6795 phba->sli4_hba.drbl_regs_memmap_p =
6796 ioremap(phba->pci_bar2_map, bar2map_len);
6797 if (!phba->sli4_hba.drbl_regs_memmap_p) {
6798 dev_printk(KERN_ERR, &pdev->dev,
6799 "ioremap failed for SLI4 HBA doorbell registers.\n");
6800 goto out_iounmap_ctrl;
6801 }
6803 /* Set up BAR0 PCI config space register memory map */
6804 lpfc_sli4_bar0_register_memmap(phba);
6806 /* Set up BAR1 register memory map */
6807 lpfc_sli4_bar1_register_memmap(phba);
6809 /* Set up BAR2 register memory map */
6810 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
6811 if (error)
6812 goto out_iounmap_all;
6814 return 0;
6816 out_iounmap_all:
6817 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6818 out_iounmap_ctrl:
6819 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6820 out_iounmap_conf:
6821 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6822 out:
6823 return error;
6824 }
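/*
 * The labels above unwind the three ioremap()s in reverse order; a
 * caller-side sketch of the pairing (illustrative only):
 *
 *	if (lpfc_sli4_pci_mem_setup(phba))
 *		goto out_disable_pci_dev;	// nothing left mapped
 *	...
 *	lpfc_sli4_pci_mem_unset(phba);		// unmaps all three BARs
 */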
6826 /**
6827 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
6828 * @phba: pointer to lpfc hba data structure.
6830 * This routine is invoked to unset the PCI device memory space for device
6831 * with SLI-4 interface spec.
6832 **/
6833 static void
6834 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
6835 {
6836 struct pci_dev *pdev;
6838 /* Obtain PCI device reference */
6839 if (!phba->pcidev)
6840 return;
6841 else
6842 pdev = phba->pcidev;
6844 /* Free coherent DMA memory allocated */
6846 /* Unmap I/O memory space */
6847 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6848 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6849 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6851 return;
6852 }
6854 /**
6855 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6856 * @phba: pointer to lpfc hba data structure.
6858 * This routine is invoked to enable the MSI-X interrupt vectors to device
6859 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
6860 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6861 * invoked, enables either all or nothing, depending on the current
6862 * availability of PCI vector resources. The device driver is responsible
6863 * for calling the individual request_irq() to register each MSI-X vector
6864 * with an interrupt handler, which is done in this function. Note that
6865 * later when device is unloading, the driver should always call free_irq()
6866 * on all MSI-X vectors it has done request_irq() on before calling
6867 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device
6868 * will be left with MSI-X enabled, leaking its vectors.
6869 *
6870 * Return codes
6871 * 0 - successful
6872 * other values - error
6873 **/
6874 static int
6875 lpfc_sli_enable_msix(struct lpfc_hba *phba)
6876 {
6877 int rc, i;
6878 LPFC_MBOXQ_t *pmb;
6880 /* Set up MSI-X multi-message vectors */
6881 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6882 phba->msix_entries[i].entry = i;
6884 /* Configure MSI-X capability structure */
6885 rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
6886 ARRAY_SIZE(phba->msix_entries));
6887 if (rc) {
6888 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6889 "0420 PCI enable MSI-X failed (%d)\n", rc);
6890 goto msi_fail_out;
6891 }
6892 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6893 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6894 "0477 MSI-X entry[%d]: vector=x%x "
6895 "message=%d\n", i,
6896 phba->msix_entries[i].vector,
6897 phba->msix_entries[i].entry);
6898 /*
6899 * Assign MSI-X vectors to interrupt handlers
6900 */
6902 /* vector-0 is associated to slow-path handler */
6903 rc = request_irq(phba->msix_entries[0].vector,
6904 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
6905 LPFC_SP_DRIVER_HANDLER_NAME, phba);
6906 if (rc) {
6907 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6908 "0421 MSI-X slow-path request_irq failed "
6909 "(%d)\n", rc);
6910 goto msi_fail_out;
6911 }
6913 /* vector-1 is associated to fast-path handler */
6914 rc = request_irq(phba->msix_entries[1].vector,
6915 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
6916 LPFC_FP_DRIVER_HANDLER_NAME, phba);
6918 if (rc) {
6919 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6920 "0429 MSI-X fast-path request_irq failed "
6921 "(%d)\n", rc);
6922 goto irq_fail_out;
6923 }
6925 /*
6926 * Configure HBA MSI-X attention conditions to messages
6927 */
6928 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6930 if (!pmb) {
6931 rc = -ENOMEM;
6932 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6933 "0474 Unable to allocate memory for issuing "
6934 "MBOX_CONFIG_MSI command\n");
6935 goto mem_fail_out;
6936 }
6937 rc = lpfc_config_msi(phba, pmb);
6938 if (rc)
6939 goto mem_fail_out;
6940 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6941 if (rc != MBX_SUCCESS) {
6942 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6943 "0351 Config MSI mailbox command failed, "
6944 "mbxCmd x%x, mbxStatus x%x\n",
6945 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
6946 goto mem_fail_out;
6947 }
6949 /* Free memory allocated for mailbox command */
6950 mempool_free(pmb, phba->mbox_mem_pool);
6951 return rc;
6953 mem_fail_out:
6954 /* Free memory allocated for mailbox command */
6955 mempool_free(pmb, phba->mbox_mem_pool);
6957 irq_fail_out:
6958 /* free the irq already requested */
6959 free_irq(phba->msix_entries[1].vector, phba);
6961 msi_fail_out:
6962 /* free the irq already requested */
6963 free_irq(phba->msix_entries[0].vector, phba);
6966 /* Unconfigure MSI-X capability structure */
6967 pci_disable_msix(phba->pcidev);
6968 return rc;
6969 }
6971 /**
6972 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
6973 * @phba: pointer to lpfc hba data structure.
6975 * This routine is invoked to release the MSI-X vectors and then disable the
6976 * MSI-X interrupt mode to device with SLI-3 interface spec.
6977 **/
6978 static void
6979 lpfc_sli_disable_msix(struct lpfc_hba *phba)
6980 {
6981 int i;
6983 /* Free up MSI-X multi-message vectors */
6984 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
6985 free_irq(phba->msix_entries[i].vector, phba);
6986 /* Disable MSI-X */
6987 pci_disable_msix(phba->pcidev);
6989 return;
6990 }
6992 /**
6993 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
6994 * @phba: pointer to lpfc hba data structure.
6996 * This routine is invoked to enable the MSI interrupt mode to device with
6997 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
6998 * enable the MSI vector. The device driver is responsible for calling the
6999 * request_irq() to register the MSI vector with an interrupt handler,
7000 * which is done in this function.
7001 *
7002 * Return codes
7003 * 0 - successful
7004 * other values - error
7005 **/
7006 static int
7007 lpfc_sli_enable_msi(struct lpfc_hba *phba)
7008 {
7009 int rc;
7011 rc = pci_enable_msi(phba->pcidev);
7012 if (!rc)
7013 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7014 "0462 PCI enable MSI mode success.\n");
7015 else {
7016 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7017 "0471 PCI enable MSI mode failed (%d)\n", rc);
7018 return rc;
7019 }
7021 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7022 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7023 if (rc) {
7024 pci_disable_msi(phba->pcidev);
7025 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7026 "0478 MSI request_irq failed (%d)\n", rc);
7027 }
7028 return rc;
7029 }
7031 /**
7032 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
7033 * @phba: pointer to lpfc hba data structure.
7035 * This routine is invoked to disable the MSI interrupt mode to device with
7036 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
7037 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7038 * results in a BUG_ON() and the device will be left with MSI enabled,
7039 * leaking its vector.
7040 **/
7041 static void
7042 lpfc_sli_disable_msi(struct lpfc_hba *phba)
7043 {
7044 free_irq(phba->pcidev->irq, phba);
7045 pci_disable_msi(phba->pcidev);
7046 return;
7047 }
7049 /**
7050 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
7051 * @phba: pointer to lpfc hba data structure.
7053 * This routine is invoked to enable device interrupt and associate driver's
7054 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
7055 * spec. Depending on the interrupt mode configured for the driver, the
7056 * driver will try to fall back from the configured interrupt mode to an
7057 * interrupt mode which is supported by the platform, kernel, and device,
7058 * in the order of:
7059 * MSI-X -> MSI -> IRQ.
7060 *
7061 * Return codes
7062 * 0 - successful
7063 * other values - error
7064 **/
7065 static uint32_t
7066 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7067 {
7068 uint32_t intr_mode = LPFC_INTR_ERROR;
7069 int retval;
7071 if (cfg_mode == 2) {
7072 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
7073 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
7074 if (!retval) {
7075 /* Now, try to enable MSI-X interrupt mode */
7076 retval = lpfc_sli_enable_msix(phba);
7077 if (!retval) {
7078 /* Indicate initialization to MSI-X mode */
7079 phba->intr_type = MSIX;
7080 intr_mode = 2;
7081 }
7082 }
7083 }
7085 /* Fallback to MSI if MSI-X initialization failed */
7086 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7087 retval = lpfc_sli_enable_msi(phba);
7088 if (!retval) {
7089 /* Indicate initialization to MSI mode */
7090 phba->intr_type = MSI;
7091 intr_mode = 1;
7092 }
7093 }
7095 /* Fallback to INTx if both MSI-X/MSI initialization failed */
7096 if (phba->intr_type == NONE) {
7097 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7098 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7099 if (!retval) {
7100 /* Indicate initialization to INTx mode */
7101 phba->intr_type = INTx;
7102 intr_mode = 0;
7103 }
7104 }
7105 return intr_mode;
7106 }
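/*
 * Fallback ladder sketch (illustrative): cfg_mode 2 requests MSI-X,
 * 1 requests MSI, 0 requests INTx; each failing level falls through to
 * the next:
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		return -ENODEV;		// no interrupt mode could be enabled
 */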
7108 /**
7109 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
7110 * @phba: pointer to lpfc hba data structure.
7112 * This routine is invoked to disable device interrupt and disassociate the
7113 * driver's interrupt handler(s) from interrupt vector(s) to device with
7114 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
7115 * release the interrupt vector(s) for the message signaled interrupt.
7116 **/
7117 static void
7118 lpfc_sli_disable_intr(struct lpfc_hba *phba)
7119 {
7120 /* Disable the currently initialized interrupt mode */
7121 if (phba->intr_type == MSIX)
7122 lpfc_sli_disable_msix(phba);
7123 else if (phba->intr_type == MSI)
7124 lpfc_sli_disable_msi(phba);
7125 else if (phba->intr_type == INTx)
7126 free_irq(phba->pcidev->irq, phba);
7128 /* Reset interrupt management states */
7129 phba->intr_type = NONE;
7130 phba->sli.slistat.sli_intr = 0;
7132 return;
7133 }
7135 /**
7136 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7137 * @phba: pointer to lpfc hba data structure.
7139 * This routine is invoked to enable the MSI-X interrupt vectors to device
7140 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7141 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
7142 * enables either all or nothing, depending on the current availability of
7143 * PCI vector resources. The device driver is responsible for calling the
7144 * individual request_irq() to register each MSI-X vector with an interrupt
7145 * handler, which is done in this function. Note that later when device is
7146 * unloading, the driver should always call free_irq() on all MSI-X vectors
7147 * it has done request_irq() on before calling pci_disable_msix(). Failure
7148 * to do so results in a BUG_ON() and the device will be left with MSI-X
7149 * enabled, leaking its vectors.
7150 *
7151 * Return codes
7152 * 0 - successful
7153 * other values - error
7154 **/
7155 static int
7156 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7157 {
7158 int vectors, rc, index;
7160 /* Set up MSI-X multi-message vectors */
7161 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7162 phba->sli4_hba.msix_entries[index].entry = index;
7164 /* Configure MSI-X capability structure */
7165 vectors = phba->sli4_hba.cfg_eqn;
7166 enable_msix_vectors:
7167 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7168 vectors);
7169 if (rc > 1) {
7170 vectors = rc;
7171 goto enable_msix_vectors;
7172 } else if (rc) {
7173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7174 "0484 PCI enable MSI-X failed (%d)\n", rc);
7175 goto msi_fail_out;
7176 }
7178 /* Log MSI-X vector assignment */
7179 for (index = 0; index < vectors; index++)
7180 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7181 "0489 MSI-X entry[%d]: vector=x%x "
7182 "message=%d\n", index,
7183 phba->sli4_hba.msix_entries[index].vector,
7184 phba->sli4_hba.msix_entries[index].entry);
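/*
 * Note on the enable_msix_vectors retry above: in this kernel era
 * pci_enable_msix() returns a positive value to report how many vectors
 * are actually available, so the request is retried with the reduced
 * count, e.g. (illustrative):
 *
 *	rc = pci_enable_msix(pdev, entries, 5);	// returns 3: only 3 free
 *	if (rc > 1)
 *		rc = pci_enable_msix(pdev, entries, rc);	// retry with 3
 */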
7185 /*
7186 * Assign MSI-X vectors to interrupt handlers
7187 */
7189 /* The first vector must be associated to the slow-path handler for MQ */
7190 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7191 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7192 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7193 if (rc) {
7194 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7195 "0485 MSI-X slow-path request_irq failed "
7196 "(%d)\n", rc);
7197 goto msi_fail_out;
7198 }
7200 /* The rest of the vector(s) are associated to fast-path handler(s) */
7201 for (index = 1; index < vectors; index++) {
7202 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7203 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7204 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7205 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7206 LPFC_FP_DRIVER_HANDLER_NAME,
7207 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7208 if (rc) {
7209 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7210 "0486 MSI-X fast-path (%d) "
7211 "request_irq failed (%d)\n", index, rc);
7212 goto cfg_fail_out;
7213 }
7214 }
7215 phba->sli4_hba.msix_vec_nr = vectors;
7217 return rc;
7219 cfg_fail_out:
7220 /* free the irq already requested */
7221 for (--index; index >= 1; index--)
7222 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
7223 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7225 /* free the irq already requested */
7226 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7228 msi_fail_out:
7229 /* Unconfigure MSI-X capability structure */
7230 pci_disable_msix(phba->pcidev);
7231 return rc;
7232 }
7234 /**
7235 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7236 * @phba: pointer to lpfc hba data structure.
7238 * This routine is invoked to release the MSI-X vectors and then disable the
7239 * MSI-X interrupt mode to device with SLI-4 interface spec.
7240 **/
7241 static void
7242 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7243 {
7244 int index;
7246 /* Free up MSI-X multi-message vectors */
7247 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7249 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7250 free_irq(phba->sli4_hba.msix_entries[index].vector,
7251 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7253 /* Disable MSI-X */
7254 pci_disable_msix(phba->pcidev);
7256 return;
7257 }
7259 /**
7260 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7261 * @phba: pointer to lpfc hba data structure.
7263 * This routine is invoked to enable the MSI interrupt mode to device with
7264 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7265 * to enable the MSI vector. The device driver is responsible for calling
7266 * the request_irq() to register the MSI vector with an interrupt handler,
7267 * which is done in this function.
7268 *
7269 * Return codes
7270 * 0 - successful
7271 * other values - error
7272 **/
7273 static int
7274 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7275 {
7276 int rc, index;
7278 rc = pci_enable_msi(phba->pcidev);
7279 if (!rc)
7280 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7281 "0487 PCI enable MSI mode success.\n");
7282 else {
7283 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7284 "0488 PCI enable MSI mode failed (%d)\n", rc);
7285 return rc;
7286 }
7288 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7289 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7290 if (rc) {
7291 pci_disable_msi(phba->pcidev);
7292 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7293 "0490 MSI request_irq failed (%d)\n", rc);
7294 return rc;
7295 }
7297 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7298 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7299 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7300 }
7302 return 0;
7303 }
7305 /**
7306 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7307 * @phba: pointer to lpfc hba data structure.
7309 * This routine is invoked to disable the MSI interrupt mode to device with
7310 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
7311 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7312 * results in a BUG_ON() and the device will be left with MSI enabled,
7313 * leaking its vector.
7314 **/
7315 static void
7316 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7317 {
7318 free_irq(phba->pcidev->irq, phba);
7319 pci_disable_msi(phba->pcidev);
7320 return;
7321 }
7323 /**
7324 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7325 * @phba: pointer to lpfc hba data structure.
7327 * This routine is invoked to enable device interrupt and associate driver's
7328 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
7329 * interface spec. Depending on the interrupt mode configured for the
7330 * driver, the driver will try to fall back from the configured interrupt
7331 * mode to an interrupt mode which is supported by the platform, kernel,
7332 * and device, in the order of:
7333 * MSI-X -> MSI -> IRQ.
7334 *
7335 * Return codes
7336 * 0 - successful
7337 * other values - error
7338 **/
7339 static uint32_t
7340 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7341 {
7342 uint32_t intr_mode = LPFC_INTR_ERROR;
7343 int retval, index;
7345 if (cfg_mode == 2) {
7346 /* Preparation before conf_msi mbox cmd */
7347 retval = 0;
7348 if (!retval) {
7349 /* Now, try to enable MSI-X interrupt mode */
7350 retval = lpfc_sli4_enable_msix(phba);
7351 if (!retval) {
7352 /* Indicate initialization to MSI-X mode */
7353 phba->intr_type = MSIX;
7354 intr_mode = 2;
7355 }
7356 }
7357 }
7359 /* Fallback to MSI if MSI-X initialization failed */
7360 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7361 retval = lpfc_sli4_enable_msi(phba);
7362 if (!retval) {
7363 /* Indicate initialization to MSI mode */
7364 phba->intr_type = MSI;
7365 intr_mode = 1;
7366 }
7367 }
7369 /* Fallback to INTx if both MSI-X/MSI initialization failed */
7370 if (phba->intr_type == NONE) {
7371 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7372 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7373 if (!retval) {
7374 /* Indicate initialization to INTx mode */
7375 phba->intr_type = INTx;
7376 intr_mode = 0;
7377 for (index = 0; index < phba->cfg_fcp_eq_count;
7378 index++) {
7379 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7380 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7381 }
7382 }
7383 }
7384 return intr_mode;
7385 }
7387 /**
7388 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7389 * @phba: pointer to lpfc hba data structure.
7391 * This routine is invoked to disable device interrupt and disassociate
7392 * the driver's interrupt handler(s) from interrupt vector(s) to device
7393 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7394 * will release the interrupt vector(s) for the message signaled interrupt.
7395 **/
7396 static void
7397 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7398 {
7399 /* Disable the currently initialized interrupt mode */
7400 if (phba->intr_type == MSIX)
7401 lpfc_sli4_disable_msix(phba);
7402 else if (phba->intr_type == MSI)
7403 lpfc_sli4_disable_msi(phba);
7404 else if (phba->intr_type == INTx)
7405 free_irq(phba->pcidev->irq, phba);
7407 /* Reset interrupt management states */
7408 phba->intr_type = NONE;
7409 phba->sli.slistat.sli_intr = 0;
7411 return;
7412 }
7414 /**
7415 * lpfc_unset_hba - Unset SLI3 hba device initialization
7416 * @phba: pointer to lpfc hba data structure.
7418 * This routine is invoked to unset the HBA device initialization steps to
7419 * a device with SLI-3 interface spec.
7420 **/
7421 static void
7422 lpfc_unset_hba(struct lpfc_hba *phba)
7423 {
7424 struct lpfc_vport *vport = phba->pport;
7425 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7427 spin_lock_irq(shost->host_lock);
7428 vport->load_flag |= FC_UNLOADING;
7429 spin_unlock_irq(shost->host_lock);
7431 lpfc_stop_hba_timers(phba);
7433 phba->pport->work_port_events = 0;
7435 lpfc_sli_hba_down(phba);
7437 lpfc_sli_brdrestart(phba);
7439 lpfc_sli_disable_intr(phba);
7441 return;
7442 }
7444 /**
7445 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7446 * @phba: pointer to lpfc hba data structure.
7448 * This routine is invoked to unset the HBA device initialization steps to
7449 * a device with SLI-4 interface spec.
7450 **/
7451 static void
7452 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7453 {
7454 struct lpfc_vport *vport = phba->pport;
7455 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7457 spin_lock_irq(shost->host_lock);
7458 vport->load_flag |= FC_UNLOADING;
7459 spin_unlock_irq(shost->host_lock);
7461 phba->pport->work_port_events = 0;
7463 /* Stop the SLI4 device port */
7464 lpfc_stop_port(phba);
7466 lpfc_sli4_disable_intr(phba);
7468 /* Reset SLI4 HBA FCoE function */
7469 lpfc_pci_function_reset(phba);
7471 return;
7472 }
7474 /**
7475 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
7476 * @phba: Pointer to HBA context object.
7478 * This function is called in the SLI4 code path to wait for completion
7479 * of device's XRIs exchange busy. It will check the XRI exchange busy
7480 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
7481 * that, it will check the XRI exchange busy on outstanding FCP and ELS
7482 * I/Os every 30 seconds, log an error message, and wait forever. Only when
7483 * all XRI exchange busy events complete does the driver unload proceed with
7484 * invoking the function reset ioctl mailbox command to the CNA and with
7485 * the rest of the driver unload resource release.
7486 **/
7487 static void
7488 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
7489 {
7490 int wait_time = 0;
7491 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7492 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7494 while (!fcp_xri_cmpl || !els_xri_cmpl) {
7495 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
7496 if (!fcp_xri_cmpl)
7497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7498 "2877 FCP XRI exchange busy "
7499 "wait time: %d seconds.\n",
7500 wait_time/1000);
7501 if (!els_xri_cmpl)
7502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7503 "2878 ELS XRI exchange busy "
7504 "wait time: %d seconds.\n",
7505 wait_time/1000);
7506 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
7507 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
7508 } else {
7509 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
7510 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
7511 }
7512 fcp_xri_cmpl =
7513 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7514 els_xri_cmpl =
7515 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7516 }
7517 }
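/*
 * Timing sketch (values assumed from the LPFC_XRI_EXCH_BUSY_WAIT_*
 * macros, nominally T1 = 10ms, T2 = 30s, TMO = 10s): the loop polls
 * every 10ms for the first 10 seconds, then every 30 seconds, logging
 * the accumulated wait_time/1000 in whole seconds each long interval.
 */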
7519 /**
7520 * lpfc_sli4_hba_unset - Unset the fcoe hba
7521 * @phba: Pointer to HBA context object.
7523 * This function is called in the SLI4 code path to reset the HBA's FCoE
7524 * function. The caller is not required to hold any lock. This routine
7525 * issues PCI function reset mailbox command to reset the FCoE function.
7526 * At the end of the function, it calls lpfc_hba_down_post function to
7527 * free any pending commands.
7528 **/
7529 static void
7530 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7531 {
7532 int wait_cnt = 0;
7533 LPFC_MBOXQ_t *mboxq;
7535 lpfc_stop_hba_timers(phba);
7536 phba->sli4_hba.intr_enable = 0;
7538 /*
7539 * Gracefully wait out the potential current outstanding asynchronous
7540 * mailbox command.
7541 */
7543 /* First, block any pending async mailbox command from posted */
7544 spin_lock_irq(&phba->hbalock);
7545 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7546 spin_unlock_irq(&phba->hbalock);
7547 /* Now, trying to wait it out if we can */
7548 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7549 msleep(10);
7550 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7551 break;
7552 }
7553 /* Forcefully release the outstanding mailbox command if timed out */
7554 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7555 spin_lock_irq(&phba->hbalock);
7556 mboxq = phba->sli.mbox_active;
7557 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7558 __lpfc_mbox_cmpl_put(phba, mboxq);
7559 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7560 phba->sli.mbox_active = NULL;
7561 spin_unlock_irq(&phba->hbalock);
7562 }
7564 /* Abort all iocbs associated with the hba */
7565 lpfc_sli_hba_iocb_abort(phba);
7567 /* Wait for completion of device XRI exchange busy */
7568 lpfc_sli4_xri_exchange_busy_wait(phba);
7570 /* Disable PCI subsystem interrupt */
7571 lpfc_sli4_disable_intr(phba);
7573 /* Stop kthread signal shall trigger work_done one more time */
7574 kthread_stop(phba->worker_thread);
7576 /* Reset SLI4 HBA FCoE function */
7577 lpfc_pci_function_reset(phba);
7579 /* Stop the SLI4 device port */
7580 phba->pport->work_port_events = 0;
7581 }
7583 /**
7584 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7585 * @phba: Pointer to HBA context object.
7586 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7588 * This function is called in the SLI4 code path to read the port's
7589 * sli4 capabilities.
7591 * This function may be called from any context that can block-wait
7592 * for the completion. The expectation is that this routine is called
7593 * typically from probe_one or from the online routine.
7594 **/
7595 int
7596 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7597 {
7598 int rc;
7599 struct lpfc_mqe *mqe;
7600 struct lpfc_pc_sli4_params *sli4_params;
7601 uint32_t mbox_tmo;
7603 rc = 0;
7604 mqe = &mboxq->u.mqe;
7606 /* Read the port's SLI4 Parameters port capabilities */
7607 lpfc_sli4_params(mboxq);
7608 if (!phba->sli4_hba.intr_enable)
7609 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7610 else {
7611 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7612 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7613 }
7614 if (rc)
7615 return 1;
7618 sli4_params = &phba->sli4_hba.pc_sli4_params;
7619 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7620 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7621 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7622 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7623 &mqe->un.sli4_params);
7624 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7625 &mqe->un.sli4_params);
7626 sli4_params->proto_types = mqe->un.sli4_params.word3;
7627 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7628 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7629 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7630 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7631 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7632 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7633 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7634 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7635 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7636 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7637 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7638 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7639 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7640 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7641 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7642 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7643 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7644 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7645 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7646 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7647 return rc;
7648 }
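/*
 * Sketch of the bf_get() idiom used above: each capability is a named
 * bit field within the SLI4_PARAMS mailbox payload, extracted by a
 * mask-and-shift of the field's word, e.g.:
 *
 *	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
 */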
7650 /**
7651 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
7652 * @pdev: pointer to PCI device
7653 * @pid: pointer to PCI device identifier
7655 * This routine is to be called to attach a device with SLI-3 interface spec
7656 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7657 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7658 * information of the device and driver to see if the driver states that it can
7659 * support this kind of device. If the match is successful, the driver core
7660 * invokes this routine. If this routine determines it can claim the HBA, it
7661 * does all the initialization that it needs to do to handle the HBA properly.
7663 * Return code
7664 * 0 - driver can claim the device
7665 * negative value - driver can not claim the device
7666 **/
7667 static int __devinit
7668 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
7669 {
7670 struct lpfc_hba *phba;
7671 struct lpfc_vport *vport = NULL;
7672 struct Scsi_Host *shost = NULL;
7673 int error;
7674 uint32_t cfg_mode, intr_mode;
7676 /* Allocate memory for HBA structure */
7677 phba = lpfc_hba_alloc(pdev);
7678 if (!phba)
7679 return -ENOMEM;
7681 /* Perform generic PCI device enabling operation */
7682 error = lpfc_enable_pci_dev(phba);
7683 if (error) {
7684 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7685 "1401 Failed to enable pci device.\n");
7686 goto out_free_phba;
7687 }
7689 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
7690 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
7691 if (error)
7692 goto out_disable_pci_dev;
7694 /* Set up SLI-3 specific device PCI memory space */
7695 error = lpfc_sli_pci_mem_setup(phba);
7696 if (error) {
7697 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7698 "1402 Failed to set up pci memory space.\n");
7699 goto out_disable_pci_dev;
7700 }
7702 /* Set up phase-1 common device driver resources */
7703 error = lpfc_setup_driver_resource_phase1(phba);
7704 if (error) {
7705 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7706 "1403 Failed to set up driver resource.\n");
7707 goto out_unset_pci_mem_s3;
7708 }
7710 /* Set up SLI-3 specific device driver resources */
7711 error = lpfc_sli_driver_resource_setup(phba);
7712 if (error) {
7713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7714 "1404 Failed to set up driver resource.\n");
7715 goto out_unset_pci_mem_s3;
7716 }
7718 /* Initialize and populate the iocb list per host */
7719 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
7720 if (error) {
7721 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7722 "1405 Failed to initialize iocb list.\n");
7723 goto out_unset_driver_resource_s3;
7724 }
7726 /* Set up common device driver resources */
7727 error = lpfc_setup_driver_resource_phase2(phba);
7728 if (error) {
7729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7730 "1406 Failed to set up driver resource.\n");
7731 goto out_free_iocb_list;
7732 }
7734 /* Create SCSI host to the physical port */
7735 error = lpfc_create_shost(phba);
7736 if (error) {
7737 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7738 "1407 Failed to create scsi host.\n");
7739 goto out_unset_driver_resource;
7740 }
7742 /* Configure sysfs attributes */
7743 vport = phba->pport;
7744 error = lpfc_alloc_sysfs_attr(vport);
7745 if (error) {
7746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7747 "1476 Failed to allocate sysfs attr\n");
7748 goto out_destroy_shost;
7749 }
7751 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7752 /* Now, trying to enable interrupt and bring up the device */
7753 cfg_mode = phba->cfg_use_msi;
7754 while (true) {
7755 /* Put device to a known state before enabling interrupt */
7756 lpfc_stop_port(phba);
7757 /* Configure and enable interrupt */
7758 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
7759 if (intr_mode == LPFC_INTR_ERROR) {
7760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7761 "0431 Failed to enable interrupt.\n");
7762 error = -ENODEV;
7763 goto out_free_sysfs_attr;
7764 }
7765 /* SLI-3 HBA setup */
7766 if (lpfc_sli_hba_setup(phba)) {
7767 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7768 "1477 Failed to set up hba\n");
7769 error = -ENODEV;
7770 goto out_remove_device;
7771 }
7773 /* Wait 50ms for the interrupts of previous mailbox commands */
7774 msleep(50);
7775 /* Check active interrupts on message signaled interrupts */
7776 if (intr_mode == 0 ||
7777 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
7778 /* Log the current active interrupt mode */
7779 phba->intr_mode = intr_mode;
7780 lpfc_log_intr_mode(phba, intr_mode);
7781 break;
7782 } else {
7783 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7784 "0447 Configure interrupt mode (%d) "
7785 "failed active interrupt test.\n",
7786 intr_mode);
7787 /* Disable the current interrupt mode */
7788 lpfc_sli_disable_intr(phba);
7789 /* Try next level of interrupt mode */
7790 cfg_mode = --intr_mode;
7791 }
7792 }
7794 /* Perform post initialization setup */
7795 lpfc_post_init_setup(phba);
7797 /* Check if there are static vports to be created. */
7798 lpfc_create_static_vport(phba);
7800 return 0;
7802 out_remove_device:
7803 lpfc_unset_hba(phba);
7804 out_free_sysfs_attr:
7805 lpfc_free_sysfs_attr(vport);
7806 out_destroy_shost:
7807 lpfc_destroy_shost(phba);
7808 out_unset_driver_resource:
7809 lpfc_unset_driver_resource_phase2(phba);
7810 out_free_iocb_list:
7811 lpfc_free_iocb_list(phba);
7812 out_unset_driver_resource_s3:
7813 lpfc_sli_driver_resource_unset(phba);
7814 out_unset_pci_mem_s3:
7815 lpfc_sli_pci_mem_unset(phba);
7816 out_disable_pci_dev:
7817 lpfc_disable_pci_dev(phba);
7818 if (shost)
7819 scsi_host_put(shost);
7820 out_free_phba:
7821 lpfc_hba_free(phba);
7822 return error;
7823 }
7825 /**
7826 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
7827 * @pdev: pointer to PCI device
7829 * This routine is to be called to detach a device with SLI-3 interface
7830 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7831 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7832 * device to be removed from the PCI subsystem properly.
7833 **/
7834 static void __devexit
7835 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
7836 {
7837 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7838 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7839 struct lpfc_vport **vports;
7840 struct lpfc_hba *phba = vport->phba;
7841 int i;
7842 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
7844 spin_lock_irq(&phba->hbalock);
7845 vport->load_flag |= FC_UNLOADING;
7846 spin_unlock_irq(&phba->hbalock);
7848 lpfc_free_sysfs_attr(vport);
7850 /* Release all the vports against this physical port */
7851 vports = lpfc_create_vport_work_array(phba);
7852 if (vports != NULL)
7853 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7854 fc_vport_terminate(vports[i]->fc_vport);
7855 lpfc_destroy_vport_work_array(phba, vports);
7857 /* Remove FC host and then SCSI host with the physical port */
7858 fc_remove_host(shost);
7859 scsi_remove_host(shost);
7860 lpfc_cleanup(vport);
7862 /*
7863 * Bring down the SLI Layer. This step disables all interrupts,
7864 * clears the rings, discards all mailbox commands, and resets
7865 * the HBA.
7866 */
7868 /* HBA interrupt will be disabled after this call */
7869 lpfc_sli_hba_down(phba);
7870 /* Stop kthread signal shall trigger work_done one more time */
7871 kthread_stop(phba->worker_thread);
7872 /* Final cleanup of txcmplq and reset the HBA */
7873 lpfc_sli_brdrestart(phba);
7875 lpfc_stop_hba_timers(phba);
7876 spin_lock_irq(&phba->hbalock);
7877 list_del_init(&vport->listentry);
7878 spin_unlock_irq(&phba->hbalock);
7880 lpfc_debugfs_terminate(vport);
7882 /* Disable interrupt */
7883 lpfc_sli_disable_intr(phba);
7885 pci_set_drvdata(pdev, NULL);
7886 scsi_host_put(shost);
7888 /*
7889 * Call scsi_free before mem_free since scsi bufs are released to their
7890 * corresponding pools here.
7891 */
7892 lpfc_scsi_free(phba);
7893 lpfc_mem_free_all(phba);
7895 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7896 phba->hbqslimp.virt, phba->hbqslimp.phys);
7898 /* Free resources associated with SLI2 interface */
7899 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7900 phba->slim2p.virt, phba->slim2p.phys);
7902 /* unmap adapter SLIM and Control Registers */
7903 iounmap(phba->ctrl_regs_memmap_p);
7904 iounmap(phba->slim_memmap_p);
7906 lpfc_hba_free(phba);
7908 pci_release_selected_regions(pdev, bars);
7909 pci_disable_device(pdev);
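
/*
 * Editor's note: the two dma_free_coherent() calls above release DMA
 * regions obtained earlier with dma_alloc_coherent() during attach. A
 * minimal sketch of that alloc/free pairing follows; buf_size is an
 * arbitrary example length, not a value taken from this driver.
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_coherent_buffer(struct pci_dev *pdev)
{
    void *virt;
    dma_addr_t phys;
    size_t buf_size = 4096;

    /* CPU-visible virtual address in virt, bus address in phys */
    virt = dma_alloc_coherent(&pdev->dev, buf_size, &phys, GFP_KERNEL);
    if (!virt)
        return -ENOMEM;
    /* ... program phys into the adapter, access the data via virt ... */
    dma_free_coherent(&pdev->dev, buf_size, virt, phys);
    return 0;
}
#endif
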
/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) for a device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements for a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call; therefore, the driver sets the device to the PCI_D3hot state
 * in PCI config space instead of setting it according to the @msg provided
 * by the PM.
 *
 * Return code
 *     0 - driver suspended the device
 *     Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

    lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
            "0473 PCI device Power Management suspend.\n");

    /* Bring down the device */
    lpfc_offline_prep(phba);
    lpfc_offline(phba);
    kthread_stop(phba->worker_thread);

    /* Disable interrupt from device */
    lpfc_sli_disable_intr(phba);

    /* Save device state to PCI config space */
    pci_save_state(pdev);
    pci_set_power_state(pdev, PCI_D3hot);

    return 0;
}
/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) for a device with SLI-3 interface spec. When
 * PM invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that the driver
 * implements only the minimum PM requirements for a power-aware driver: all
 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method
 * call are treated as SUSPEND, and the driver fully reinitializes its device
 * during the resume() method call; therefore, the device is set to PCI_D0
 * directly in PCI config space before restoring the state.
 *
 * Return code
 *     0 - driver resumed the device
 *     Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
    uint32_t intr_mode;
    int error;

    lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
            "0452 PCI device Power Management resume.\n");

    /* Restore device state from PCI config space */
    pci_set_power_state(pdev, PCI_D0);
    pci_restore_state(pdev);

    /*
     * As the new kernel behavior of pci_restore_state() API call clears
     * device saved_state flag, need to save the restored state again.
     */
    pci_save_state(pdev);

    if (pdev->is_busmaster)
        pci_set_master(pdev);

    /* Startup the kernel thread for this host adapter. */
    phba->worker_thread = kthread_run(lpfc_do_work, phba,
                    "lpfc_worker_%d", phba->brd_no);
    if (IS_ERR(phba->worker_thread)) {
        error = PTR_ERR(phba->worker_thread);
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "0434 PM resume failed to start worker "
                "thread: error=x%x.\n", error);
        return error;
    }

    /* Configure and enable interrupt */
    intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
    if (intr_mode == LPFC_INTR_ERROR) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "0430 PM resume Failed to enable interrupt\n");
        return -EIO;
    } else
        phba->intr_mode = intr_mode;

    /* Restart HBA and bring it online */
    lpfc_sli_brdrestart(phba);
    lpfc_online(phba);

    /* Log the current active interrupt mode */
    lpfc_log_intr_mode(phba, phba->intr_mode);

    return 0;
}
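
/*
 * Editor's note: both resume paths in this file depend on the ordering
 * sketched below -- pci_restore_state() clears the device's saved_state
 * flag, so the state must be saved again right away or a later suspend
 * would have no saved state to restore. Minimal sketch using only the
 * PCI core calls already seen above:
 */
#if 0	/* illustrative sketch only, not compiled */
static void example_pm_resume_order(struct pci_dev *pdev)
{
    pci_set_power_state(pdev, PCI_D0);  /* power the function up first */
    pci_restore_state(pdev);            /* restore config space; clears
                                         * the saved_state flag */
    pci_save_state(pdev);               /* re-arm for the next suspend */
    if (pdev->is_busmaster)
        pci_set_master(pdev);           /* re-enable bus mastering */
}
#endif
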
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
    struct lpfc_sli *psli = &phba->sli;
    struct lpfc_sli_ring *pring;

    lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
            "2723 PCI channel I/O abort preparing for recovery\n");

    /*
     * There may be errored I/Os through HBA, abort all I/Os on txcmplq
     * and let the SCSI mid-layer retry them to recover.
     */
    pring = &psli->ring[psli->fcp_ring];
    lpfc_sli_abort_iocb_ring(phba, pring);
}
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
    lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
            "2710 PCI channel disable preparing for reset\n");

    /* Block any management I/Os to the device */
    lpfc_block_mgmt_io(phba);

    /* Block all SCSI devices' I/Os on the host */
    lpfc_scsi_dev_block(phba);

    /* stop all timers */
    lpfc_stop_hba_timers(phba);

    /* Disable interrupt and pci device */
    lpfc_sli_disable_intr(phba);
    pci_disable_device(phba->pcidev);

    /* Flush all driver's outstanding SCSI I/Os as we are to reset */
    lpfc_sli_flush_fcp_rings(phba);
}
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
    lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
            "2711 PCI channel permanent disable for failure\n");
    /* Block all SCSI devices' I/Os on the host */
    lpfc_scsi_dev_block(phba);

    /* stop all timers */
    lpfc_stop_hba_timers(phba);

    /* Clean up all driver's outstanding SCSI I/Os */
    lpfc_sli_flush_fcp_rings(phba);
}
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * a device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *     PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *     PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

    switch (state) {
    case pci_channel_io_normal:
        /* Non-fatal error, prepare for recovery */
        lpfc_sli_prep_dev_for_recover(phba);
        return PCI_ERS_RESULT_CAN_RECOVER;
    case pci_channel_io_frozen:
        /* Fatal error, prepare for slot reset */
        lpfc_sli_prep_dev_for_reset(phba);
        return PCI_ERS_RESULT_NEED_RESET;
    case pci_channel_io_perm_failure:
        /* Permanent failure, prepare for device down */
        lpfc_sli_prep_dev_for_perm_failure(phba);
        return PCI_ERS_RESULT_DISCONNECT;
    default:
        /* Unknown state, prepare and request slot reset */
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "0472 Unknown PCI error state: x%x\n", state);
        lpfc_sli_prep_dev_for_reset(phba);
        return PCI_ERS_RESULT_NEED_RESET;
    }
}
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-3 interface spec. This is called after the PCI bus has
 * been reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device,
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *     PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
    struct lpfc_sli *psli = &phba->sli;
    uint32_t intr_mode;

    dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
    if (pci_enable_device_mem(pdev)) {
        printk(KERN_ERR "lpfc: Cannot re-enable "
            "PCI device after reset.\n");
        return PCI_ERS_RESULT_DISCONNECT;
    }

    pci_restore_state(pdev);

    /*
     * As the new kernel behavior of pci_restore_state() API call clears
     * device saved_state flag, need to save the restored state again.
     */
    pci_save_state(pdev);

    if (pdev->is_busmaster)
        pci_set_master(pdev);

    spin_lock_irq(&phba->hbalock);
    psli->sli_flag &= ~LPFC_SLI_ACTIVE;
    spin_unlock_irq(&phba->hbalock);

    /* Configure and enable interrupt */
    intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
    if (intr_mode == LPFC_INTR_ERROR) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "0427 Cannot re-enable interrupt after "
                "slot reset.\n");
        return PCI_ERS_RESULT_DISCONNECT;
    } else
        phba->intr_mode = intr_mode;

    /* Take device offline, it will perform cleanup */
    lpfc_offline_prep(phba);
    lpfc_offline(phba);
    lpfc_sli_brdrestart(phba);

    /* Log the current active interrupt mode */
    lpfc_log_intr_mode(phba, phba->intr_mode);

    return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-3 interface spec. It is called when kernel error recovery
 * tells the lpfc driver that it is ok to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

    /* Bring device online, it will be no-op for non-fatal error resume */
    lpfc_online(phba);

    /* Clean up Advanced Error Reporting (AER) if needed */
    if (phba->hba_flag & HBA_AER_ENABLED)
        pci_cleanup_aer_uncorrect_error_status(pdev);
}
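
/*
 * Editor's note: the HBA_AER_ENABLED test above pairs a cleanup call with
 * an earlier pci_enable_pcie_error_reporting() made when AER was turned
 * on for the function. A minimal sketch of that enable/cleanup pairing
 * (from <linux/aer.h>, already included by this file); aer_enabled is a
 * hypothetical flag, not an lpfc field:
 */
#if 0	/* illustrative sketch only, not compiled */
static bool aer_enabled;

static void example_aer_pairing(struct pci_dev *pdev)
{
    /* at attach/config time: opt in to PCIe AER for this function */
    if (pci_enable_pcie_error_reporting(pdev) == 0)
        aer_enabled = true;

    /* ... later, in the .resume error handler after recovery ... */
    if (aer_enabled)
        pci_cleanup_aer_uncorrect_error_status(pdev);
}
#endif
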
/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve.
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
    int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

    if (phba->sli_rev == LPFC_SLI_REV4) {
        if (max_xri <= 100)
            return 10;
        else if (max_xri <= 256)
            return 25;
        else if (max_xri <= 512)
            return 50;
        else if (max_xri <= 1024)
            return 100;
        else
            return 150;
    } else
        return 0;
}
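
/*
 * Editor's note: a hedged sketch of how a caller might use the ELS/CT
 * reservation above, carving that share out of a total queue depth. The
 * names total_iocbs and scsi_iocbs are hypothetical; this is not a
 * calculation lifted from this driver.
 */
#if 0	/* illustrative sketch only, not compiled */
static void example_els_reservation(struct lpfc_hba *phba)
{
    int total_iocbs = phba->cfg_hba_queue_depth;
    int scsi_iocbs;

    /* whatever is not reserved for ELS/CT remains for SCSI I/O */
    scsi_iocbs = total_iocbs - lpfc_sli4_get_els_iocb_cnt(phba);
    (void)scsi_iocbs;
}
#endif
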
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem for a device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI
 * device-specific information of the device and driver to see if the driver
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the
 * HBA, it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return codes
 *     0 - driver can claim the device
 *     negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
    struct lpfc_hba   *phba;
    struct lpfc_vport *vport = NULL;
    struct Scsi_Host  *shost = NULL;
    int error;
    uint32_t cfg_mode, intr_mode;
    int mcnt;

    /* Allocate memory for HBA structure */
    phba = lpfc_hba_alloc(pdev);
    if (!phba)
        return -ENOMEM;

    /* Perform generic PCI device enabling operation */
    error = lpfc_enable_pci_dev(phba);
    if (error) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1409 Failed to enable pci device.\n");
        goto out_free_phba;
    }

    /* Set up SLI API function jump table for PCI-device group-1 HBAs */
    error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
    if (error)
        goto out_disable_pci_dev;

    /* Set up SLI-4 specific device PCI memory space */
    error = lpfc_sli4_pci_mem_setup(phba);
    if (error) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1410 Failed to set up pci memory space.\n");
        goto out_disable_pci_dev;
    }

    /* Set up phase-1 common device driver resources */
    error = lpfc_setup_driver_resource_phase1(phba);
    if (error) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1411 Failed to set up driver resource.\n");
        goto out_unset_pci_mem_s4;
    }

    /* Set up SLI-4 Specific device driver resources */
    error = lpfc_sli4_driver_resource_setup(phba);
    if (error) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1412 Failed to set up driver resource.\n");
        goto out_unset_pci_mem_s4;
    }

    /* Initialize and populate the iocb list per host */

    lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
            "2821 initialize iocb list %d.\n",
            phba->cfg_iocb_cnt*1024);
    error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);

    if (error) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1413 Failed to initialize iocb list.\n");
        goto out_unset_driver_resource_s4;
    }

    INIT_LIST_HEAD(&phba->active_rrq_list);

    /* Set up common device driver resources */
    error = lpfc_setup_driver_resource_phase2(phba);
    if (error) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1414 Failed to set up driver resource.\n");
        goto out_free_iocb_list;
    }

    /* Create SCSI host to the physical port */
    error = lpfc_create_shost(phba);
    if (error) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1415 Failed to create scsi host.\n");
        goto out_unset_driver_resource;
    }

    /* Configure sysfs attributes */
    vport = phba->pport;
    error = lpfc_alloc_sysfs_attr(vport);
    if (error) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1416 Failed to allocate sysfs attr\n");
        goto out_destroy_shost;
    }

    shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
    /* Now, trying to enable interrupt and bring up the device */
    cfg_mode = phba->cfg_use_msi;
    while (true) {
        /* Put device to a known state before enabling interrupt */
        lpfc_stop_port(phba);
        /* Configure and enable interrupt */
        intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
            lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                    "0426 Failed to enable interrupt.\n");
            error = -ENODEV;
            goto out_free_sysfs_attr;
        }
        /* Default to single FCP EQ for non-MSI-X */
        if (phba->intr_type != MSIX)
            phba->cfg_fcp_eq_count = 1;
        else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
            phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
        /* Set up SLI-4 HBA */
        if (lpfc_sli4_hba_setup(phba)) {
            lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                    "1421 Failed to set up hba\n");
            error = -ENODEV;
            goto out_disable_intr;
        }

        /* Send NOP mbx cmds for non-INTx mode active interrupt test */
        if (intr_mode != 0)
            mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
                                LPFC_ACT_INTR_CNT);

        /* Check active interrupts received only for MSI/MSI-X */
        if (intr_mode == 0 ||
            phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
            /* Log the current active interrupt mode */
            phba->intr_mode = intr_mode;
            lpfc_log_intr_mode(phba, intr_mode);
            break;
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                "0451 Configure interrupt mode (%d) "
                "failed active interrupt test.\n",
                intr_mode);
        /* Unset the previous SLI-4 HBA setup */
        lpfc_sli4_unset_hba(phba);
        /* Try next level of interrupt mode */
        cfg_mode = --intr_mode;
    }

    /* Perform post initialization setup */
    lpfc_post_init_setup(phba);

    /* Check if there are static vports to be created. */
    lpfc_create_static_vport(phba);

    return 0;

out_disable_intr:
    lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
    lpfc_free_sysfs_attr(vport);
out_destroy_shost:
    lpfc_destroy_shost(phba);
out_unset_driver_resource:
    lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
    lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
    lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
    lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
    lpfc_disable_pci_dev(phba);
    if (shost)
        scsi_host_put(shost);
out_free_phba:
    lpfc_hba_free(phba);
    return error;
}
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem for a device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
    struct lpfc_vport **vports;
    struct lpfc_hba *phba = vport->phba;
    int i;

    /* Mark the device unloading flag */
    spin_lock_irq(&phba->hbalock);
    vport->load_flag |= FC_UNLOADING;
    spin_unlock_irq(&phba->hbalock);

    /* Free the HBA sysfs attributes */
    lpfc_free_sysfs_attr(vport);

    /* Release all the vports against this physical port */
    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL)
        for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
            fc_vport_terminate(vports[i]->fc_vport);
    lpfc_destroy_vport_work_array(phba, vports);

    /* Remove FC host and then SCSI host with the physical port */
    fc_remove_host(shost);
    scsi_remove_host(shost);

    /* Perform cleanup on the physical port */
    lpfc_cleanup(vport);

    /*
     * Bring down the SLI Layer. This step disables all interrupts,
     * clears the rings, discards all mailbox commands, and resets
     * the HBA FCoE function.
     */
    lpfc_debugfs_terminate(vport);
    lpfc_sli4_hba_unset(phba);

    spin_lock_irq(&phba->hbalock);
    list_del_init(&vport->listentry);
    spin_unlock_irq(&phba->hbalock);

    /* Perform scsi free before driver resource_unset since scsi
     * buffers are released to their corresponding pools here.
     */
    lpfc_scsi_free(phba);
    lpfc_sli4_driver_resource_unset(phba);

    /* Unmap adapter Control and Doorbell registers */
    lpfc_sli4_pci_mem_unset(phba);

    /* Release PCI resources and disable device's PCI function */
    scsi_host_put(shost);
    lpfc_disable_pci_dev(phba);

    /* Finally, free the driver's device data structure */
    lpfc_hba_free(phba);

    return;
}
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements for a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call; therefore, the driver sets the device to the PCI_D3hot state
 * in PCI config space instead of setting it according to the @msg provided
 * by the PM.
 *
 * Return code
 *     0 - driver suspended the device
 *     Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

    lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
            "2843 PCI device Power Management suspend.\n");

    /* Bring down the device */
    lpfc_offline_prep(phba);
    lpfc_offline(phba);
    kthread_stop(phba->worker_thread);

    /* Disable interrupt from device */
    lpfc_sli4_disable_intr(phba);

    /* Save device state to PCI config space */
    pci_save_state(pdev);
    pci_set_power_state(pdev, PCI_D3hot);

    return 0;
}
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that the driver
 * implements only the minimum PM requirements for a power-aware driver: all
 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method
 * call are treated as SUSPEND, and the driver fully reinitializes its device
 * during the resume() method call; therefore, the device is set to PCI_D0
 * directly in PCI config space before restoring the state.
 *
 * Return code
 *     0 - driver resumed the device
 *     Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
    uint32_t intr_mode;
    int error;

    lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
            "0292 PCI device Power Management resume.\n");

    /* Restore device state from PCI config space */
    pci_set_power_state(pdev, PCI_D0);
    pci_restore_state(pdev);

    /*
     * As the new kernel behavior of pci_restore_state() API call clears
     * device saved_state flag, need to save the restored state again.
     */
    pci_save_state(pdev);

    if (pdev->is_busmaster)
        pci_set_master(pdev);

    /* Startup the kernel thread for this host adapter. */
    phba->worker_thread = kthread_run(lpfc_do_work, phba,
                    "lpfc_worker_%d", phba->brd_no);
    if (IS_ERR(phba->worker_thread)) {
        error = PTR_ERR(phba->worker_thread);
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "0293 PM resume failed to start worker "
                "thread: error=x%x.\n", error);
        return error;
    }

    /* Configure and enable interrupt */
    intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
    if (intr_mode == LPFC_INTR_ERROR) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "0294 PM resume Failed to enable interrupt\n");
        return -EIO;
    } else
        phba->intr_mode = intr_mode;

    /* Restart HBA and bring it online */
    lpfc_sli_brdrestart(phba);
    lpfc_online(phba);

    /* Log the current active interrupt mode */
    lpfc_log_intr_mode(phba, phba->intr_mode);

    return 0;
}
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
    struct lpfc_sli *psli = &phba->sli;
    struct lpfc_sli_ring *pring;

    lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
            "2828 PCI channel I/O abort preparing for recovery\n");
    /*
     * There may be errored I/Os through HBA, abort all I/Os on txcmplq
     * and let the SCSI mid-layer retry them to recover.
     */
    pring = &psli->ring[psli->fcp_ring];
    lpfc_sli_abort_iocb_ring(phba, pring);
}
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
    lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
            "2826 PCI channel disable preparing for reset\n");

    /* Block any management I/Os to the device */
    lpfc_block_mgmt_io(phba);

    /* Block all SCSI devices' I/Os on the host */
    lpfc_scsi_dev_block(phba);

    /* stop all timers */
    lpfc_stop_hba_timers(phba);

    /* Disable interrupt and pci device */
    lpfc_sli4_disable_intr(phba);
    pci_disable_device(phba->pcidev);

    /* Flush all driver's outstanding SCSI I/Os as we are to reset */
    lpfc_sli_flush_fcp_rings(phba);
}
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
    lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
            "2827 PCI channel permanent disable for failure\n");

    /* Block all SCSI devices' I/Os on the host */
    lpfc_scsi_dev_block(phba);

    /* stop all timers */
    lpfc_stop_hba_timers(phba);

    /* Clean up all driver's outstanding SCSI I/Os */
    lpfc_sli_flush_fcp_rings(phba);
}
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *     PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

    switch (state) {
    case pci_channel_io_normal:
        /* Non-fatal error, prepare for recovery */
        lpfc_sli4_prep_dev_for_recover(phba);
        return PCI_ERS_RESULT_CAN_RECOVER;
    case pci_channel_io_frozen:
        /* Fatal error, prepare for slot reset */
        lpfc_sli4_prep_dev_for_reset(phba);
        return PCI_ERS_RESULT_NEED_RESET;
    case pci_channel_io_perm_failure:
        /* Permanent failure, prepare for device down */
        lpfc_sli4_prep_dev_for_perm_failure(phba);
        return PCI_ERS_RESULT_DISCONNECT;
    default:
        /* Unknown state, prepare and request slot reset */
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2825 Unknown PCI error state: x%x\n", state);
        lpfc_sli4_prep_dev_for_reset(phba);
        return PCI_ERS_RESULT_NEED_RESET;
    }
}
/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot. During
 * the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 *     PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
    struct lpfc_sli *psli = &phba->sli;
    uint32_t intr_mode;

    dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
    if (pci_enable_device_mem(pdev)) {
        printk(KERN_ERR "lpfc: Cannot re-enable "
            "PCI device after reset.\n");
        return PCI_ERS_RESULT_DISCONNECT;
    }

    pci_restore_state(pdev);
    if (pdev->is_busmaster)
        pci_set_master(pdev);

    spin_lock_irq(&phba->hbalock);
    psli->sli_flag &= ~LPFC_SLI_ACTIVE;
    spin_unlock_irq(&phba->hbalock);

    /* Configure and enable interrupt */
    intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
    if (intr_mode == LPFC_INTR_ERROR) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2824 Cannot re-enable interrupt after "
                "slot reset.\n");
        return PCI_ERS_RESULT_DISCONNECT;
    } else
        phba->intr_mode = intr_mode;

    /* Log the current active interrupt mode */
    lpfc_log_intr_mode(phba, phba->intr_mode);

    return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called when kernel error recovery
 * tells the lpfc driver that it is ok to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

    /*
     * In case of slot reset, as function reset is performed through
     * mailbox command which needs DMA to be enabled, this operation
     * has to be moved to the io resume phase. Taking device offline
     * will perform the necessary cleanup.
     */
    if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
        /* Perform device reset */
        lpfc_offline_prep(phba);
        lpfc_offline(phba);
        lpfc_sli_brdrestart(phba);
        /* Bring the device back online */
        lpfc_online(phba);
    }

    /* Clean up Advanced Error Reporting (AER) if needed */
    if (phba->hba_flag & HBA_AER_ENABLED)
        pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at PCI device-specific information of the device and driver to see
 * if the driver can support this kind of device. If the match is successful,
 * the driver core invokes this routine. This routine dispatches the action
 * to the proper SLI-3 or SLI-4 device probing routine, which will do all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return codes
 *     0 - driver can claim the device
 *     negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
    int rc;
    struct lpfc_sli_intf intf;

    if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
        return -ENODEV;

    if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
        (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
        rc = lpfc_pci_probe_one_s4(pdev, pid);
    else
        rc = lpfc_pci_probe_one_s3(pdev, pid);

    return rc;
}
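
/*
 * Editor's note: bf_get() above is an lpfc macro that extracts a named
 * field from a register word via a mask and shift. The equivalent
 * open-coded test looks like the sketch below; the offset, shifts, masks
 * and compare values are hypothetical placeholders, not the real
 * LPFC_SLI_INTF register layout.
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_bitfield_dispatch(struct pci_dev *pdev)
{
    u32 word0;

    if (pci_read_config_dword(pdev, 0x58 /* hypothetical offset */, &word0))
        return -ENODEV;
    if (((word0 >> 29) & 0x7) == 0x6 &&     /* hypothetical "valid" field */
        ((word0 >> 4) & 0xf) == 0x4)        /* hypothetical "sli rev" field */
        return 4;                           /* treat as an SLI-4 part */
    return 3;                               /* otherwise SLI-3 */
}
#endif
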
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

    switch (phba->pci_dev_grp) {
    case LPFC_PCI_DEV_LP:
        lpfc_pci_remove_one_s3(pdev);
        break;
    case LPFC_PCI_DEV_OC:
        lpfc_pci_remove_one_s4(pdev);
        break;
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1424 Invalid PCI device group: 0x%x\n",
                phba->pci_dev_grp);
        break;
    }
    return;
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *     0 - driver suspended the device
 *     Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
    int rc = -ENODEV;

    switch (phba->pci_dev_grp) {
    case LPFC_PCI_DEV_LP:
        rc = lpfc_pci_suspend_one_s3(pdev, msg);
        break;
    case LPFC_PCI_DEV_OC:
        rc = lpfc_pci_suspend_one_s4(pdev, msg);
        break;
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1425 Invalid PCI device group: 0x%x\n",
                phba->pci_dev_grp);
        break;
    }
    return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *     0 - driver resumed the device
 *     Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
    int rc = -ENODEV;

    switch (phba->pci_dev_grp) {
    case LPFC_PCI_DEV_LP:
        rc = lpfc_pci_resume_one_s3(pdev);
        break;
    case LPFC_PCI_DEV_OC:
        rc = lpfc_pci_resume_one_s4(pdev);
        break;
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1426 Invalid PCI device group: 0x%x\n",
                phba->pci_dev_grp);
        break;
    }
    return rc;
}
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *     PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
    pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

    switch (phba->pci_dev_grp) {
    case LPFC_PCI_DEV_LP:
        rc = lpfc_io_error_detected_s3(pdev, state);
        break;
    case LPFC_PCI_DEV_OC:
        rc = lpfc_io_error_detected_s4(pdev, state);
        break;
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1427 Invalid PCI device group: 0x%x\n",
                phba->pci_dev_grp);
        break;
    }
    return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 *     PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
    pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

    switch (phba->pci_dev_grp) {
    case LPFC_PCI_DEV_LP:
        rc = lpfc_io_slot_reset_s3(pdev);
        break;
    case LPFC_PCI_DEV_OC:
        rc = lpfc_io_slot_reset_s4(pdev);
        break;
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1428 Invalid PCI device group: 0x%x\n",
                phba->pci_dev_grp);
        break;
    }
    return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
    struct Scsi_Host *shost = pci_get_drvdata(pdev);
    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

    switch (phba->pci_dev_grp) {
    case LPFC_PCI_DEV_LP:
        lpfc_io_resume_s3(pdev);
        break;
    case LPFC_PCI_DEV_OC:
        lpfc_io_resume_s4(pdev);
        break;
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "1429 Invalid PCI device group: 0x%x\n",
                phba->pci_dev_grp);
        break;
    }
    return;
}
static struct pci_device_id lpfc_id_table[] = {
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
        PCI_ANY_ID, PCI_ANY_ID, },
    {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
        PCI_ANY_ID, PCI_ANY_ID, },
    { 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);
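
/*
 * Editor's note: MODULE_DEVICE_TABLE(pci, ...) exports the table above so
 * userspace (depmod/udev) can autoload the module when a matching device
 * appears. In-kernel code can run the same match explicitly with
 * pci_match_id(), as the hedged sketch below shows:
 */
#if 0	/* illustrative sketch only, not compiled */
static bool example_is_lpfc_device(struct pci_dev *pdev)
{
    const struct pci_device_id *id = pci_match_id(lpfc_id_table, pdev);

    return id != NULL;  /* pdev is one of the adapters this driver claims */
}
#endif
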
static struct pci_error_handlers lpfc_err_handler = {
    .error_detected = lpfc_io_error_detected,
    .slot_reset = lpfc_io_slot_reset,
    .resume = lpfc_io_resume,
};
static struct pci_driver lpfc_driver = {
    .name       = LPFC_DRIVER_NAME,
    .id_table   = lpfc_id_table,
    .probe      = lpfc_pci_probe_one,
    .remove     = __devexit_p(lpfc_pci_remove_one),
    .suspend    = lpfc_pci_suspend_one,
    .resume     = lpfc_pci_resume_one,
    .err_handler = &lpfc_err_handler,
};
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 *     0 - successful
 *     -ENOMEM - FC attach transport failed
 *     all others - failed
 **/
static int __init
lpfc_init(void)
{
    int error = 0;

    printk(LPFC_MODULE_DESC "\n");
    printk(LPFC_COPYRIGHT "\n");

    if (lpfc_enable_npiv) {
        lpfc_transport_functions.vport_create = lpfc_vport_create;
        lpfc_transport_functions.vport_delete = lpfc_vport_delete;
    }
    lpfc_transport_template =
                fc_attach_transport(&lpfc_transport_functions);
    if (lpfc_transport_template == NULL)
        return -ENOMEM;
    if (lpfc_enable_npiv) {
        lpfc_vport_transport_template =
            fc_attach_transport(&lpfc_vport_transport_functions);
        if (lpfc_vport_transport_template == NULL) {
            fc_release_transport(lpfc_transport_template);
            return -ENOMEM;
        }
    }
    error = pci_register_driver(&lpfc_driver);
    if (error) {
        fc_release_transport(lpfc_transport_template);
        if (lpfc_enable_npiv)
            fc_release_transport(lpfc_vport_transport_template);
    }

    return error;
}
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
    pci_unregister_driver(&lpfc_driver);
    fc_release_transport(lpfc_transport_template);
    if (lpfc_enable_npiv)
        fc_release_transport(lpfc_vport_transport_template);
    if (_dump_buf_data) {
        printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
                "_dump_buf_data at 0x%p\n",
                (1L << _dump_buf_data_order), _dump_buf_data);
        free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
    }

    if (_dump_buf_dif) {
        printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
                "_dump_buf_dif at 0x%p\n",
                (1L << _dump_buf_dif_order), _dump_buf_dif);
        free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
    }
}
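
/*
 * Editor's note: the free_pages() calls above mirror allocations made
 * elsewhere with __get_free_pages(); an order-N allocation is 2^N
 * contiguous pages, which is why the messages print (1L << order). A
 * minimal sketch of that alloc/free pairing:
 */
#if 0	/* illustrative sketch only, not compiled */
static void example_page_order_pairing(void)
{
    unsigned long order = 2;    /* 2^2 = 4 contiguous pages */
    unsigned long buf = __get_free_pages(GFP_KERNEL, order);

    if (buf)
        free_pages(buf, order); /* must free with the same order */
}
#endif
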
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);