2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
19 * bfad.c Linux driver PCI interface module.
21 #include <linux/module.h>
22 #include <linux/kthread.h>
23 #include <linux/errno.h>
24 #include <linux/sched.h>
25 #include <linux/init.h>
27 #include <linux/pci.h>
28 #include <linux/firmware.h>
29 #include <asm/uaccess.h>
30 #include <asm/fcntl.h>
/* Driver-wide trace handle, instance-list mutex and module-parameter
 * backing storage (defaults below; overridable at load time). */
38 BFA_TRC_FILE(LDRV, BFAD);
39 DEFINE_MUTEX(bfad_mutex);
43 static int num_sgpgs_parm;
45 char *host_name, *os_name, *os_patch;
46 int num_rports, num_ios, num_tms;
47 int num_fcxps, num_ufbufs;
48 int reqq_size, rspq_size, num_sgpgs;
49 int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
50 int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
51 int bfa_io_max_sge = BFAD_IO_MAX_SGE;
52 int bfa_log_level = 3; /* WARNING log level */
53 int ioc_auto_recover = BFA_TRUE;
54 int bfa_linkup_delay = -1;
55 int fdmi_enable = BFA_TRUE;
56 int pcie_max_read_reqsz;
57 int bfa_debugfs_enable = 1;
58 int msix_disable_cb = 0, msix_disable_ct = 0;
/* Firmware image blobs and sizes, filled in by bfad_load_fwimg(). */
60 u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
61 u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
/* Per-vector IRQ name suffixes for CT and CB ASIC families; indexed by
 * MSI-X vector number in bfad_install_msix_handler(). */
63 static const char *msix_name_ct[] = {
64 "cpe0", "cpe1", "cpe2", "cpe3",
65 "rme0", "rme1", "rme2", "rme3",
68 static const char *msix_name_cb[] = {
69 "cpe0", "cpe1", "cpe2", "cpe3",
70 "rme0", "rme1", "rme2", "rme3",
71 "eemc", "elpu0", "elpu1", "epss", "mlpu" };
73 MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
74 MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
75 MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
/* Module parameters: each pairs a module_param() registration with its
 * sysfs description.  All are root-writable (S_IWUSR) at runtime. */
77 module_param(os_name, charp, S_IRUGO | S_IWUSR);
78 MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
79 module_param(os_patch, charp, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
81 module_param(host_name, charp, S_IRUGO | S_IWUSR);
82 MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
83 module_param(num_rports, int, S_IRUGO | S_IWUSR);
84 MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
85 "(physical/logical), default=1024");
86 module_param(num_ios, int, S_IRUGO | S_IWUSR);
87 MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
88 module_param(num_tms, int, S_IRUGO | S_IWUSR);
89 MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
90 module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
91 MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
92 module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
93 MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
94 "buffers, default=64");
95 module_param(reqq_size, int, S_IRUGO | S_IWUSR);
96 MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
98 module_param(rspq_size, int, S_IRUGO | S_IWUSR);
99 MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
101 module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
102 MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
103 module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
104 MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
106 module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
107 MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
108 module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
109 MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
110 module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
111 MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
112 "Range[Critical:1|Error:2|Warning:3|Info:4]");
113 module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
114 MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
115 "Range[off:0|on:1]");
116 module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
117 MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
118 "boot port. Otherwise 10 secs in RHEL4 & 0 for "
119 "[RHEL5, SLES10, ESX40] Range[>0]");
120 module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
121 MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
122 "for Brocade-415/425/815/825 cards, default=0, "
123 " Range[false:0|true:1]");
124 module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
125 MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
126 "if possible for Brocade-1010/1020/804/1007/902/1741 "
127 "cards, default=0, Range[false:0|true:1]");
128 module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
129 MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
130 "Range[false:0|true:1]");
131 module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
132 MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
133 "(use system setting), Range[128|256|512|1024|2048|4096]");
134 module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
135 MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
136 " Range[false:0|true:1]");
/* Forward declarations of the per-state handlers of the bfad driver
 * state machine (uninit -> created -> initializing -> operational ->
 * fcs_exit -> stopping, with a failed side state). */
139 bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
141 bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
143 bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
145 bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
147 bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
149 bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
151 bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
/*
 * Beginning state for the driver instance, awaiting the pci_probe event
 */
157 bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
159 bfa_trc(bfad, event);
/* On create: move to CREATED and spawn the bfad_worker kthread.  If the
 * thread cannot be created, report it via BFAD_E_KTHREAD_CREATE_FAILED;
 * otherwise continue with BFAD_E_INIT. */
163 bfa_sm_set_state(bfad, bfad_sm_created);
164 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
165 "%s", "bfad_worker");
166 if (IS_ERR(bfad->bfad_tsk)) {
167 printk(KERN_INFO "bfad[%d]: Kernel thread "
168 "creation failed!\n", bfad->inst_no);
169 bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
171 bfa_sm_send_event(bfad, BFAD_E_INIT);
175 /* Ignore stop; already in uninit */
179 bfa_sm_fault(bfad, event);
/*
 * Driver Instance is created, awaiting event INIT to initialize the bfad
 */
187 bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
191 bfa_trc(bfad, event);
195 bfa_sm_set_state(bfad, bfad_sm_initializing);
197 init_completion(&bfad->comp);
199 /* Enable Interrupt and wait bfa_init completion */
200 if (bfad_setup_intr(bfad)) {
201 printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
203 bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
207 spin_lock_irqsave(&bfad->bfad_lock, flags);
208 bfa_iocfc_init(&bfad->bfa);
209 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
211 /* Set up interrupt handler for each vectors */
212 if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
213 bfad_install_msix_handler(bfad)) {
214 printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
215 __func__, bfad->inst_no);
218 bfad_init_timer(bfad);
/* Block here until bfa_cb_init() signals bfad->comp. */
220 wait_for_completion(&bfad->comp);
222 if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
223 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
225 bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
226 bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
/* Kthread creation failed in uninit: fall back to the uninit state. */
231 case BFAD_E_KTHREAD_CREATE_FAILED:
232 bfa_sm_set_state(bfad, bfad_sm_uninit);
236 bfa_sm_fault(bfad, event);
/* INITIALIZING state: react to HAL init success/failure events. */
241 bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
246 bfa_trc(bfad, event);
249 case BFAD_E_INIT_SUCCESS:
/* The worker kthread is no longer needed; stop it before taking the
 * lock (kthread_stop() may sleep), then clear the pointer under it. */
250 kthread_stop(bfad->bfad_tsk);
251 spin_lock_irqsave(&bfad->bfad_lock, flags);
252 bfad->bfad_tsk = NULL;
253 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
255 retval = bfad_start_ops(bfad);
256 if (retval != BFA_STATUS_OK)
258 bfa_sm_set_state(bfad, bfad_sm_operational);
261 case BFAD_E_INTR_INIT_FAILED:
262 bfa_sm_set_state(bfad, bfad_sm_uninit);
263 kthread_stop(bfad->bfad_tsk);
264 spin_lock_irqsave(&bfad->bfad_lock, flags);
265 bfad->bfad_tsk = NULL;
266 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
269 case BFAD_E_INIT_FAILED:
270 bfa_sm_set_state(bfad, bfad_sm_failed);
273 bfa_sm_fault(bfad, event);
/* FAILED state: permit a late init-success retry, or tear down partial
 * configuration on stop/exit events. */
278 bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
282 bfa_trc(bfad, event);
285 case BFAD_E_INIT_SUCCESS:
286 retval = bfad_start_ops(bfad);
287 if (retval != BFA_STATUS_OK)
289 bfa_sm_set_state(bfad, bfad_sm_operational);
/* Undo whatever configuration completed before the failure. */
293 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
294 bfad_uncfg_pport(bfad);
295 if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
296 bfad_im_probe_undo(bfad);
297 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
302 case BFAD_E_EXIT_COMP:
303 bfa_sm_set_state(bfad, bfad_sm_uninit);
304 bfad_remove_intr(bfad);
305 del_timer_sync(&bfad->hal_tmo);
309 bfa_sm_fault(bfad, event);
/* OPERATIONAL state: a stop request begins the FCS exit sequence. */
314 bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
316 bfa_trc(bfad, event);
320 bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
325 bfa_sm_fault(bfad, event);
/* FCS_EXIT state: wait for FCS exit completion, then move to stopping. */
330 bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
332 bfa_trc(bfad, event);
335 case BFAD_E_FCS_EXIT_COMP:
336 bfa_sm_set_state(bfad, bfad_sm_stopping);
341 bfa_sm_fault(bfad, event);
/* STOPPING state: on exit completion release IRQs, the HAL timer, the
 * IM probe resources and the physical port configuration. */
346 bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
348 bfa_trc(bfad, event);
351 case BFAD_E_EXIT_COMP:
352 bfa_sm_set_state(bfad, bfad_sm_uninit);
353 bfad_remove_intr(bfad);
354 del_timer_sync(&bfad->hal_tmo);
355 bfad_im_probe_undo(bfad);
356 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
357 bfad_uncfg_pport(bfad);
361 bfa_sm_fault(bfad, event);
/* Generic HAL completion callback: record the status and wake whoever
 * is waiting on the embedded completion. */
370 bfad_hcb_comp(void *arg, bfa_status_t status)
372 struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;
374 fcomp->status = status;
375 complete(&fcomp->comp);
/* HAL init-done callback: record the result in bfad_flags and wake the
 * waiter in bfad_sm_created() via bfad->comp. */
382 bfa_cb_init(void *drv, bfa_status_t init_status)
384 struct bfad_s *bfad = drv;
386 if (init_status == BFA_STATUS_OK) {
387 bfad->bfad_flags |= BFAD_HAL_INIT_DONE;
390 * If BFAD_HAL_INIT_FAIL flag is set:
391 * Wake up the kernel thread to start
392 * the bfad operations after HAL init done
394 if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
395 bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
396 wake_up_process(bfad->bfad_tsk);
/* Releases the wait_for_completion(&bfad->comp) in bfad_sm_created(). */
400 complete(&bfad->comp);
/*
 * FCS lport-new callback: select the driver port object matching the
 * vf/vport combination, record its type and roles, and create the IM
 * port when the FCP initiator role is requested.
 */
407 bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
408 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
409 struct bfad_vport_s *vp_drv)
412 struct bfad_port_s *port_drv;
/* The four vf/vport combinations map to four port/base types. */
414 if (!vp_drv && !vf_drv) {
415 port_drv = &bfad->pport;
416 port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
417 } else if (!vp_drv && vf_drv) {
418 port_drv = &vf_drv->base_port;
419 port_drv->pvb_type = BFAD_PORT_VF_BASE;
420 } else if (vp_drv && !vf_drv) {
421 port_drv = &vp_drv->drv_port;
422 port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
424 port_drv = &vp_drv->drv_port;
425 port_drv->pvb_type = BFAD_PORT_VF_VPORT;
428 port_drv->fcs_port = port;
429 port_drv->roles = roles;
431 if (roles & BFA_LPORT_ROLE_FCP_IM) {
432 rc = bfad_im_port_new(bfad, port_drv);
433 if (rc != BFA_STATUS_OK) {
/* Clean up the partially-created IM port on failure. */
434 bfad_im_port_delete(bfad, port_drv);
/* FCS lport-delete callback; deletes the IM port for initiator-role
 * lports.  Only reached from module-removal (rmmod) context. */
443 bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
444 struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
446 struct bfad_port_s *port_drv;
448 /* this will be only called from rmmod context */
449 if (vp_drv && !vp_drv->comp_del) {
450 port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
451 ((vf_drv) ? (&(vf_drv)->base_port) :
453 bfa_trc(bfad, roles);
454 if (roles & BFA_LPORT_ROLE_FCP_IM)
455 bfad_im_port_delete(bfad, port_drv);
/*
 * FCS RPORT alloc callback, after successful PLOGI by FCS
 */
463 bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
464 struct bfad_rport_s **rport_drv)
466 bfa_status_t rc = BFA_STATUS_OK;
/* GFP_ATOMIC: called from FCS context where sleeping is presumably not
 * allowed (likely under the driver spinlock) - TODO confirm. */
468 *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
469 if (*rport_drv == NULL) {
470 rc = BFA_STATUS_ENOMEM;
/* Hand the embedded FCS rport back to the caller. */
474 *rport = &(*rport_drv)->fcs_rport;
/*
 * FCS PBC VPORT Create
 */
484 bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
487 struct bfa_lport_cfg_s port_cfg = {0};
488 struct bfad_vport_s *vport;
491 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
/* Configure the pre-boot vport with the WWNs reported by firmware. */
497 vport->drv_port.bfad = bfad;
498 port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
499 port_cfg.pwwn = pbc_vport.vp_pwwn;
500 port_cfg.nwwn = pbc_vport.vp_nwwn;
501 port_cfg.preboot_vp = BFA_TRUE;
503 rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
506 if (rc != BFA_STATUS_OK) {
/* Track the pre-boot vport; creation completes in bfad_start_ops(). */
511 list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
/* Free every HAL memory chunk: vmalloc'ed KVA elements and
 * DMA-coherent buffers, then reset the meminfo bookkeeping. */
515 bfad_hal_mem_release(struct bfad_s *bfad)
518 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
519 struct bfa_mem_elem_s *meminfo_elem;
521 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
522 meminfo_elem = &hal_meminfo->meminfo[i];
523 if (meminfo_elem->kva != NULL) {
524 switch (meminfo_elem->mem_type) {
525 case BFA_MEM_TYPE_KVA:
526 vfree(meminfo_elem->kva);
528 case BFA_MEM_TYPE_DMA:
529 dma_free_coherent(&bfad->pcidev->dev,
530 meminfo_elem->mem_len,
532 (dma_addr_t) meminfo_elem->dma);
/* Zero the table so a subsequent re-allocation starts clean. */
541 memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
/* Push module-parameter overrides into the HAL firmware/driver config,
 * then copy the effective values back so sysfs shows what is in use. */
545 bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
548 bfa_cfg->fwcfg.num_rports = num_rports;
550 bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
552 bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
554 bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
556 bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
558 bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
560 bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
562 bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;
565 * populate the hal values back to the driver for sysfs use.
566 * otherwise, the default values will be shown as 0 in sysfs
568 num_rports = bfa_cfg->fwcfg.num_rports;
569 num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
570 num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
571 num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
572 num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
573 reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
574 rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
575 num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
/*
 * Allocate all HAL memory elements: KVA via vmalloc, DMA buffers via
 * dma_alloc_coherent.  If a DMA allocation fails, the routine retries
 * with a reduced num_sgpgs (down to min_num_sgpgs) before giving up
 * with BFA_STATUS_ENOMEM.
 */
579 bfad_hal_mem_alloc(struct bfad_s *bfad)
582 struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
583 struct bfa_mem_elem_s *meminfo_elem;
584 dma_addr_t phys_addr;
586 bfa_status_t rc = BFA_STATUS_OK;
589 int min_num_sgpgs = 512;
591 bfa_cfg_get_default(&bfad->ioc_cfg);
594 bfad_update_hal_cfg(&bfad->ioc_cfg);
595 bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
596 bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);
598 for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
599 meminfo_elem = &hal_meminfo->meminfo[i];
600 switch (meminfo_elem->mem_type) {
601 case BFA_MEM_TYPE_KVA:
602 kva = vmalloc(meminfo_elem->mem_len);
604 bfad_hal_mem_release(bfad);
605 rc = BFA_STATUS_ENOMEM;
608 memset(kva, 0, meminfo_elem->mem_len);
609 meminfo_elem->kva = kva;
611 case BFA_MEM_TYPE_DMA:
612 kva = dma_alloc_coherent(&bfad->pcidev->dev,
613 meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
615 bfad_hal_mem_release(bfad);
617 * If we cannot allocate with default
618 * num_sgpages try with half the value.
620 if (num_sgpgs > min_num_sgpgs) {
622 "bfad[%d]: memory allocation failed"
623 " with num_sgpgs: %d\n",
624 bfad->inst_no, num_sgpgs);
625 nextLowerInt(&num_sgpgs);
627 "bfad[%d]: trying to allocate memory"
628 " with num_sgpgs: %d\n",
629 bfad->inst_no, num_sgpgs);
/* Permanent failure: restore num_sgpgs before returning ENOMEM. */
633 if (num_sgpgs_parm > 0)
634 num_sgpgs = num_sgpgs_parm;
638 num_sgpgs *= reset_value;
640 rc = BFA_STATUS_ENOMEM;
/* Success path: undo the retry reductions (scale back by the retry
 * factor, or restore the user-supplied parameter). */
645 if (num_sgpgs_parm > 0)
646 num_sgpgs = num_sgpgs_parm;
648 reset_value = (1 << retry_count);
649 num_sgpgs *= reset_value;
652 memset(kva, 0, meminfo_elem->mem_len);
653 meminfo_elem->kva = kva;
654 meminfo_elem->dma = phys_addr;
/*
 * Create a vport under a vf.
 */
669 bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
670 struct bfa_lport_cfg_s *port_cfg, struct device *dev)
672 struct bfad_vport_s *vport;
673 int rc = BFA_STATUS_OK;
675 struct completion fcomp;
677 vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
679 rc = BFA_STATUS_ENOMEM;
683 vport->drv_port.bfad = bfad;
684 spin_lock_irqsave(&bfad->bfad_lock, flags);
685 rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
687 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
689 if (rc != BFA_STATUS_OK)
/* FCP initiator role needs its own scsi_host. */
692 if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
693 rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
695 if (rc != BFA_STATUS_OK)
696 goto ext_free_fcs_vport;
699 spin_lock_irqsave(&bfad->bfad_lock, flags);
700 bfa_fcs_vport_start(&vport->fcs_vport);
701 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
703 return BFA_STATUS_OK;
/* Error path: delete the FCS vport and wait for the delete callback
 * to signal the on-stack completion before freeing. */
706 spin_lock_irqsave(&bfad->bfad_lock, flags);
707 vport->comp_del = &fcomp;
708 init_completion(vport->comp_del);
709 bfa_fcs_vport_delete(&vport->fcs_vport);
710 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
711 wait_for_completion(vport->comp_del);
/* Periodic HAL timer: beat the BFA timer module under the lock, process
 * dequeued completions outside it, then re-arm the timer. */
719 bfad_bfa_tmo(unsigned long data)
721 struct bfad_s *bfad = (struct bfad_s *) data;
723 struct list_head doneq;
725 spin_lock_irqsave(&bfad->bfad_lock, flags);
727 bfa_timer_beat(&bfad->bfa.timer_mod);
729 bfa_comp_deq(&bfad->bfa, &doneq);
730 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
732 if (!list_empty(&doneq)) {
/* Completions run unlocked; freeing them requires the lock again. */
733 bfa_comp_process(&bfad->bfa, &doneq);
734 spin_lock_irqsave(&bfad->bfad_lock, flags);
735 bfa_comp_free(&bfad->bfa, &doneq);
736 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
739 mod_timer(&bfad->hal_tmo,
740 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
/* Set up and arm the periodic HAL timer (fires every BFA_TIMER_FREQ ms,
 * handler bfad_bfa_tmo). */
744 bfad_init_timer(struct bfad_s *bfad)
746 init_timer(&bfad->hal_tmo);
747 bfad->hal_tmo.function = bfad_bfa_tmo;
748 bfad->hal_tmo.data = (unsigned long)bfad;
750 mod_timer(&bfad->hal_tmo,
751 jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
/*
 * Enable the PCI device, claim its regions, set the DMA mask (64-bit
 * with 32-bit fallback), map BAR0 and cache PCI identity data.
 * Optionally adjusts the PCIe Max Read Request Size when the
 * pcie_max_read_reqsz module parameter is set.
 */
755 bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
759 if (pci_enable_device(pdev)) {
760 printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
764 if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
765 goto out_disable_device;
767 pci_set_master(pdev);
770 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
771 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
772 printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
773 goto out_release_region;
776 bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
778 if (bfad->pci_bar0_kva == NULL) {
779 printk(KERN_ERR "Fail to map bar0\n");
780 goto out_release_region;
/* Cache PCI identity for the HAL and for sysfs/attribute reporting. */
783 bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
784 bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
785 bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
786 bfad->hal_pcidev.device_id = pdev->device;
787 bfad->pci_name = pci_name(pdev);
789 bfad->pci_attr.vendor_id = pdev->vendor;
790 bfad->pci_attr.device_id = pdev->device;
791 bfad->pci_attr.ssid = pdev->subsystem_device;
792 bfad->pci_attr.ssvid = pdev->subsystem_vendor;
793 bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);
797 /* Adjust PCIe Maximum Read Request Size */
798 if (pcie_max_read_reqsz > 0) {
803 switch (pcie_max_read_reqsz) {
/* Offset 0x08 from the PCIe capability is the Device Control register;
 * bits 14:12 (mask 0x7000) encode the max read request size. */
826 pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
827 if (mask != 0xffff && pcie_cap_reg) {
828 pcie_cap_reg += 0x08;
829 pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
830 if ((pcie_dev_ctl & 0x7000) != mask) {
831 printk(KERN_WARNING "BFA[%s]: "
832 "pcie_max_read_request_size is %d, "
833 "reset to %d\n", bfad->pci_name,
834 (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
835 pcie_max_read_reqsz);
837 pcie_dev_ctl &= ~0x7000;
838 pci_write_config_word(pdev, pcie_cap_reg,
839 pcie_dev_ctl | mask);
/* Error unwind labels: release regions, then disable the device. */
847 pci_release_regions(pdev);
849 pci_disable_device(pdev);
/* Undo bfad_pci_init(): unmap BAR0, release regions, disable the device
 * and clear the drvdata pointer. */
855 bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
857 pci_iounmap(pdev, bfad->pci_bar0_kva);
858 pci_release_regions(pdev);
859 pci_disable_device(pdev);
860 pci_set_drvdata(pdev, NULL);
/*
 * Allocate HAL memory, attach the BFA and FCS layers and mark the
 * instance initialized (BFAD_DRV_INIT_DONE).  Returns BFA_STATUS_OK
 * or BFA_STATUS_FAILED on memory-allocation failure.
 */
864 bfad_drv_init(struct bfad_s *bfad)
869 bfad->cfg_data.rport_del_timeout = rport_del_timeout;
870 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
871 bfad->cfg_data.io_max_sge = bfa_io_max_sge;
872 bfad->cfg_data.binding_method = FCP_PWWN_BINDING;
874 rc = bfad_hal_mem_alloc(bfad);
875 if (rc != BFA_STATUS_OK) {
876 printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
879 "Not enough memory to attach all Brocade HBA ports, %s",
880 "System may need more memory.\n");
881 goto out_hal_mem_alloc_failure;
884 bfad->bfa.trcmod = bfad->trcmod;
885 bfad->bfa.plog = &bfad->plog_buf;
886 bfa_plog_init(&bfad->plog_buf);
887 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
890 bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
/* FCS attach runs under the driver lock; fdmi_enabled mirrors the
 * fdmi_enable module parameter. */
894 spin_lock_irqsave(&bfad->bfad_lock, flags);
895 bfad->bfa_fcs.trcmod = bfad->trcmod;
896 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
897 bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
898 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
900 bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
902 return BFA_STATUS_OK;
904 out_hal_mem_alloc_failure:
905 return BFA_STATUS_FAILED;
/* Stop the IOC and wait for it, kill the HAL timer, disable ISRs,
 * detach BFA, free IRQs and HAL memory; clears BFAD_DRV_INIT_DONE. */
909 bfad_drv_uninit(struct bfad_s *bfad)
913 spin_lock_irqsave(&bfad->bfad_lock, flags);
914 init_completion(&bfad->comp);
915 bfa_iocfc_stop(&bfad->bfa);
916 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
917 wait_for_completion(&bfad->comp);
919 del_timer_sync(&bfad->hal_tmo);
920 bfa_isr_disable(&bfad->bfa);
921 bfa_detach(&bfad->bfa);
922 bfad_remove_intr(bfad);
923 bfad_hal_mem_release(bfad);
925 bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
/* Start IOC and FCS fabric operation under the driver lock, then flush
 * any pending IM workqueue items. */
929 bfad_drv_start(struct bfad_s *bfad)
933 spin_lock_irqsave(&bfad->bfad_lock, flags);
934 bfa_iocfc_start(&bfad->bfa);
935 bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
936 bfad->bfad_flags |= BFAD_HAL_START_DONE;
937 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
940 flush_workqueue(bfad->im->drv_workq);
/* Exit FCS, wait for the completion signalled by the exit callback,
 * then report BFAD_E_FCS_EXIT_COMP to the state machine. */
944 bfad_fcs_stop(struct bfad_s *bfad)
948 spin_lock_irqsave(&bfad->bfad_lock, flags);
949 init_completion(&bfad->comp);
950 bfad->pport.flags |= BFAD_PORT_DELETE;
951 bfa_fcs_exit(&bfad->bfa_fcs);
952 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
953 wait_for_completion(&bfad->comp);
955 bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
/* Stop the IOC, wait for completion, then report BFAD_E_EXIT_COMP to
 * the state machine; clears BFAD_HAL_START_DONE. */
959 bfad_stop(struct bfad_s *bfad)
963 spin_lock_irqsave(&bfad->bfad_lock, flags);
964 init_completion(&bfad->comp);
965 bfa_iocfc_stop(&bfad->bfa);
966 bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
967 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
968 wait_for_completion(&bfad->comp);
970 bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
/* Configure the physical port: allocate its scsi_host for the FCP
 * initiator role and set up its debugfs node.  Sets
 * BFAD_CFG_PPORT_DONE on success. */
974 bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
976 int rc = BFA_STATUS_OK;
978 /* Allocate scsi_host for the physical port */
979 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
980 (role & BFA_LPORT_ROLE_FCP_IM)) {
981 if (bfad->pport.im_port == NULL) {
982 rc = BFA_STATUS_FAILED;
986 rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
988 if (rc != BFA_STATUS_OK)
991 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
994 /* Setup the debugfs node for this scsi_host */
995 if (bfa_debugfs_enable)
996 bfad_debugfs_init(&bfad->pport);
998 bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
/* Undo bfad_cfg_pport(): tear down the debugfs node and free the IM
 * scsi_host/port; clears BFAD_CFG_PPORT_DONE. */
1005 bfad_uncfg_pport(struct bfad_s *bfad)
1007 /* Remove the debugfs node for this scsi_host */
1008 kfree(bfad->regdata);
1009 bfad_debugfs_exit(&bfad->pport);
1011 if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
1012 (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
1013 bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
1014 bfad_im_port_clean(bfad->pport.im_port);
1015 kfree(bfad->pport.im_port);
1016 bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
1019 bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
/*
 * Bring the driver to operational state: publish driver/host identity
 * to FCS, configure the physical port, probe the IM layer, start the
 * HAL/FCS and complete any pre-boot (PBC) vport creations queued in
 * bfad->pbc_vport_list.
 *
 * Returns BFA_STATUS_OK on success, BFA_STATUS_FAILED otherwise (the
 * state machine is moved to the failed state when still initializing).
 */
1023 bfad_start_ops(struct bfad_s *bfad) {
1026 unsigned long flags;
1027 struct bfad_vport_s *vport, *vport_new;
1028 struct bfa_fcs_driver_info_s driver_info;
1030 /* Fill the driver_info info to fcs*/
1031 memset(&driver_info, 0, sizeof(driver_info));
1032 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
1033 sizeof(driver_info.version) - 1);
1035 strncpy(driver_info.host_machine_name, host_name,
1036 sizeof(driver_info.host_machine_name) - 1);
1038 strncpy(driver_info.host_os_name, os_name,
1039 sizeof(driver_info.host_os_name) - 1);
1041 strncpy(driver_info.host_os_patch, os_patch,
1042 sizeof(driver_info.host_os_patch) - 1);
1044 strncpy(driver_info.os_device_name, bfad->pci_name,
/*
 * BUGFIX: was sizeof(driver_info.os_device_name - 1) -- the size of a
 * pointer expression (4/8 bytes), which truncated the copied device
 * name.  The intended bound is the field size minus one so that the
 * memset(0) above guarantees NUL termination.
 */
1045 sizeof(driver_info.os_device_name) - 1);
1048 spin_lock_irqsave(&bfad->bfad_lock, flags);
1049 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
1050 bfa_fcs_init(&bfad->bfa_fcs);
1051 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1053 retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
1054 if (retval != BFA_STATUS_OK) {
1055 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
1056 bfa_sm_set_state(bfad, bfad_sm_failed);
1058 return BFA_STATUS_FAILED;
1061 /* BFAD level FC4 IM specific resource allocation */
1062 retval = bfad_im_probe(bfad);
1063 if (retval != BFA_STATUS_OK) {
1064 printk(KERN_WARNING "bfad_im_probe failed\n");
1065 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
1066 bfa_sm_set_state(bfad, bfad_sm_failed);
1067 bfad_im_probe_undo(bfad);
1068 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
1069 bfad_uncfg_pport(bfad);
1071 return BFA_STATUS_FAILED;
1073 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
1075 bfad_drv_start(bfad);
1077 /* Complete pbc vport create */
1078 list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
1080 struct fc_vport_identifiers vid;
1081 struct fc_vport *fc_vport;
1082 char pwwn_buf[BFA_STRING_32];
1084 memset(&vid, 0, sizeof(vid));
1085 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
1086 vid.vport_type = FC_PORTTYPE_NPIV;
1087 vid.disable = false;
1088 vid.node_name = wwn_to_u64((u8 *)
1089 (&((vport->fcs_vport).lport.port_cfg.nwwn)));
1090 vid.port_name = wwn_to_u64((u8 *)
1091 (&((vport->fcs_vport).lport.port_cfg.pwwn)));
1092 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
1094 wwn2str(pwwn_buf, vid.port_name);
1095 printk(KERN_WARNING "bfad%d: failed to create pbc vport"
1096 " %s\n", bfad->inst_no, pwwn_buf);
1098 list_del(&vport->list_entry);
1103 * If bfa_linkup_delay is set to -1 default; try to retrive the
1104 * value using the bfad_get_linkup_delay(); else use the
1105 * passed in module param value as the bfa_linkup_delay.
1107 if (bfa_linkup_delay < 0) {
1108 bfa_linkup_delay = bfad_get_linkup_delay(bfad);
1109 bfad_rport_online_wait(bfad);
1110 bfa_linkup_delay = -1;
1112 bfad_rport_online_wait(bfad);
1114 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
1116 return BFA_STATUS_OK;
/* Worker kthread body: fires BFAD_E_INIT_SUCCESS toward the state
 * machine and clears bfad_tsk under the driver lock. */
1120 bfad_worker(void *ptr)
1122 struct bfad_s *bfad;
1123 unsigned long flags;
1125 bfad = (struct bfad_s *)ptr;
1127 while (!kthread_should_stop()) {
1129 /* Send event BFAD_E_INIT_SUCCESS */
1130 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
1132 spin_lock_irqsave(&bfad->bfad_lock, flags);
1133 bfad->bfad_tsk = NULL;
1134 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/*
 * BFA driver interrupt functions
 */
/* Legacy INTx handler: claim the interrupt via bfa_intx() under the
 * lock, then process dequeued completions outside it. */
1146 bfad_intx(int irq, void *dev_id)
1148 struct bfad_s *bfad = dev_id;
1149 struct list_head doneq;
1150 unsigned long flags;
1153 spin_lock_irqsave(&bfad->bfad_lock, flags);
1154 rc = bfa_intx(&bfad->bfa);
1156 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1160 bfa_comp_deq(&bfad->bfa, &doneq);
1161 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1163 if (!list_empty(&doneq)) {
1164 bfa_comp_process(&bfad->bfa, &doneq);
1166 spin_lock_irqsave(&bfad->bfad_lock, flags);
1167 bfa_comp_free(&bfad->bfa, &doneq);
1168 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1169 bfa_trc_fp(bfad, irq);
/* MSI-X per-vector handler: dispatch to bfa_msix() for this vector
 * under the lock, then process dequeued completions outside it. */
1177 bfad_msix(int irq, void *dev_id)
1179 struct bfad_msix_s *vec = dev_id;
1180 struct bfad_s *bfad = vec->bfad;
1181 struct list_head doneq;
1182 unsigned long flags;
1184 spin_lock_irqsave(&bfad->bfad_lock, flags);
1186 bfa_msix(&bfad->bfa, vec->msix.entry);
1187 bfa_comp_deq(&bfad->bfa, &doneq);
1188 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1190 if (!list_empty(&doneq)) {
1191 bfa_comp_process(&bfad->bfa, &doneq);
1193 spin_lock_irqsave(&bfad->bfad_lock, flags);
1194 bfa_comp_free(&bfad->bfa, &doneq);
1195 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/*
 * Initialize the MSIX entry table.
 */
1205 bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
1206 int mask, int max_bit)
1209 int match = 0x00000001;
/* Walk the vector bitmask; each set bit claims the next table slot and
 * bumps bfad->nvec. */
1211 for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
1213 bfad->msix_tab[bfad->nvec].msix.entry = i;
1214 bfad->msix_tab[bfad->nvec].bfad = bfad;
1215 msix_entries[bfad->nvec].entry = i;
/* Request an IRQ for every enabled MSI-X vector, naming each after the
 * ASIC family's vector table; on failure, free vectors acquired so far. */
1225 bfad_install_msix_handler(struct bfad_s *bfad)
1229 for (i = 0; i < bfad->nvec; i++) {
1230 sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
1232 ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ?
1233 msix_name_ct[i] : msix_name_cb[i]));
1235 error = request_irq(bfad->msix_tab[i].msix.vector,
1236 (irq_handler_t) bfad_msix, 0,
1237 bfad->msix_tab[i].name, &bfad->msix_tab[i]);
1239 bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
/* Unwind: release every vector already requested. */
1243 for (j = 0; j < i; j++)
1244 free_irq(bfad->msix_tab[j].msix.vector,
1245 &bfad->msix_tab[j]);
/*
 * Setup MSIX based interrupt.
 */
1258 bfad_setup_intr(struct bfad_s *bfad)
1261 u32 mask = 0, i, num_bit = 0, max_bit = 0;
1262 struct msix_entry msix_entries[MAX_MSIX_ENTRY];
1263 struct pci_dev *pdev = bfad->pcidev;
1265 /* Call BFA to get the msix map for this PCI function. */
1266 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
1268 /* Set up the msix entry table */
1269 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
/* Use MSI-X unless disabled by module parameter for this ASIC type. */
1271 if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
1272 (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
1274 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
1277 * Only error number of vector is available.
1278 * We don't have a mechanism to map multiple
1279 * interrupts into one vector, so even if we
1280 * can try to request less vectors, we don't
1281 * know how to associate interrupt events to
1282 * vectors. Linux doesn't duplicate vectors
1283 * in the MSIX table for this case.
1286 printk(KERN_WARNING "bfad%d: "
1287 "pci_enable_msix failed (%d),"
1288 " use line based.\n", bfad->inst_no, error);
1293 /* Save the vectors */
1294 for (i = 0; i < bfad->nvec; i++) {
1295 bfa_trc(bfad, msix_entries[i].vector);
1296 bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
1299 bfa_msix_init(&bfad->bfa, bfad->nvec);
1301 bfad->bfad_flags |= BFAD_MSIX_ON;
/* Fallback path: legacy INTx line-based interrupt. */
1309 (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
1310 BFAD_DRIVER_NAME, bfad) != 0) {
1311 /* Enable interrupt handler failed */
/* Release interrupts: free every per-vector MSI-X handler and disable
 * MSI-X, or free the single legacy INTx handler. */
1319 bfad_remove_intr(struct bfad_s *bfad)
1323 if (bfad->bfad_flags & BFAD_MSIX_ON) {
1324 for (i = 0; i < bfad->nvec; i++)
1325 free_irq(bfad->msix_tab[i].msix.vector,
1326 &bfad->msix_tab[i]);
1328 pci_disable_msix(bfad->pcidev);
1329 bfad->bfad_flags &= ~BFAD_MSIX_ON;
1331 free_irq(bfad->pcidev->irq, bfad);
/*
 * PCI probe entry point: allocate the bfad instance, load firmware,
 * initialize PCI resources, register the instance on bfad_list and
 * kick the state machine with BFAD_E_CREATE.  Every failure path
 * unwinds all resources acquired so far.
 */
1339 bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1341 struct bfad_s *bfad;
1342 int error = -ENODEV, retval;
1344 /* For single port cards - only claim function 0 */
1345 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
1346 (PCI_FUNC(pdev->devfn) != 0))
1349 bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
1355 bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
1356 if (!bfad->trcmod) {
1357 printk(KERN_WARNING "Error alloc trace buffer!\n");
1359 goto out_alloc_trace_failure;
1363 bfa_trc_init(bfad->trcmod);
1364 bfa_trc(bfad, bfad_inst);
1366 if (!(bfad_load_fwimg(pdev))) {
1367 kfree(bfad->trcmod);
1368 goto out_alloc_trace_failure;
1371 retval = bfad_pci_init(pdev, bfad);
1373 printk(KERN_WARNING "bfad_pci_init failure!\n");
1375 goto out_pci_init_failure;
/* Register the instance under the global mutex. */
1378 mutex_lock(&bfad_mutex);
1379 bfad->inst_no = bfad_inst++;
1380 list_add_tail(&bfad->list_entry, &bfad_list);
1381 mutex_unlock(&bfad_mutex);
1383 /* Initializing the state machine: State set to uninit */
1384 bfa_sm_set_state(bfad, bfad_sm_uninit);
1386 spin_lock_init(&bfad->bfad_lock);
1387 pci_set_drvdata(pdev, bfad);
1389 bfad->ref_count = 0;
1390 bfad->pport.bfad = bfad;
1391 INIT_LIST_HEAD(&bfad->pbc_vport_list);
1393 retval = bfad_drv_init(bfad);
1394 if (retval != BFA_STATUS_OK)
1395 goto out_drv_init_failure;
1397 bfa_sm_send_event(bfad, BFAD_E_CREATE);
/* Still in uninit after the create event means creation failed. */
1399 if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
1400 goto out_bfad_sm_failure;
1404 out_bfad_sm_failure:
1405 bfa_detach(&bfad->bfa);
1406 bfad_hal_mem_release(bfad);
1407 out_drv_init_failure:
1408 mutex_lock(&bfad_mutex);
1410 list_del(&bfad->list_entry);
1411 mutex_unlock(&bfad_mutex);
1412 bfad_pci_uninit(pdev, bfad);
1413 out_pci_init_failure:
1414 kfree(bfad->trcmod);
1415 out_alloc_trace_failure:
/*
 * bfad_pci_remove - PCI remove entry point: tear down one HBA instance.
 *
 * Stops the per-instance kernel thread (if running), drives the state
 * machine through BFAD_E_STOP, detaches the HAL under bfad_lock, releases
 * HAL memory, unlinks the instance from the global list under bfad_mutex
 * and unmaps the PCI resources.
 * NOTE(review): the extracted listing dropped some physical lines
 * (braces, the final kfree(bfad)); surviving text kept byte-identical.
 */
1425 bfad_pci_remove(struct pci_dev *pdev)
1427 struct bfad_s *bfad = pci_get_drvdata(pdev);
1428 unsigned long flags;
1430 bfa_trc(bfad, bfad->inst_no);
/* bfad_tsk is checked under the lock, but kthread_stop() may sleep,
 * so the lock is released before stopping the thread. */
1432 spin_lock_irqsave(&bfad->bfad_lock, flags);
1433 if (bfad->bfad_tsk != NULL) {
1434 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1435 kthread_stop(bfad->bfad_tsk);
1437 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1440 /* Send Event BFAD_E_STOP */
1441 bfa_sm_send_event(bfad, BFAD_E_STOP);
1443 /* Driver detach and dealloc mem */
1444 spin_lock_irqsave(&bfad->bfad_lock, flags);
1445 bfa_detach(&bfad->bfa);
1446 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1447 bfad_hal_mem_release(bfad);
1449 /* Cleaning the BFAD instance */
1450 mutex_lock(&bfad_mutex);
1452 list_del(&bfad->list_entry);
1453 mutex_unlock(&bfad_mutex);
1454 bfad_pci_uninit(pdev, bfad);
1456 kfree(bfad->trcmod);
/*
 * PCI device IDs claimed by this driver.  The two CT entries also match
 * on PCI class (PCI_CLASS_SERIAL_FIBER) so that only the Fibre Channel
 * function of the converged (CNA) adapter is claimed here.
 * NOTE(review): the per-entry braces and the terminating all-zero
 * sentinel entry appear to have been dropped by the extraction;
 * surviving text kept byte-identical.
 */
1460 struct pci_device_id bfad_id_table[] = {
/* 8G dual-port FC HBA */
1462 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1463 .device = BFA_PCI_DEVICE_ID_FC_8G2P,
1464 .subvendor = PCI_ANY_ID,
1465 .subdevice = PCI_ANY_ID,
/* 8G single-port FC HBA (probe only claims function 0 of it) */
1468 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1469 .device = BFA_PCI_DEVICE_ID_FC_8G1P,
1470 .subvendor = PCI_ANY_ID,
1471 .subdevice = PCI_ANY_ID,
/* CT converged adapter, FC-class function */
1474 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1475 .device = BFA_PCI_DEVICE_ID_CT,
1476 .subvendor = PCI_ANY_ID,
1477 .subdevice = PCI_ANY_ID,
1478 .class = (PCI_CLASS_SERIAL_FIBER << 8),
/* CT dedicated-FC variant */
1482 .vendor = BFA_PCI_VENDOR_ID_BROCADE,
1483 .device = BFA_PCI_DEVICE_ID_CT_FC,
1484 .subvendor = PCI_ANY_ID,
1485 .subdevice = PCI_ANY_ID,
1486 .class = (PCI_CLASS_SERIAL_FIBER << 8),
1495 static struct pci_driver bfad_pci_driver = {
1496 .name = BFAD_DRIVER_NAME,
1497 .id_table = bfad_id_table,
1498 .probe = bfad_pci_probe,
1499 .remove = __devexit_p(bfad_pci_remove),
/*
 * bfad_init - module init: set up the IM (SCSI midlayer) glue, propagate
 * module parameters into the HAL/FCS layers, then register the PCI driver.
 * On pci_register_driver() failure the IM module is unwound again.
 * NOTE(review): the extracted listing dropped the function signature,
 * braces and return statements; surviving text kept byte-identical.
 */
1503  * Driver module init.
1510 printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
1511 BFAD_DRIVER_VERSION);
/* Remember the configured value so it can be restored if the HAL
 * later adjusts num_sgpgs. */
1514 num_sgpgs_parm = num_sgpgs;
1516 error = bfad_im_module_init();
1519 printk(KERN_WARNING "bfad_im_module_init failure\n");
/* Enable the FCP initiator role when the build includes fcpim. */
1523 if (strcmp(FCPI_NAME, " fcpim") == 0)
1524 supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
/* Hand module parameters down to the HAL/FCS layers. */
1526 bfa_auto_recover = ioc_auto_recover;
1527 bfa_fcs_rport_set_del_timeout(rport_del_timeout);
1529 error = pci_register_driver(&bfad_pci_driver);
1531 printk(KERN_WARNING "pci_register_driver failure\n");
/* Unwind the IM module on registration failure. */
1538 bfad_im_module_exit();
/*
 * bfad_exit - module exit: unregister the PCI driver (which removes all
 * instances) and tear down the IM module glue.
 * NOTE(review): the extracted listing dropped the function signature and
 * braces (and possibly trailing cleanup lines, e.g. freeing the cached
 * firmware images -- TODO confirm against the full file); surviving text
 * kept byte-identical.
 */
1543  * Driver module exit.
1548 pci_unregister_driver(&bfad_pci_driver);
1549 bfad_im_module_exit();
1553 /* Firmware handling */
1555 bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
1556 u32 *bfi_image_size, char *fw_name)
1558 const struct firmware *fw;
1560 if (request_firmware(&fw, fw_name, &pdev->dev)) {
1561 printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
1565 *bfi_image = vmalloc(fw->size);
1566 if (NULL == *bfi_image) {
1567 printk(KERN_ALERT "Fail to allocate buffer for fw image "
1568 "size=%x!\n", (u32) fw->size);
1572 memcpy(*bfi_image, fw->data, fw->size);
1573 *bfi_image_size = fw->size/sizeof(u32);
1582 bfad_get_firmware_buf(struct pci_dev *pdev)
1584 if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
1585 if (bfi_image_ct_fc_size == 0)
1586 bfad_read_firmware(pdev, &bfi_image_ct_fc,
1587 &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
1588 return bfi_image_ct_fc;
1589 } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
1590 if (bfi_image_ct_cna_size == 0)
1591 bfad_read_firmware(pdev, &bfi_image_ct_cna,
1592 &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
1593 return bfi_image_ct_cna;
1595 if (bfi_image_cb_fc_size == 0)
1596 bfad_read_firmware(pdev, &bfi_image_cb_fc,
1597 &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
1598 return bfi_image_cb_fc;
/* Module registration and metadata. */
1602 module_init(bfad_init);
1603 module_exit(bfad_exit);
1604 MODULE_LICENSE("GPL");
1605 MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
1606 MODULE_AUTHOR("Brocade Communications Systems, Inc.");
1607 MODULE_VERSION(BFAD_DRIVER_VERSION);