[SCSI] bfa: FC credit recovery and misc bug fixes.
drivers/scsi/bfa/bfa_core.c

/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_reg.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcpim,
	NULL
};

/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itnim_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};

/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_PORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};

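/*
 * Attach the common port module: claim its DMA memory from the
 * meminfo pool and advance the pool's DMA cursors past the block.
 */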
static void
bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
{
	struct bfa_port_s *port = &bfa->modules.port;
	u32 dm_len;
	u8 *dm_kva;
	u64 dm_pa;

	dm_len = bfa_port_meminfo();
	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa  = bfa_meminfo_dma_phys(mi);

	memset(port, 0, sizeof(struct bfa_port_s));
	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, dm_kva, dm_pa);

	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
}

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
};

#define DEF_CFG_NUM_FABRICS	1
#define DEF_CFG_NUM_LPORTS	256
#define DEF_CFG_NUM_CQS		4
#define DEF_CFG_NUM_IOIM_REQS	(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS	128
#define DEF_CFG_NUM_FCXP_REQS	64
#define DEF_CFG_NUM_UF_BUFS	64
#define DEF_CFG_NUM_RPORTS	1024
#define DEF_CFG_NUM_ITNIMS	(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS	256

#define DEF_CFG_NUM_SGPGS	2048
#define DEF_CFG_NUM_REQQ_ELEMS	256
#define DEF_CFG_NUM_RSPQ_ELEMS	64
#define DEF_CFG_NUM_SBOOT_TGTS	16
#define DEF_CFG_NUM_SBOOT_LUNS	16

/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;

/*
 * BFA Interrupt handling functions
 */
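
/*
 * Resume request-queue waiters as long as there is room in the
 * request queue; stop as soon as the queue fills up again.
 */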
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}

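/*
 * Process a response queue: ack the interrupt, dispatch every message
 * between the consumer and producer indices to its class handler,
 * publish the new CI, and resume waiters on the paired request queue.
 */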
static inline void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class](bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	mmiowb();

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

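/*
 * Acknowledge a request-queue interrupt and resume any requests
 * waiting for free request-queue space.
 */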
static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

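/*
 * Catch-all MSIX handler: every interrupt source is serviced through
 * the INTx-style poll in bfa_intx().
 */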
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}

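/*
 * INTx handler: service RME (response) and CPE (request) queue
 * interrupts, then any remaining LPU/error interrupts. Returns
 * BFA_TRUE if any interrupt was pending.
 */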
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if ((intr & (__HFN_INT_RME_Q0 << queue)) && bfa->queue_process)
			bfa_isr_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if ((intr & (__HFN_INT_CPE_Q0 << queue)) && bfa->queue_process)
			bfa_isr_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}

void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}

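/*
 * MSIX handler for a request queue: map the vector back to a queue id.
 */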
void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}

void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}

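/*
 * MSIX handler for a response queue: map the vector back to a queue id.
 */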
void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}

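/*
 * Handle LPU mailbox and error interrupts: run the mailbox ISR, clear
 * halt and PSS error conditions so the status register can clear, and
 * report any remaining error bits to the IOC.
 */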
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = intr & __HFN_INT_LL_HALT;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared so driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
			       bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}

/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */

static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	int i, per_reqq_sz, per_rspq_sz;

	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	/*
	 * Calculate CQ size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		*dm_len = *dm_len + per_reqq_sz;
		*dm_len = *dm_len + per_rspq_sz;
	}

	/*
	 * Calculate Shadow CI/PI size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++)
		*dm_len += (2 * BFA_CACHELINE_SZ);
}

static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s),
			       BFA_CACHELINE_SZ);
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			       BFA_CACHELINE_SZ);
}

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);

	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}

static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}

static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8 *dm_kva;
	u64 dm_pa;
	int i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	int dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}

/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int i;

	bfa->queue_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa->iocfc.hwif.hw_rspq_ack(bfa, i);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);
}

static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else {
		if (bfa->iocfc.cfgdone)
			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
	}
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
	else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/*
 * configure queue registers from firmware response
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int i;
	struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;

	fwcfg->num_cqs = fwcfg->num_cqs;	/* single byte - no swap */
	fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);

	iocfc->cfgdone = BFA_TRUE;

	/*
	 * configure queue register offsets as learnt from firmware
	 */
	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);

	/*
	 * Install MSIX queue handlers
	 */
	bfa_msix_queue_install(bfa);

	/*
	 * Configuration is complete - initialize/start submodules
	 */
	bfa_fcport_init(bfa);

	if (iocfc->action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
	else
		bfa_iocfc_start_submod(bfa);
}
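
/*
 * Reset the producer/consumer indices of all request/response queues.
 */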
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}

/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s *bfa = bfa_arg;

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}

/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		  u32 *dm_len)
{
	/* dma memory for IOC */
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
	bfa_iocfc_cqs_sz(cfg, dm_len);
	*km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/*
 * Attach the IOCFC module: set up IOC callbacks, register mailbox
 * handlers and claim memory for this BFA instance.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	/*
	 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
	 */
	if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
		bfa_ioc_set_fcmode(&bfa->ioc);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}

/*
 * Start IOC initialization by enabling the IOC.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}

void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	default:
		WARN_ON(1);
	}
}
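/*
 * Report interrupt attributes; each value is taken from the driver
 * override in cfginfo when set, otherwise from the firmware response.
 */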
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}

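/*
 * Update interrupt coalescing attributes and, if the IOC is
 * operational, push them to firmware with a SET_INTR request.
 */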
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_lpuid(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}

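/*
 * Program the sense-buffer base address and length used by firmware
 * for IO requests.
 */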
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}

/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa_ioc_enable(&bfa->ioc);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}

/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}

/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to pre-defined
 * values within the BFA library.
 *
 * @param[in] cfg	- pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			  its configuration in this structure.
 *			  The default values for struct bfa_iocfc_cfg_s can be
 *			  fetched using bfa_cfg_get_default() API.
 *
 *			  If cap's boundary check fails, the library will use
 *			  the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo	- pointer to bfa_meminfo_t. This content
 *			  indicates the memory type (see bfa_mem_type_t) and
 *			  amount of memory required.
 *
 *			  Driver should allocate the memory, populate the
 *			  starting address for each block and provide the same
 *			  structure as input parameter to bfa_attach() call.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
{
	int i;
	u32 km_len = 0, dm_len = 0;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type = BFA_MEM_TYPE_KVA;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type = BFA_MEM_TYPE_DMA;

	bfa_iocfc_meminfo(cfg, &km_len, &dm_len);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, &km_len, &dm_len);

	dm_len += bfa_port_meminfo();

	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}

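/*
 * Typical call sequence from the OS driver layer (illustrative sketch
 * only; the allocation step is an assumption about the driver and not
 * code from this file):
 *
 *	struct bfa_iocfc_cfg_s cfg;
 *	struct bfa_meminfo_s meminfo;
 *
 *	bfa_cfg_get_default(&cfg);
 *	bfa_cfg_get_meminfo(&cfg, &meminfo);
 *	// allocate each meminfo element (kernel virtual memory for
 *	// BFA_MEM_TYPE_KVA, coherent DMA memory for BFA_MEM_TYPE_DMA)
 *	// and fill in its kva/dma base address
 *	bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
 */
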
/*
 * Use this function to attach the driver instance to the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in the bfa_init() call).
 *
 * This call will fail if the cap is out of range compared to
 * pre-defined values within the BFA library.
 *
 * @param[out] bfa	Pointer to bfa_t.
 * @param[in] bfad	Opaque handle back to the driver's IOC structure
 * @param[in] cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in] meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in] pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_elem_s *melem;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/*
	 * initialize all memory pointers for iterative allocation
	 */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		melem->kva_curp = melem->kva;
		melem->dma_curp = melem->dma;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);

	bfa_com_port_attach(bfa, meminfo);
}

/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->detach(bfa);
	bfa_ioc_detach(&bfa->ioc);
}

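/*
 * Move all queued completions onto the caller's list.
 */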
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}

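/*
 * Run each queued completion callback with complete == BFA_TRUE.
 */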
void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct list_head *qen;
	struct bfa_cb_qe_s *hcb_qe;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}

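/*
 * Drain queued callbacks with complete == BFA_FALSE, i.e. free them
 * without running the normal completion path.
 */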
void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}

/*
 * Return the list of PCI vendor/device id lists supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
	*pciids = __pciids;
}

/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 * void
 *
 * Special Considerations:
 * @note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}

void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}