[SCSI] bfa: Added support to obtain SFP info.
[linux-2.6-block.git] / drivers / scsi / bfa / bfa_core.c
CommitLineData
7725ccfd 1/*
a36c61f9 2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
7725ccfd
JH
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
f16a1750 18#include "bfad_drv.h"
a36c61f9 19#include "bfa_modules.h"
11189208 20#include "bfi_reg.h"
7725ccfd 21
a36c61f9 22BFA_TRC_FILE(HAL, CORE);
7725ccfd 23
b77ee1fb
MZ
/*
 * BFA module list terminated by NULL.
 *
 * Iterated in order by bfa_iocfc_start_submod() (start) and by
 * bfa_iocfc_disable_submod() (iocdisable); the order below is therefore
 * the module start order.
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	NULL
};
37
/*
 * Message handlers for various modules.
 *
 * Indexed by the BFI message class (m->mhdr.msg_class) in bfa_isr_rspq();
 * classes with no response handler fall through to bfa_isr_unhandled(),
 * which traces the offending header and WARNs.
 */
static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
/*
 * Message handlers for mailbox command classes.
 *
 * Indexed by BFI message class like bfa_isrs[]; only IOCFC handles
 * mailbox responses here. Remaining BFI_MC_MAX slots are implicitly
 * zero-initialized (NULL).
 */
static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_PORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};
88
89
90
/*
 * Attach the port sub-module and hand it its pre-carved chunk of
 * DMA-able memory (kva + physical address cursors from the port
 * DMA segment).
 */
static void
bfa_com_port_attach(struct bfa_s *bfa)
{
	struct bfa_port_s	*port = &bfa->modules.port;
	struct bfa_mem_dma_s	*port_dma = BFA_MEM_PORT_DMA(bfa);

	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
}
100
1a4d8e1b
KG
/*
 * ablk module attach: bind the ablk sub-module to the IOC and claim
 * its DMA memory segment.
 */
static void
bfa_com_ablk_attach(struct bfa_s *bfa)
{
	struct bfa_ablk_s	*ablk = &bfa->modules.ablk;
	struct bfa_mem_dma_s	*ablk_dma = BFA_MEM_ABLK_DMA(bfa);

	bfa_ablk_attach(ablk, &bfa->ioc);
	bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
}
113
148d6103
KG
114static void
115bfa_com_cee_attach(struct bfa_s *bfa)
116{
117 struct bfa_cee_s *cee = &bfa->modules.cee;
118 struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
119
120 cee->trcmod = bfa->trcmod;
121 bfa_cee_attach(cee, &bfa->ioc, bfa);
122 bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
123}
124
51e569aa
KG
125static void
126bfa_com_sfp_attach(struct bfa_s *bfa)
127{
128 struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa);
129 struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
130
131 bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
132 bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
133}
134
/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

/* Pending IOCFC action, tracked in bfa->iocfc.action */
enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};

/*
 * Default driver/firmware resource sizing. These are starting points;
 * the firmware may trim them (see bfa_iocfc_res_recfg()).
 */
#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256

#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16

/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
177
5fbe25c7 178/*
a36c61f9
KG
179 * BFA Interrupt handling functions
180 */
a36c61f9
KG
/*
 * Resume callers that queued themselves waiting for request-queue space.
 *
 * Walks the per-queue wait list with the _safe iterator because each
 * entry is removed before its qresume callback runs (the callback may
 * re-queue the entry or free it).
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}
200
11189208
KG
/*
 * Service one response queue: ack the interrupt, dispatch every message
 * between the consumer and producer indices to its per-class handler,
 * publish the new consumer index to hardware, then resume any requests
 * waiting on the corresponding request queue.
 */
static inline void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32	pi, ci;
	struct list_head *waitq;

	bfa_isr_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		/* unknown message class indicates driver/firmware skew */
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	/* order the CI write before any later MMIO from other CPUs */
	mmiowb();

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
235
/*
 * Service one request-queue interrupt: acknowledge it, then kick any
 * callers that were parked waiting for space on that request queue.
 */
static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *wait_list;

	bfa_isr_reqq_ack(bfa, qid);

	wait_list = bfa_reqq(bfa, qid);
	if (!list_empty(wait_list))
		bfa_reqq_resume(bfa, qid);
}
250
a36c61f9
KG
/*
 * Single-vector MSIX handler: service all RME (response) queues, then
 * all CPE (request) queues, and finally forward any remaining status
 * bits to the LPU/error handler. Queues are only processed while
 * bfa->queue_process is set (cleared on heartbeat failure).
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32	intr, qintr;
	int	queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return;

	/* anything left is a mailbox/error condition */
	bfa_msix_lpu_err(bfa, intr);
}
288
a36c61f9
KG
/*
 * INTx-mode interrupt handler.
 *
 * Returns BFA_FALSE when the interrupt was not ours (nothing pending),
 * BFA_TRUE otherwise. Queue interrupt bits are acknowledged up front by
 * writing them back to the status register, then RME and CPE queues are
 * serviced; leftover bits go to the LPU/error handler.
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
332
a36c61f9
KG
/*
 * Enable interrupts: install the control MSIX vector, build the unmask
 * bitmap for this PCI function (CT2 vs. older ASICs use different bit
 * layouts), clear stale status, program the mask register, and set the
 * interrupt mode (MSIX vs. INTx) based on the number of vectors.
 */
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	/* clear any stale status bits before unmasking */
	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
357
/*
 * Disable interrupts: drop back to INTx mode, mask every interrupt
 * source, then tear down the MSIX vector table. Order matters — the
 * mask write must precede the uninstall so no vector fires mid-teardown.
 */
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}
365
366void
11189208 367bfa_msix_reqq(struct bfa_s *bfa, int vec)
a36c61f9 368{
11189208 369 bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
a36c61f9
KG
370}
371
/*
 * Catch-all ISR for message classes with no registered handler:
 * trace the offending message header fields, WARN, and freeze the
 * trace buffer so the evidence is preserved for debugging.
 */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}
381
382void
11189208 383bfa_msix_rspq(struct bfa_s *bfa, int vec)
a36c61f9 384{
11189208 385 bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
a36c61f9
KG
386}
387
/*
 * Handle LPU mailbox and error interrupts (the non-queue bits).
 *
 * Decodes the status register with the CT2 or legacy bit layout,
 * dispatches mailbox interrupts to the IOC, and on an error condition
 * clears the auxiliary halt/PSS sources before acknowledging the status
 * register and notifying the IOC error path.
 */
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr  = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				   __HFN_INT_MBOX_LPU1_CT2);
		intr    &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = intr & __HFN_INT_LL_HALT;
		pss_isr  = intr & __HFN_INT_ERR_PSS;
		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr    &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrups are shared so driver's interrupt handler is
			 * still called even though it is already masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
				bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
440
5fbe25c7 441/*
a36c61f9
KG
442 * BFA IOC FC related functions
443 */
444
5fbe25c7 445/*
df0f1933 446 * BFA IOC private functions
a36c61f9
KG
447 */
448
/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ.
 *
 * Builds the shared cfginfo DMA page (queue base addresses, shadow
 * pointer addresses, element counts — all in big-endian wire format)
 * and posts a config request pointing at it. Completion arrives as
 * BFI_IOCFC_I2H_CFG_REPLY and is handled in bfa_iocfc_cfgrsp().
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/* cleared here; set again when the firmware reply arrives */
	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			sizeof(struct bfi_iocfc_cfg_req_s));
}
517
/*
 * Initialize the IOCFC instance: record driver context, copy the caller's
 * configuration, and select the chip-specific hardware interface ops.
 *
 * Three hardware families are handled: CT (Catapult), CB (Crossbow, the
 * else branch — per-PCI-function MSIX vector bases), and CT2, which
 * reuses the CT ops but overrides reginit and drops isr_mode_set /
 * rspq_ack.
 */
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = NULL;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		/* CB vectors are carved per PCI function */
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = NULL;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
572
/*
 * Carve up the pre-allocated memory segments (sized earlier by
 * bfa_iocfc_meminfo()) among the IOC, the request/response queues and
 * the IOCFC shadow-pointer / config pages.
 *
 * The IOCFC DMA segment is consumed with a pair of running cursors
 * (dm_kva/dm_pa); the carving order here must match the sizing order
 * in bfa_iocfc_meminfo().
 */
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8	*dm_kva = NULL;
	u64	dm_pa = 0;
	int	i, per_reqq_sz, per_rspq_sz, dbgsz;
	struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			bfa_mem_dma_phys(ioc_dma));

	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			BFA_CACHELINE_SZ);

	/* Claim IOCFC kva memory */
	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
		bfa_mem_kva_curp(iocfc) += dbgsz;
	}
}
645
/*
 * Start BFA submodules.
 *
 * Re-enables queue processing, acknowledges any pending response-queue
 * interrupts, then starts every module in hal_mods[] order.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int		i;

	bfa->queue_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa_isr_rspq_ack(bfa, i);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}
661
5fbe25c7 662/*
a36c61f9
KG
663 * Disable BFA submodules.
664 */
665static void
666bfa_iocfc_disable_submod(struct bfa_s *bfa)
667{
668 int i;
669
670 for (i = 0; hal_mods[i]; i++)
671 hal_mods[i]->iocdisable(bfa);
672}
673
/*
 * Deferred-callback handler for init completion.
 *
 * When invoked for real (complete == true) it reports init success or
 * failure to the driver layer based on cfgdone; when flushed
 * (complete == false) it only clears the pending action if the config
 * had already finished.
 */
static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s	*bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else {
		if (bfa->iocfc.cfgdone)
			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
	}
}
689
690static void
691bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
692{
693 struct bfa_s *bfa = bfa_arg;
694 struct bfad_s *bfad = bfa->bfad;
695
696 if (compl)
697 complete(&bfad->comp);
698 else
699 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
700}
701
60138066
KG
702static void
703bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
704{
705 struct bfa_s *bfa = bfa_arg;
706 struct bfad_s *bfad = bfa->bfad;
707
708 if (compl)
709 complete(&bfad->enable_comp);
710}
711
a36c61f9
KG
712static void
713bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
714{
715 struct bfa_s *bfa = bfa_arg;
716 struct bfad_s *bfad = bfa->bfad;
717
718 if (compl)
719 complete(&bfad->disable_comp);
720}
721
/*
 * Configure queue registers from firmware response.
 *
 * The firmware reports per-queue register offsets (big-endian) relative
 * to BAR0; convert each to a mapped address and record the hardware
 * queue id assigned to each logical queue.
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int	i;
	struct bfa_iocfc_regs_s	*r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}
742
/*
 * Propagate the firmware's (possibly trimmed) resource counts to the
 * individual submodules so they shrink their free lists accordingly.
 * fwcfg fields must already be in host byte order (see
 * bfa_iocfc_cfgrsp()).
 */
static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}
752
5fbe25c7 753/*
a36c61f9
KG
754 * Update BFA configuration from firmware configuration.
755 */
756static void
757bfa_iocfc_cfgrsp(struct bfa_s *bfa)
758{
759 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
760 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
761 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
762
763 fwcfg->num_cqs = fwcfg->num_cqs;
ba816ea8 764 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
e2187d7f 765 fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
ba816ea8
JH
766 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
767 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
768 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
769 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
a36c61f9
KG
770
771 iocfc->cfgdone = BFA_TRUE;
772
11189208
KG
773 /*
774 * configure queue register offsets as learnt from firmware
775 */
776 bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
777
3fd45980
KG
778 /*
779 * Re-configure resources as learnt from Firmware
780 */
781 bfa_iocfc_res_recfg(bfa, fwcfg);
782
775c7742
KG
783 /*
784 * Install MSIX queue handlers
785 */
786 bfa_msix_queue_install(bfa);
787
5fbe25c7 788 /*
a36c61f9
KG
789 * Configuration is complete - initialize/start submodules
790 */
791 bfa_fcport_init(bfa);
792
793 if (iocfc->action == BFA_IOCFC_ACT_INIT)
794 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
60138066
KG
795 else {
796 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
797 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
798 bfa_iocfc_enable_cb, bfa);
a36c61f9 799 bfa_iocfc_start_submod(bfa);
60138066 800 }
a36c61f9
KG
801}
/*
 * Reset the driver's shadow producer/consumer indices of every request
 * and response queue to zero (done before (re)configuring the firmware
 * and after a chip reset).
 */
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int		q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}
814
/* Fabric Assigned Address specific functions */

/*
 * Check whether IOC is ready before sending command down
 *
 * FAA is supported only on operational, non-mezzanine FC IOCs; when the
 * IOC is not yet operational the request is still allowed if the IOC is
 * in the address-acquisition phase.
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
	enum bfa_ioc_type_e	ioc_type = bfa_get_type(bfa);
	u32	card_type = bfa->ioc.attr->card_type;

	if (bfa_ioc_is_operational(&bfa->ioc)) {
		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	} else {
		if (!bfa_ioc_is_acq_addr(&bfa->ioc))
			return BFA_STATUS_IOC_NON_OP;
	}

	return BFA_STATUS_OK;
}
836
/*
 * Enable Fabric Assigned Address on this port.
 *
 * Validates IOC state, rejects the request while another FAA command is
 * in flight, already enabled, or trunking is on; otherwise records the
 * completion callback, marks the FAA machinery busy and posts
 * BFI_IOCFC_H2I_FAA_ENABLE_REQ over the mailbox. Completion arrives via
 * bfa_faa_enable_reply().
 */
bfa_status_t
bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_en_dis_s faa_enable_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
		return BFA_STATUS_FAA_ENABLED;

	if (bfa_fcport_is_trunk_enabled(bfa))
		return BFA_STATUS_ERROR_TRUNK_ENABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
		BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
			sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}
872
/*
 * Disable Fabric Assigned Address on this port.
 *
 * Mirror image of bfa_faa_enable(): validates IOC state, rejects a
 * concurrent FAA command or an already-disabled state, then posts
 * BFI_IOCFC_H2I_FAA_DISABLE_REQ. Completion arrives via
 * bfa_faa_disable_reply().
 */
bfa_status_t
bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
		void *cbarg)
{
	struct bfi_faa_en_dis_s faa_disable_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
		return BFA_STATUS_FAA_DISABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
		BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
		sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}
906
/*
 * Query the current FAA attributes.
 *
 * Records the caller's attribute buffer and completion callback, then
 * posts BFI_IOCFC_H2I_FAA_QUERY_REQ. The attribute buffer is filled in
 * bfa_faa_query_reply() when the firmware answers.
 */
bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
		bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s  faa_attr_req;
	struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
	bfa_status_t            status;

	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
		sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}
936
937/*
938 * FAA enable response
939 */
940static void
941bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
942 struct bfi_faa_en_dis_rsp_s *rsp)
943{
944 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
945 bfa_status_t status = rsp->status;
946
947 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
948
949 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
950 iocfc->faa_args.busy = BFA_FALSE;
951}
952
953/*
954 * FAA disable response
955 */
956static void
957bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
958 struct bfi_faa_en_dis_rsp_s *rsp)
959{
960 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
961 bfa_status_t status = rsp->status;
962
963 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
964
965 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
966 iocfc->faa_args.busy = BFA_FALSE;
967}
968
/*
 * FAA query response: copy the firmware's answer into the caller's
 * attribute buffer (if one was supplied), complete the callback with
 * BFA_STATUS_OK and release the busy flag.
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
		bfi_faa_query_rsp_t *rsp)
{
	void	*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

	if (iocfc->faa_args.faa_attr) {
		iocfc->faa_args.faa_attr->faa = rsp->faa;
		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
	}

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
	iocfc->faa_args.busy = BFA_FALSE;
}
989
/*
 * IOC enable request is complete.
 *
 * BFA_STATUS_FAA_ACQ_ADDR is a special interim state (port still
 * acquiring its fabric-assigned address): just queue the init callback.
 * On any other failure, disable interrupts and complete whichever
 * action (init or enable) was pending. On success, continue the bring-up
 * by sending the IOCFC configuration to firmware.
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s	*bfa = bfa_arg;

	if (status == BFA_STATUS_FAA_ACQ_ADDR) {
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				bfa_iocfc_init_cb, bfa);
		return;
	}

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
					bfa_iocfc_init_cb, bfa);
		else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
					bfa_iocfc_enable_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}
1017
/*
 * IOC disable request is complete.
 *
 * Tears down interrupts and notifies all submodules, then completes
 * either the stop or the disable callback depending on which action was
 * pending (anything else trips the WARN).
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}
1038
/*
 * Notify sub-modules of hardware failure.
 *
 * Stops queue processing first so no new requests are posted, then
 * disables interrupts and informs every submodule. If an init was in
 * flight, complete its callback (as a failure — cfgdone is still false).
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}
1056
/*
 * Actions on chip-reset completion: clear all queue shadow indices and
 * re-enable interrupts.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
1068
a36c61f9 1069
/*
 * Compute the DMA and KVA memory required by the IOCFC module and
 * register each requirement with @meminfo via bfa_mem_dma_setup() /
 * bfa_mem_kva_setup().  Covers the IOC attribute buffer, the per-CQ
 * request/response rings, the shadow CI/PI words and the config
 * info/response structures.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
	int q, per_reqq_sz, per_rspq_sz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
	u32	dm_len = 0;

	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
		BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);

	/* one request and one response ring per configured CQ */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC: fw trace buffer only in auto-recover */
	bfa_mem_kva_setup(meminfo, iocfc_kva,
		((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
}
1116
/*
 * Attach-time initialization of the IOCFC module: register IOC event
 * callbacks, attach and PCI-init the IOC, register the mailbox ISRs,
 * claim the memory carved out by bfa_iocfc_meminfo() and initialize
 * the timer and completion queues.
 * (The original header comment was a stale copy of the meminfo one.)
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	/* Route IOC enable/disable/heartbeat/reset events to IOCFC. */
	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	/* Claim memory after meminfo has sized it. */
	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
1146
5fbe25c7 1147/*
a36c61f9
KG
1148 * Query IOC memory requirement information.
1149 */
1150void
1151bfa_iocfc_init(struct bfa_s *bfa)
1152{
1153 bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
1154 bfa_ioc_enable(&bfa->ioc);
1155}
1156
5fbe25c7 1157/*
a36c61f9
KG
1158 * IOC start called from bfa_start(). Called to start IOC operations
1159 * at driver instantiation for this instance.
1160 */
1161void
1162bfa_iocfc_start(struct bfa_s *bfa)
1163{
1164 if (bfa->iocfc.cfgdone)
1165 bfa_iocfc_start_submod(bfa);
1166}
1167
5fbe25c7 1168/*
a36c61f9
KG
1169 * IOC stop called from bfa_stop(). Called only when driver is unloaded
1170 * for this instance.
1171 */
1172void
1173bfa_iocfc_stop(struct bfa_s *bfa)
1174{
1175 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
1176
775c7742 1177 bfa->queue_process = BFA_FALSE;
a36c61f9
KG
1178 bfa_ioc_disable(&bfa->ioc);
1179}
1180
/*
 * Dispatch firmware-to-host (I2H) IOCFC mailbox messages: config
 * replies, update-queue responses and FAA enable/disable/query
 * responses.  Unexpected message ids trigger a WARN.
 */
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		/* Complete the pending update-queue request. */
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
		bfa_faa_enable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
		bfa_faa_disable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		WARN_ON(1);
	}
}
1213
a36c61f9
KG
1214void
1215bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
1216{
1217 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1218
1219 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
1220
1221 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
ba816ea8
JH
1222 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
1223 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
a36c61f9
KG
1224
1225 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
ba816ea8
JH
1226 be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
1227 be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
a36c61f9
KG
1228
1229 attr->config = iocfc->cfg;
1230}
1231
/*
 * Set the interrupt coalescing attributes.  The values are cached in
 * cfginfo (big-endian, as sent on the wire) and, when the IOC is
 * operational, also pushed to firmware via a SET_INTR_REQ mailbox
 * message.
 *
 * Returns BFA_STATUS_OK, or BFA_STATUS_DEVBUSY when no request-queue
 * element is available.
 */
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	/* Cache the new settings; delay/latency kept big-endian. */
	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	/* Not operational yet: cached values are applied at config time. */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_fn_lpu(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
	return BFA_STATUS_OK;
}
1261
/*
 * Record the DMA base address of one sense-buffer segment in the
 * firmware config info (stored big-endian via bfa_dma_be_addr_set).
 */
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}
/*
 * Enable IOC after it is disabled.  Completion is reported through
 * bfa_iocfc_enable_cbfn() once the IOC comes up.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	/* Leave a breadcrumb in the port log before touching the IOC. */
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
	bfa_ioc_enable(&bfa->ioc);
}
1281
/*
 * Disable the IOC.  Queue processing is halted first; completion is
 * reported through bfa_iocfc_disable_cbfn().
 */
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
1292
1293
1294bfa_boolean_t
1295bfa_iocfc_is_operational(struct bfa_s *bfa)
1296{
1297 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
1298}
1299
/*
 * Return boot target port wwns -- read from boot information in flash.
 * Pre-boot (PBC) boot LUN configuration, when enabled and non-empty,
 * takes precedence over the flash boot-wwn list.
 *
 * NOTE(review): @wwns is assumed large enough for nbluns entries /
 * the full bootwwns.wwn array -- callers must size it accordingly.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}
1322
a36c61f9
KG
/*
 * Copy the pre-boot (PBC) virtual-port table out of the firmware
 * config response and return the number of configured vports.
 */
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}
1332
7725ccfd 1333
/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail, if the cap is out of range compared to pre-defined
 * values within the BFA library
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *		    its configuration in this structure.
 *		    The default values for struct bfa_iocfc_cfg_s can be
 *		    fetched using bfa_cfg_get_default() API.
 *
 *		    If cap's boundary check fails, the library will use
 *		    the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *		    indicates the memory type (see bfa_mem_type_t) and
 *		    amount of memory required.
 *
 *		    Driver should allocate the memory, populate the
 *		    starting address for each block and provide the same
 *		    structure as input parameter to bfa_attach() call.
 *
 * @param[in] bfa - pointer to the bfa structure, used while fetching the
 *		    dma, kva memory information of the bfa sub-modules.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
	int i;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	/* IOCFC first, then every HAL sub-module in hal_mods[] order. */
	bfa_iocfc_meminfo(cfg, meminfo, bfa);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, meminfo, bfa);

	/* dma info setup for the common modules (port, ablk, cee, sfp) */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
}
1394
/*
 * Use this function to do attach the driver instance with the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in bfa_init() call)
 *
 * This call will fail, if the cap is out of range compared to
 * pre-defined values within the BFA library
 *
 * @param[out] bfa	Pointer to bfa_t.
 * @param[in] bfad	Opaque handle back to the driver's IOC structure
 * @param[in] cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in] meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in] pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* Initialize memory pointers for iterative allocation */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	/* Reset each per-module DMA element's cursors to its base. */
	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	/* Likewise for the KVA elements. */
	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	/* Attach IOCFC first, then each HAL sub-module in list order. */
	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, pcidev);

	/* Common (non-HAL) modules: port, ablk, cee, sfp. */
	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
	bfa_com_cee_attach(bfa);
	bfa_com_sfp_attach(bfa);
}
1463
5fbe25c7 1464/*
7725ccfd
JH
1465 * Use this function to delete a BFA IOC. IOC should be stopped (by
1466 * calling bfa_stop()) before this function call.
1467 *
1468 * @param[in] bfa - pointer to bfa_t.
1469 *
1470 * @return
1471 * void
1472 *
1473 * Special Considerations:
1474 *
1475 * @note
1476 */
1477void
1478bfa_detach(struct bfa_s *bfa)
1479{
1480 int i;
1481
1482 for (i = 0; hal_mods[i]; i++)
1483 hal_mods[i]->detach(bfa);
f7f73812 1484 bfa_ioc_detach(&bfa->ioc);
7725ccfd
JH
1485}
1486
/*
 * Move all pending completions from bfa->comp_q onto the caller's
 * @comp_q, leaving bfa->comp_q empty.
 */
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}
1493
1494void
1495bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1496{
a36c61f9
KG
1497 struct list_head *qe;
1498 struct list_head *qen;
1499 struct bfa_cb_qe_s *hcb_qe;
7725ccfd
JH
1500
1501 list_for_each_safe(qe, qen, comp_q) {
1502 hcb_qe = (struct bfa_cb_qe_s *) qe;
1503 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
1504 }
1505}
1506
/*
 * Drain @comp_q, invoking each queued callback with "not complete"
 * (BFA_FALSE) status -- used to flush completions without reporting
 * success.
 */
void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}
1519
7725ccfd 1520
5fbe25c7 1521/*
7725ccfd
JH
1522 * Return the list of PCI vendor/device id lists supported by this
1523 * BFA instance.
1524 */
1525void
1526bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1527{
1528 static struct bfa_pciid_s __pciids[] = {
1529 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
1530 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
1531 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
293f82d5 1532 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
7725ccfd
JH
1533 };
1534
a36c61f9 1535 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
7725ccfd
JH
1536 *pciids = __pciids;
1537}
1538
/*
 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled
 * into BFA layer). The OS driver can then turn back and overwrite entries that
 * have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 * void
 *
 * Special Considerations:
 * note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	/* Firmware-side resource counts. */
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	/* Target-mode IO requests disabled by default. */
	cfg->fwcfg.num_fwtio_reqs = 0;

	/* Driver-side resource counts and policies. */
	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;

}
1575
/*
 * Fill @cfg with the minimum resource configuration: start from the
 * defaults, then shrink each pool to its library-defined minimum and
 * mark the config as min_cfg.
 */
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}