drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

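/*
 * Layout of the ioc_fail_sync register, as implied by the helper macros
 * below: the low 16 bits carry one "sync acked" flag per PCI function;
 * the high 16 bits carry the matching "sync required" flags.
 */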
#define bfa_ioc_ct_sync_pos(__ioc)      \
                ((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH            16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
                (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);

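/* CT hardware interface; its callbacks are installed at IOC attach time. */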
static struct bfa_ioc_hwif nw_hwif_ct;

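/*
 * Install the firmware-lock, failure-notify, ownership-reset and IOC
 * sync callbacks.
 */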
static void
bfa_ioc_set_ctx_hwif(struct bfa_ioc *ioc, struct bfa_ioc_hwif *hwif)
{
        hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
        hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
        hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
        hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
        hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
        hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
        hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
        hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
        hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
}

/*
 * Called from bfa_ioc_attach() to map ASIC-specific calls.
 */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
        bfa_ioc_set_ctx_hwif(ioc, &nw_hwif_ct);

        nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
        nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
        nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
        nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
        ioc->ioc_hwif = &nw_hwif_ct;
}

/*
 * Return true if firmware of current driver matches the running firmware.
 */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
        enum bfi_ioc_state ioc_fwstate;
        u32 usecnt;
        struct bfi_ioc_image_hdr fwhdr;

        /*
         * If bios boot (flash based) -- do not increment usage count
         */
        if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return true;

        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

        /*
         * If usage count is 0, always return true.
         */
        if (usecnt == 0) {
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                return true;
        }

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        /*
         * The use count cannot be non-zero while the chip is uninitialized.
         */
        BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);

        /*
         * Check if another driver with a different firmware is active
         */
        bfa_nw_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                return false;
        }

        /*
         * Same firmware version. Increment the reference count.
         */
        usecnt++;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        return true;
}

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
        u32 usecnt;

        /*
         * If bios boot (flash based) -- do not decrement usage count
         */
        if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return;

        /*
         * decrement usage count
         */
        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
        BUG_ON(usecnt == 0);

        usecnt--;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);

        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/*
 * Notify other functions on heartbeat (HB) failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
        if (ioc->cna) {
                writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
                writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
                /* Wait for halt to take effect */
                readl(ioc->ioc_regs.ll_halt);
                readl(ioc->ioc_regs.alt_ll_halt);
        } else {
                writel(~0U, ioc->ioc_regs.err_set);
                readl(ioc->ioc_regs.err_set);
        }
}

/*
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
        { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } ct_p0reg[] = {
        { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } ct_p1reg[] = {
        { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
        void __iomem *rb;
        int             pcifn = bfa_ioc_pcifn(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
        ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
        ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
        ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
        ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
        ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
        ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
        ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

        /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg: used to notify heartbeat failure in fcmode
         */
        ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

/*
 * Initialize IOC to port mapping.
 */

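/*
 * Each PCI function has an 8-bit field in the personality register;
 * shift by 8 * function number to select this function's field.
 */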
#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32;

        /*
         * For catapult, base port id on personality register and IOC type
         */
        r32 = readl(rb + FNC_PERS_REG);
        r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
        ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

/*
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32, mode;

        r32 = readl(rb + FNC_PERS_REG);

        mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
                __F0_INTX_STATUS;

        /*
         * If already in desired mode, do not change anything
         */
        if ((!msix && mode) || (msix && !mode))
                return;

        if (msix)
                mode = __F0_INTX_STATUS_MSIX;
        else
                mode = __F0_INTX_STATUS_INTA;

        r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

        writel(r32, rb + FNC_PERS_REG);
}

/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
        if (ioc->cna) {
                bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_usage_reg);
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        }

        /*
         * Read the hw sem reg to make sure that it is locked
         * before we clear it. If it is not locked, writing 1
         * will lock it instead of clearing it.
         */
        readl(ioc->ioc_regs.ioc_sem_reg);
        bfa_nw_ioc_hw_sem_release(ioc);
}

/*
 * Synchronized IOC failure processing routines
 */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

        /*
         * Driver load time.  If the sync required bit for this PCI fn
         * is set, it is due to an unclean exit by the driver for this
         * PCI fn in the previous incarnation. Whoever comes here first
         * should clean it up, no matter which PCI fn.
         */
        if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
                return true;
        }

        return bfa_ioc_ct_sync_complete(ioc);
}

/*
 * Mark this PCI function as requiring a resync (join the sync group).
 */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

        writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

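/*
 * Clear both the sync-required and sync-acked bits for this function
 * on leaving the sync group.
 */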
static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
                                        bfa_ioc_ct_sync_pos(ioc);

        writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

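/*
 * Acknowledge a resync request by setting this function's sync-acked
 * bit.
 */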
static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

        writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
}

static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
        u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
        u32 tmp_ackd;

        if (sync_ackd == 0)
                return true;

        /*
         * The check below is to see whether any other PCI fn
         * has reinitialized the ASIC (reset sync_ackd bits)
         * and failed again while this IOC was waiting for hw
         * semaphore (in bfa_iocpf_sm_semwait()).
         */
        tmp_ackd = sync_ackd;
        if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
                        !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
                sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

        if (sync_reqd == sync_ackd) {
                writel(bfa_ioc_ct_clear_sync_ackd(r32),
                                ioc->ioc_regs.ioc_fail_sync);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
                return true;
        }

        /*
         * If another PCI fn reinitialized and failed again while
         * this IOC was waiting for the hw sem, the sync_ackd bit for
         * this IOC needs to be set again to allow reinitialization.
         */
        if (tmp_ackd != sync_ackd)
                writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

        return false;
}

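/*
 * Bring up the ASIC's sclk/lclk PLLs. The rough sequence, as read from
 * the register writes below: select the operating mode, force both IOC
 * states to UNINIT, mask and clear host interrupts, program the PLLs
 * with logic soft-reset asserted, enable them, wait, then release the
 * resets and run the memory BIST.
 */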
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
{
        u32     pll_sclk, pll_fclk, r32;

        pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
                __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
                __APP_PLL_SCLK_JITLMT0_1(3U) |
                __APP_PLL_SCLK_CNTLMT0_1(1U);
        pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
                __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
                __APP_PLL_LCLK_JITLMT0_1(3U) |
                __APP_PLL_LCLK_CNTLMT0_1(1U);

        /* Select the operating mode and ethernet MAC serdes clocking */
        if (fcmode) {
                writel(0, (rb + OP_MODE));
                writel(__APP_EMS_CMLCKSEL |
                                __APP_EMS_REFCKBUFEN2 |
                                __APP_EMS_CHANNEL_SEL,
                                (rb + ETH_MAC_SER_REG));
        } else {
                writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
                writel(__APP_EMS_REFCKBUFEN1,
                                (rb + ETH_MAC_SER_REG));
        }

        /* Force both IOCs into the uninitialized state */
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));

        /* Mask host interrupts and clear any pending status */
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

        /* Program the PLLs with logic soft-reset asserted, then enable */
        writel(pll_sclk |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET,
                rb + APP_PLL_LCLK_CTL_REG);
        writel(pll_sclk |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
                rb + APP_PLL_LCLK_CTL_REG);

        /* Flush the writes, then give the PLLs time to settle */
        readl(rb + HOSTFN0_INT_MSK);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));

        /* Release the logic soft-reset, leaving the PLLs enabled */
        writel(pll_sclk |
                __APP_PLL_SCLK_ENABLE,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_LCLK_ENABLE,
                rb + APP_PLL_LCLK_CTL_REG);

        if (!fcmode) {
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
        }

        /* Take local memory out of reset */
        r32 = readl((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);
        if (!fcmode) {
                writel(0, (rb + PMM_1T_RESET_REG_P0));
                writel(0, (rb + PMM_1T_RESET_REG_P1));
        }

        /* Run the eDRAM built-in self test, then stop it */
        writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
        udelay(1000);
        r32 = readl((rb + MBIST_STAT_REG));
        writel(0, (rb + MBIST_CTL_REG));
        return BFA_STATUS_OK;
}