bna: Add debugfs interface.
[linux-block.git] / drivers / net / ethernet / brocade / bna / bfa_ioc.c
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18
19 #include "bfa_ioc.h"
20 #include "bfi_reg.h"
21 #include "bfa_defs.h"
22
23 /**
24  * IOC local definitions
25  */
26
/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 *
 * These are thin dispatch wrappers through the per-ASIC ops table in
 * ioc->ioc_hwif.
 */

#define bfa_ioc_firmware_lock(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)                  \
                        ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)                      \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)               \
                        ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)                        \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)                       \
                        ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                         \
                        ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

/* True if a mailbox command is queued in s/w or still pending in the h/w
 * mailbox command register (non-zero read => f/w has not consumed it yet).
 */
#define bfa_ioc_mbox_cmd_pending(__ioc)         \
                        (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
                        readl((__ioc)->ioc_regs.hfn_mbox_cmd))

/* Module default: retry IOC initialization after a failure (see
 * bfa_ioc_sm_op, which picks fail_retry vs fail based on this).
 */
static bool bfa_nw_auto_recover = true;
55
56 /*
57  * forward declarations
58  */
59 static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
60 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
61 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
62 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
63 static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
64 static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
65 static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
66 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
67 static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
68 static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
69 static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
70 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
71 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
72 static void bfa_ioc_recover(struct bfa_ioc *ioc);
73 static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
74 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
75 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
76 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
77 static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
78 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
79 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
80 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
81 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
82 static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
83 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
84 static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
85                          u32 boot_param);
86 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
87 static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
88                                                 char *serial_num);
89 static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
90                                                 char *fw_ver);
91 static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
92                                                 char *chip_rev);
93 static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
94                                                 char *optrom_ver);
95 static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
96                                                 char *manufacturer);
97 static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
98 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
99
/**
 * IOC state machine definitions/declarations
 */

/* Events posted to the IOC state machine (bfa_ioc_sm_* handlers). */
enum ioc_event {
        IOC_E_RESET             = 1,    /*!< IOC reset request          */
        IOC_E_ENABLE            = 2,    /*!< IOC enable request         */
        IOC_E_DISABLE           = 3,    /*!< IOC disable request        */
        IOC_E_DETACH            = 4,    /*!< driver detach cleanup      */
        IOC_E_ENABLED           = 5,    /*!< f/w enabled                */
        IOC_E_FWRSP_GETATTR     = 6,    /*!< IOC get attribute response */
        IOC_E_DISABLED          = 7,    /*!< f/w disabled               */
        IOC_E_PFFAILED          = 8,    /*!< failure notice by iocpf sm */
        IOC_E_HBFAIL            = 9,    /*!< heartbeat failure          */
        IOC_E_HWERROR           = 10,   /*!< hardware error interrupt   */
        IOC_E_TIMEOUT           = 11,   /*!< timeout                    */
        IOC_E_HWFAILED          = 12,   /*!< PCI mapping failure notice */
};

/* Declares the bfa_ioc_sm_<state> handler and its _entry action. */
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);

/* Maps each state handler to its externally reported BFA_IOC_* value. */
static struct bfa_sm_table ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
        {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
        {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
        {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
        {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
        {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
        {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
        {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
141
142 /*
143  * Forward declareations for iocpf state machine
144  */
145 static void bfa_iocpf_enable(struct bfa_ioc *ioc);
146 static void bfa_iocpf_disable(struct bfa_ioc *ioc);
147 static void bfa_iocpf_fail(struct bfa_ioc *ioc);
148 static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
149 static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
150 static void bfa_iocpf_stop(struct bfa_ioc *ioc);
151
/**
 * IOCPF state machine events
 */
enum iocpf_event {
        IOCPF_E_ENABLE          = 1,    /*!< IOCPF enable request       */
        IOCPF_E_DISABLE         = 2,    /*!< IOCPF disable request      */
        IOCPF_E_STOP            = 3,    /*!< stop on driver detach      */
        IOCPF_E_FWREADY         = 4,    /*!< f/w initialization done    */
        IOCPF_E_FWRSP_ENABLE    = 5,    /*!< enable f/w response        */
        IOCPF_E_FWRSP_DISABLE   = 6,    /*!< disable f/w response       */
        IOCPF_E_FAIL            = 7,    /*!< failure notice by ioc sm   */
        IOCPF_E_INITFAIL        = 8,    /*!< init fail notice by ioc sm */
        IOCPF_E_GETATTRFAIL     = 9,    /*!< init fail notice by ioc sm */
        IOCPF_E_SEMLOCKED       = 10,   /*!< h/w semaphore is locked    */
        IOCPF_E_TIMEOUT         = 11,   /*!< f/w response timeout       */
        IOCPF_E_SEM_ERROR       = 12,   /*!< h/w sem mapping error      */
};

/**
 * IOCPF states (externally reported; several handlers share one state)
 */
enum bfa_iocpf_state {
        BFA_IOCPF_RESET         = 1,    /*!< IOC is in reset state */
        BFA_IOCPF_SEMWAIT       = 2,    /*!< Waiting for IOC h/w semaphore */
        BFA_IOCPF_HWINIT        = 3,    /*!< IOC h/w is being initialized */
        BFA_IOCPF_READY         = 4,    /*!< IOCPF is initialized */
        BFA_IOCPF_INITFAIL      = 5,    /*!< IOCPF failed */
        BFA_IOCPF_FAIL          = 6,    /*!< IOCPF failed */
        BFA_IOCPF_DISABLING     = 7,    /*!< IOCPF is being disabled */
        BFA_IOCPF_DISABLED      = 8,    /*!< IOCPF is disabled */
        BFA_IOCPF_FWMISMATCH    = 9,    /*!< IOC f/w different from drivers */
};

/* Declares the bfa_iocpf_sm_<state> handler and its _entry action. */
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

/* Maps each IOCPF state handler to its reported BFA_IOCPF_* value. */
static struct bfa_sm_table iocpf_sm_table[] = {
        {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
        {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
        {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
        {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
218
219 /**
220  * IOC State Machine
221  */
222
/**
 * Beginning state. IOC uninit state.
 *
 * Nothing to set up; entry action is a no-op.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}
230
231 /**
232  * IOC is in uninit state.
233  */
234 static void
235 bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
236 {
237         switch (event) {
238         case IOC_E_RESET:
239                 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
240                 break;
241
242         default:
243                 bfa_sm_fault(event);
244         }
245 }
246
/**
 * Reset entry actions -- initialize state machine
 *
 * Puts the companion IOCPF state machine back to its reset state.
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
        bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
255
256 /**
257  * IOC is in reset state.
258  */
259 static void
260 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
261 {
262         switch (event) {
263         case IOC_E_ENABLE:
264                 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
265                 break;
266
267         case IOC_E_DISABLE:
268                 bfa_ioc_disable_comp(ioc);
269                 break;
270
271         case IOC_E_DETACH:
272                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
273                 break;
274
275         default:
276                 bfa_sm_fault(event);
277         }
278 }
279
/* Kick the IOCPF state machine to start the enable sequence. */
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
        bfa_iocpf_enable(ioc);
}
285
/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
                /* !!! fall through !!! */
        case IOC_E_HWERROR:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                /* On PFFAILED the iocpf sm already handled the failure;
                 * only push it down for a raw h/w error.
                 */
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_initfail(ioc);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        case IOC_E_ENABLE:
                /* Duplicate enable request; ignore. */
                break;

        default:
                bfa_sm_fault(event);
        }
}
328
/**
 * Arm the config timeout and request IOC attributes from firmware.
 */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
        mod_timer(&ioc->ioc_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_send_getattr(ioc);
}
339
/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_FWRSP_GETATTR:
                del_timer(&ioc->ioc_timer);
                bfa_ioc_check_attr_wwns(ioc);
                /* Start heartbeat monitoring before going operational. */
                bfa_ioc_hb_monitor(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                del_timer(&ioc->ioc_timer);
                /* fall through */
        case IOC_E_TIMEOUT:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                /* iocpf sm already knows about a PFFAILED; tell it
                 * otherwise.
                 */
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_getattrfail(ioc);
                break;

        case IOC_E_DISABLE:
                del_timer(&ioc->ioc_timer);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_ENABLE:
                /* Duplicate enable request; ignore. */
                break;

        default:
                bfa_sm_fault(event);
        }
}
377
/* Enable completed: report success to the driver and notify listeners. */
static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
}
384
/* IOC is operational; heartbeat is being monitored. */
static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                /* Already enabled; ignore. */
                break;

        case IOC_E_DISABLE:
                bfa_ioc_hb_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_ioc_hb_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_HBFAIL:
                /* Retry initialization only when auto-recovery is on. */
                if (ioc->iocpf.auto_recover)
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                else
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

                bfa_ioc_fail_notify(ioc);

                /* iocpf sm already knows about a PFFAILED. */
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_fail(ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}
417
/* Start the disable sequence in the IOCPF state machine. */
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
        bfa_iocpf_disable(ioc);
}
423
/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_DISABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        case IOC_E_HWERROR:
                /*
                 * No state change.  Will move to disabled state
                 * after iocpf sm completes failure processing and
                 * moves to disabled state.
                 */
                bfa_iocpf_fail(ioc);
                break;

        case IOC_E_HWFAILED:
                /* PCI mapping failure: go straight to hwfail and run the
                 * disable completion ourselves.
                 */
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                bfa_ioc_disable_comp(ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}
453
/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
        bfa_ioc_disable_comp(ioc);
}
462
463 static void
464 bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
465 {
466         switch (event) {
467         case IOC_E_ENABLE:
468                 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
469                 break;
470
471         case IOC_E_DISABLE:
472                 ioc->cbfn->disable_cbfn(ioc->bfa);
473                 break;
474
475         case IOC_E_DETACH:
476                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
477                 bfa_iocpf_stop(ioc);
478                 break;
479
480         default:
481                 bfa_sm_fault(event);
482         }
483 }
484
/* Entry is a no-op; the iocpf sm drives the re-initialization. */
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}
489
/**
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                /**
                 * Initialization retry failed.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                /* iocpf sm already knows about a PFFAILED. */
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_initfail(ioc);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_ENABLE:
                /* Retry already in progress; ignore. */
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}
533
/* Entry is a no-op; failure notification happened before the transition. */
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}
538
539 /**
540  * IOC failure.
541  */
542 static void
543 bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
544 {
545         switch (event) {
546         case IOC_E_ENABLE:
547                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
548                 break;
549
550         case IOC_E_DISABLE:
551                 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
552                 break;
553
554         case IOC_E_DETACH:
555                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
556                 bfa_iocpf_stop(ioc);
557                 break;
558
559         case IOC_E_HWERROR:
560                 /* HB failure notification, ignore. */
561                 break;
562
563         default:
564                 bfa_sm_fault(event);
565         }
566 }
567
/* Entry is a no-op; the HWFAILED event already carried the notification. */
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}
572
573 /**
574  * IOC failure.
575  */
576 static void
577 bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
578 {
579         switch (event) {
580
581         case IOC_E_ENABLE:
582                 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
583                 break;
584
585         case IOC_E_DISABLE:
586                 ioc->cbfn->disable_cbfn(ioc->bfa);
587                 break;
588
589         case IOC_E_DETACH:
590                 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
591                 break;
592
593         default:
594                 bfa_sm_fault(event);
595         }
596 }
597
/**
 * IOCPF State Machine
 */

/**
 * Reset entry actions -- initialize state machine
 *
 * Clears the one-shot mismatch-notification latch and samples the
 * module-wide auto-recovery setting.
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
        iocpf->fw_mismatch_notified = false;
        iocpf->auto_recover = bfa_nw_auto_recover;
}
611
612 /**
613  * Beginning state. IOC is in reset state.
614  */
615 static void
616 bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
617 {
618         switch (event) {
619         case IOCPF_E_ENABLE:
620                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
621                 break;
622
623         case IOCPF_E_STOP:
624                 break;
625
626         default:
627                 bfa_sm_fault(event);
628         }
629 }
630
/**
 * Semaphore should be acquired for version check.
 *
 * Initializes the h/w semaphore logic and starts the (asynchronous)
 * semaphore acquisition; IOCPF_E_SEMLOCKED arrives on success.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_init(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}
640
/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_start(ioc)) {
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        } else {
                                /* Sync not possible yet: back out and
                                 * retry after BFA_IOC_HWSEM_TOV ms.
                                 */
                                bfa_ioc_firmware_unlock(ioc);
                                bfa_nw_ioc_hw_sem_release(ioc);
                                mod_timer(&ioc->sem_timer, jiffies +
                                        msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
                        }
                } else {
                        /* Firmware lock refused: treat as f/w mismatch. */
                        bfa_nw_ioc_hw_sem_release(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_ioc_pf_hwfailed(ioc);
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_ioc_pf_disabled(ioc);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(event);
        }
}
687
/**
 * Notify the driver of the f/w mismatch (once) and arm the recheck timer.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
        /* Call only the first time sm enters fwmismatch state. */
        if (iocpf->fw_mismatch_notified == false)
                bfa_ioc_pf_fwmismatch(iocpf->ioc);

        iocpf->fw_mismatch_notified = true;
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
}
702
/**
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_TIMEOUT:
                /* Timer fired: go back and re-run the version check. */
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_ioc_pf_disabled(ioc);
                break;

        case IOCPF_E_STOP:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(event);
        }
}
731
/**
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}
740
/**
 * Awaiting semaphore for h/w initialzation.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_sync_complete(ioc)) {
                        bfa_ioc_sync_join(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                } else {
                        /* Peers not done yet: drop the sem and retry
                         * after BFA_IOC_HWSEM_TOV ms.
                         */
                        bfa_nw_ioc_hw_sem_release(ioc);
                        mod_timer(&ioc->sem_timer, jiffies +
                                msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_ioc_pf_hwfailed(ioc);
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        default:
                bfa_sm_fault(event);
        }
}
775
/* Reset the poll counter and start (non-forced) h/w initialization. */
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
        iocpf->poll_time = 0;
        bfa_ioc_reset(iocpf->ioc, false);
}
782
783 /**
784  * Hardware is being initialized. Interrupts are enabled.
785  * Holding hardware semaphore lock.
786  */
787 static void
788 bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
789 {
790         struct bfa_ioc *ioc = iocpf->ioc;
791
792         switch (event) {
793         case IOCPF_E_FWREADY:
794                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
795                 break;
796
797         case IOCPF_E_TIMEOUT:
798                 bfa_nw_ioc_hw_sem_release(ioc);
799                         bfa_ioc_pf_failed(ioc);
800                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
801                 break;
802
803         case IOCPF_E_DISABLE:
804                 del_timer(&ioc->iocpf_timer);
805                 bfa_ioc_sync_leave(ioc);
806                 bfa_nw_ioc_hw_sem_release(ioc);
807                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
808                 break;
809
810         default:
811                 bfa_sm_fault(event);
812         }
813 }
814
/* Arm the response timeout and send the f/w IOC ENABLE command. */
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        /**
         * Enable Interrupts before sending fw IOC ENABLE cmd.
         */
        iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
        bfa_ioc_send_enable(iocpf->ioc);
}
826
/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWRSP_ENABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
                break;

        case IOCPF_E_INITFAIL:
                del_timer(&ioc->iocpf_timer);
                /*
                 * !!! fall through !!!
                 */
        case IOCPF_E_TIMEOUT:
                bfa_nw_ioc_hw_sem_release(ioc);
                /* On INITFAIL the ioc sm is already aware; a timeout
                 * must be reported up explicitly.
                 */
                if (event == IOCPF_E_TIMEOUT)
                        bfa_ioc_pf_failed(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        default:
                bfa_sm_fault(event);
        }
}
865
/* Report enable completion up to the IOC state machine. */
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_pf_enabled(iocpf->ioc);
}
871
872 static void
873 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
874 {
875         switch (event) {
876         case IOCPF_E_DISABLE:
877                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
878                 break;
879
880         case IOCPF_E_GETATTRFAIL:
881                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
882                 break;
883
884         case IOCPF_E_FAIL:
885                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
886                 break;
887
888         default:
889                 bfa_sm_fault(event);
890         }
891 }
892
/* Arm the response timeout and send the f/w IOC DISABLE command. */
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_send_disable(iocpf->ioc);
}
900
/**
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWRSP_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                del_timer(&ioc->iocpf_timer);
                /*
                 * !!! fall through !!!
                 */

        case IOCPF_E_TIMEOUT:
                /* No (usable) f/w response: force the fwstate register
                 * to FAIL and proceed with the disable sync anyway.
                 */
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FWRSP_ENABLE:
                /* Stale enable response; ignore. */
                break;

        default:
                bfa_sm_fault(event);
        }
}
933
/**
 * Entry action for DISABLING_SYNC: acquire the hardware semaphore so
 * the leave/cleanup can be done exclusively.
 */
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}
939
940 /**
941  * IOC hb ack request is being removed.
942  */
943 static void
944 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
945 {
946         struct bfa_ioc *ioc = iocpf->ioc;
947
948         switch (event) {
949         case IOCPF_E_SEMLOCKED:
950                 bfa_ioc_sync_leave(ioc);
951                 bfa_nw_ioc_hw_sem_release(ioc);
952                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
953                 break;
954
955         case IOCPF_E_SEM_ERROR:
956                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
957                 bfa_ioc_pf_hwfailed(ioc);
958                 break;
959
960         case IOCPF_E_FAIL:
961                 break;
962
963         default:
964                 bfa_sm_fault(event);
965         }
966 }
967
968 /**
969  * IOC disable completion entry.
970  */
971 static void
972 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
973 {
974         bfa_ioc_mbox_flush(iocpf->ioc);
975         bfa_ioc_pf_disabled(iocpf->ioc);
976 }
977
/**
 * IOCPF is disabled; wait for a re-enable or a final stop.
 */
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_ENABLE:
                /* Re-enable: go back to semaphore wait to restart init. */
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                break;

        case IOCPF_E_STOP:
                /* Final stop: release the firmware hold and reset the FSM. */
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(event);
        }
}
997
/**
 * Entry action for INITFAIL_SYNC: save the firmware trace (if
 * configured) and acquire the hardware semaphore for cleanup.
 */
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
        bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}
1004
1005 /**
1006  * Hardware initialization failed.
1007  */
1008 static void
1009 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1010 {
1011         struct bfa_ioc *ioc = iocpf->ioc;
1012
1013         switch (event) {
1014         case IOCPF_E_SEMLOCKED:
1015                 bfa_ioc_notify_fail(ioc);
1016                 bfa_ioc_sync_leave(ioc);
1017                 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1018                 bfa_nw_ioc_hw_sem_release(ioc);
1019                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1020                 break;
1021
1022         case IOCPF_E_SEM_ERROR:
1023                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1024                 bfa_ioc_pf_hwfailed(ioc);
1025                 break;
1026
1027         case IOCPF_E_DISABLE:
1028                 bfa_ioc_hw_sem_get_cancel(ioc);
1029                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1030                 break;
1031
1032         case IOCPF_E_STOP:
1033                 bfa_ioc_hw_sem_get_cancel(ioc);
1034                 bfa_ioc_firmware_unlock(ioc);
1035                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1036                 break;
1037
1038         case IOCPF_E_FAIL:
1039                 break;
1040
1041         default:
1042                 bfa_sm_fault(event);
1043         }
1044 }
1045
/**
 * Entry action for INITFAIL: intentionally empty; all failure
 * handling is done before entering this state.
 */
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}
1050
1051 /**
1052  * Hardware initialization failed.
1053  */
1054 static void
1055 bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1056 {
1057         struct bfa_ioc *ioc = iocpf->ioc;
1058
1059         switch (event) {
1060         case IOCPF_E_DISABLE:
1061                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1062                 break;
1063
1064         case IOCPF_E_STOP:
1065                 bfa_ioc_firmware_unlock(ioc);
1066                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1067                 break;
1068
1069         default:
1070                 bfa_sm_fault(event);
1071         }
1072 }
1073
/**
 * Entry action for FAIL_SYNC: stop the firmware processors, flush
 * pending mailbox requests, and take the hardware semaphore so the
 * failure can be synchronized across functions.
 */
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
        /**
         * Mark IOC as failed in hardware and stop firmware.
         */
        bfa_ioc_lpu_stop(iocpf->ioc);

        /**
         * Flush any queued up mailbox requests.
         */
        bfa_ioc_mbox_flush(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}
1088
1089 /**
1090  * IOC is in failed state.
1091  */
1092 static void
1093 bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1094 {
1095         struct bfa_ioc *ioc = iocpf->ioc;
1096
1097         switch (event) {
1098         case IOCPF_E_SEMLOCKED:
1099                 bfa_ioc_sync_ack(ioc);
1100                 bfa_ioc_notify_fail(ioc);
1101                 if (!iocpf->auto_recover) {
1102                         bfa_ioc_sync_leave(ioc);
1103                         writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
1104                         bfa_nw_ioc_hw_sem_release(ioc);
1105                         bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1106                 } else {
1107                         if (bfa_ioc_sync_complete(ioc))
1108                                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1109                         else {
1110                                 bfa_nw_ioc_hw_sem_release(ioc);
1111                                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1112                         }
1113                 }
1114                 break;
1115
1116         case IOCPF_E_SEM_ERROR:
1117                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1118                 bfa_ioc_pf_hwfailed(ioc);
1119                 break;
1120
1121         case IOCPF_E_DISABLE:
1122                 bfa_ioc_hw_sem_get_cancel(ioc);
1123                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1124                 break;
1125
1126         case IOCPF_E_FAIL:
1127                 break;
1128
1129         default:
1130                 bfa_sm_fault(event);
1131         }
1132 }
1133
/**
 * Entry action for FAIL: intentionally empty; the failure was fully
 * processed in fail_sync before this state was entered.
 */
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}
1138
1139 /**
1140  * @brief
1141  * IOC is in failed state.
1142  */
1143 static void
1144 bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1145 {
1146         switch (event) {
1147         case IOCPF_E_DISABLE:
1148                 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1149                 break;
1150
1151         default:
1152                 bfa_sm_fault(event);
1153         }
1154 }
1155
1156 /**
1157  * BFA IOC private functions
1158  */
1159
1160 /**
1161  * Notify common modules registered for notification.
1162  */
1163 static void
1164 bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1165 {
1166         struct bfa_ioc_notify *notify;
1167         struct list_head                        *qe;
1168
1169         list_for_each(qe, &ioc->notify_q) {
1170                 notify = (struct bfa_ioc_notify *)qe;
1171                 notify->cbfn(notify->cbarg, event);
1172         }
1173 }
1174
/**
 * Complete an IOC disable: invoke the driver's disable callback and
 * notify all registered modules.
 */
static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
        ioc->cbfn->disable_cbfn(ioc->bfa);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
1181
1182 bool
1183 bfa_nw_ioc_sem_get(void __iomem *sem_reg)
1184 {
1185         u32 r32;
1186         int cnt = 0;
1187 #define BFA_SEM_SPINCNT 3000
1188
1189         r32 = readl(sem_reg);
1190
1191         while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1192                 cnt++;
1193                 udelay(2);
1194                 r32 = readl(sem_reg);
1195         }
1196
1197         if (!(r32 & 1))
1198                 return true;
1199
1200         return false;
1201 }
1202
/**
 * Release a hardware semaphore by writing 1 to its register; the
 * preceding read flushes any posted writes before the release.
 */
void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
        readl(sem_reg);
        writel(1, sem_reg);
}
1209
/**
 * One-time semaphore/fwstate sanity pass: if a firmware other than
 * the normal boot image is running (e.g. left over from BIOS), force
 * the fwstate back to UNINIT and cycle the IOC semaphore.
 */
static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
        struct bfi_ioc_image_hdr fwhdr;
        u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        if (fwstate == BFI_IOC_UNINIT)
                return;

        bfa_nw_ioc_fwver_get(ioc, &fwhdr);

        /* Normal-boot firmware is fine; leave its state alone. */
        if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
                return;

        writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);

        /*
         * Try to lock and then unlock the semaphore.
         */
        readl(ioc->ioc_regs.ioc_sem_reg);
        writel(1, ioc->ioc_regs.ioc_sem_reg);
}
1232
/**
 * Try to acquire the IOC hardware semaphore once and feed the result
 * into the IOCPF state machine; if it is busy, arm the semaphore
 * timer to retry later.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
        u32     r32;

        /**
         * First read to the semaphore register will return 0, subsequent reads
         * will return 1. Semaphore is released by writing 1 to the register
         */
        r32 = readl(ioc->ioc_regs.ioc_sem_reg);
        if (r32 == ~0) {
                /* All-ones read indicates the register is inaccessible. */
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
                return;
        }
        if (!(r32 & 1)) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
        }

        /* Busy: poll again after BFA_IOC_HWSEM_TOV milliseconds. */
        mod_timer(&ioc->sem_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}
1255
/**
 * Release the IOC hardware semaphore (write 1 releases it).
 */
void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
        writel(1, ioc->ioc_regs.ioc_sem_reg);
}
1261
/**
 * Cancel a pending semaphore retry by stopping the semaphore timer.
 */
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
        del_timer(&ioc->sem_timer);
}
1267
1268 /**
1269  * @brief
1270  * Initialize LPU local memory (aka secondary memory / SRAM)
1271  */
1272 static void
1273 bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1274 {
1275         u32     pss_ctl;
1276         int             i;
1277 #define PSS_LMEM_INIT_TIME  10000
1278
1279         pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1280         pss_ctl &= ~__PSS_LMEM_RESET;
1281         pss_ctl |= __PSS_LMEM_INIT_EN;
1282
1283         /*
1284          * i2c workaround 12.5khz clock
1285          */
1286         pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1287         writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1288
1289         /**
1290          * wait for memory initialization to be complete
1291          */
1292         i = 0;
1293         do {
1294                 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1295                 i++;
1296         } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1297
1298         /**
1299          * If memory initialization is not successful, IOC timeout will catch
1300          * such failures.
1301          */
1302         BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1303
1304         pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1305         writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1306 }
1307
/**
 * Release LPU 0 from reset so it starts executing the downloaded
 * firmware.
 */
static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
        u32     pss_ctl;

        /**
         * Take processor out of reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LPU0_RESET;

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
1321
/**
 * Hold both LPU processors in reset, stopping firmware execution.
 */
static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
        u32     pss_ctl;

        /**
         * Put processors in reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
1335
1336 /**
1337  * Get driver and firmware versions.
1338  */
1339 void
1340 bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1341 {
1342         u32     pgnum;
1343         u32     loff = 0;
1344         int             i;
1345         u32     *fwsig = (u32 *) fwhdr;
1346
1347         pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1348         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1349
1350         for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
1351              i++) {
1352                 fwsig[i] =
1353                         swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1354                 loff += sizeof(u32);
1355         }
1356 }
1357
1358 /**
1359  * Returns TRUE if same.
1360  */
1361 bool
1362 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1363 {
1364         struct bfi_ioc_image_hdr *drv_fwhdr;
1365         int i;
1366
1367         drv_fwhdr = (struct bfi_ioc_image_hdr *)
1368                 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1369
1370         for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1371                 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
1372                         return false;
1373         }
1374
1375         return true;
1376 }
1377
1378 /**
1379  * Return true if current running version is valid. Firmware signature and
1380  * execution context (driver/bios) must match.
1381  */
1382 static bool
1383 bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1384 {
1385         struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
1386
1387         bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1388         drv_fwhdr = (struct bfi_ioc_image_hdr *)
1389                 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1390
1391         if (fwhdr.signature != drv_fwhdr->signature)
1392                 return false;
1393
1394         if (swab32(fwhdr.bootenv) != boot_env)
1395                 return false;
1396
1397         return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1398 }
1399
1400 /**
1401  * Conditionally flush any pending message from firmware at start.
1402  */
1403 static void
1404 bfa_ioc_msgflush(struct bfa_ioc *ioc)
1405 {
1406         u32     r32;
1407
1408         r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1409         if (r32)
1410                 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1411 }
1412
1413 /**
1414  * @img ioc_init_logic.jpg
1415  */
1416 static void
1417 bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1418 {
1419         enum bfi_ioc_state ioc_fwstate;
1420         bool fwvalid;
1421         u32 boot_env;
1422
1423         ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1424
1425         if (force)
1426                 ioc_fwstate = BFI_IOC_UNINIT;
1427
1428         boot_env = BFI_FWBOOT_ENV_OS;
1429
1430         /**
1431          * check if firmware is valid
1432          */
1433         fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1434                 false : bfa_ioc_fwver_valid(ioc, boot_env);
1435
1436         if (!fwvalid) {
1437                 bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1438                 bfa_ioc_poll_fwinit(ioc);
1439                 return;
1440         }
1441
1442         /**
1443          * If hardware initialization is in progress (initialized by other IOC),
1444          * just wait for an initialization completion interrupt.
1445          */
1446         if (ioc_fwstate == BFI_IOC_INITING) {
1447                 bfa_ioc_poll_fwinit(ioc);
1448                 return;
1449         }
1450
1451         /**
1452          * If IOC function is disabled and firmware version is same,
1453          * just re-enable IOC.
1454          */
1455         if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1456                 /**
1457                  * When using MSI-X any pending firmware ready event should
1458                  * be flushed. Otherwise MSI-X interrupts are not delivered.
1459                  */
1460                 bfa_ioc_msgflush(ioc);
1461                 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1462                 return;
1463         }
1464
1465         /**
1466          * Initialize the h/w for any other states.
1467          */
1468         bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
1469         bfa_ioc_poll_fwinit(ioc);
1470 }
1471
1472 void
1473 bfa_nw_ioc_timeout(void *ioc_arg)
1474 {
1475         struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
1476
1477         bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1478 }
1479
/**
 * Copy a message into the host-to-firmware mailbox registers, zero
 * the remainder of the mailbox, and ring the doorbell.
 */
static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
        u32 *msgp = (u32 *) ioc_msg;
        u32 i;

        BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

        /*
         * first write msg to mailbox registers
         */
        /* NOTE(review): cpu_to_le32 combined with writel (which also
         * byte-swaps on big-endian) looks like a double conversion --
         * confirm against the firmware's expected mailbox byte order. */
        for (i = 0; i < len / sizeof(u32); i++)
                writel(cpu_to_le32(msgp[i]),
                              ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

        for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
                writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

        /*
         * write 1 to mailbox CMD to trigger LPU event
         */
        writel(1, ioc->ioc_regs.hfn_mbox_cmd);
        /* Read back to flush the posted doorbell write. */
        (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
1504
/**
 * Build and send an IOC enable request to firmware, stamping it with
 * the class code and current wall-clock seconds.
 */
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
        struct bfi_ioc_ctrl_req enable_req;
        struct timeval tv;

        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
                    bfa_ioc_portid(ioc));
        enable_req.clscode = htons(ioc->clscode);
        do_gettimeofday(&tv);
        /* NOTE(review): ntohl used for host-to-wire conversion here;
         * equivalent to htonl, but htonl would read more clearly. */
        enable_req.tv_sec = ntohl(tv.tv_sec);
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}
1518
/**
 * Build and send an IOC disable request to firmware.
 */
static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
        struct bfi_ioc_ctrl_req disable_req;

        bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
                    bfa_ioc_portid(ioc));
        bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}
1528
/**
 * Request IOC attributes from firmware; the reply is DMA'd into the
 * pre-allocated attr buffer at ioc->attr_dma.pa.
 */
static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
        struct bfi_ioc_getattr_req attr_req;

        bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
                    bfa_ioc_portid(ioc));
        bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
        bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
1539
1540 void
1541 bfa_nw_ioc_hb_check(void *cbarg)
1542 {
1543         struct bfa_ioc *ioc = cbarg;
1544         u32     hb_count;
1545
1546         hb_count = readl(ioc->ioc_regs.heartbeat);
1547         if (ioc->hb_count == hb_count) {
1548                 bfa_ioc_recover(ioc);
1549                 return;
1550         } else {
1551                 ioc->hb_count = hb_count;
1552         }
1553
1554         bfa_ioc_mbox_poll(ioc);
1555         mod_timer(&ioc->hb_timer, jiffies +
1556                 msecs_to_jiffies(BFA_IOC_HB_TOV));
1557 }
1558
/**
 * Start heartbeat monitoring: snapshot the current firmware heartbeat
 * counter and arm the periodic check timer.
 */
static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
        ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
        mod_timer(&ioc->hb_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_HB_TOV));
}
1566
/**
 * Stop heartbeat monitoring by cancelling the heartbeat timer.
 */
static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
        del_timer(&ioc->hb_timer);
}
1572
1573 /**
1574  * @brief
1575  *      Initiate a full firmware download.
1576  */
1577 static void
1578 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1579                     u32 boot_env)
1580 {
1581         u32 *fwimg;
1582         u32 pgnum;
1583         u32 loff = 0;
1584         u32 chunkno = 0;
1585         u32 i;
1586         u32 asicmode;
1587
1588         /**
1589          * Initialize LMEM first before code download
1590          */
1591         bfa_ioc_lmem_init(ioc);
1592
1593         fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1594
1595         pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1596
1597         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1598
1599         for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1600                 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1601                         chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1602                         fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1603                                         BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1604                 }
1605
1606                 /**
1607                  * write smem
1608                  */
1609                 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1610                               ((ioc->ioc_regs.smem_page_start) + (loff)));
1611
1612                 loff += sizeof(u32);
1613
1614                 /**
1615                  * handle page offset wrap around
1616                  */
1617                 loff = PSS_SMEM_PGOFF(loff);
1618                 if (loff == 0) {
1619                         pgnum++;
1620                         writel(pgnum,
1621                                       ioc->ioc_regs.host_page_num_fn);
1622                 }
1623         }
1624
1625         writel(bfa_ioc_smem_pgnum(ioc, 0),
1626                       ioc->ioc_regs.host_page_num_fn);
1627
1628         /*
1629          * Set boot type, env and device mode at the end.
1630         */
1631         asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1632                                         ioc->port0_mode, ioc->port1_mode);
1633         writel(asicmode, ((ioc->ioc_regs.smem_page_start)
1634                         + BFI_FWBOOT_DEVMODE_OFF));
1635         writel(boot_type, ((ioc->ioc_regs.smem_page_start)
1636                         + (BFI_FWBOOT_TYPE_OFF)));
1637         writel(boot_env, ((ioc->ioc_regs.smem_page_start)
1638                         + (BFI_FWBOOT_ENV_OFF)));
1639 }
1640
/**
 * Reset the IOC by (re)running hardware initialization; @force skips
 * the running-firmware validity check.
 */
static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
        bfa_ioc_hwinit(ioc, force);
}
1646
1647 /**
1648  * BFA ioc enable reply by firmware
1649  */
1650 static void
1651 bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
1652                         u8 cap_bm)
1653 {
1654         struct bfa_iocpf *iocpf = &ioc->iocpf;
1655
1656         ioc->port_mode = ioc->port_mode_cfg = port_mode;
1657         ioc->ad_cap_bm = cap_bm;
1658         bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1659 }
1660
1661 /**
1662  * @brief
1663  * Update BFA configuration from firmware configuration.
1664  */
1665 static void
1666 bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1667 {
1668         struct bfi_ioc_attr *attr = ioc->attr;
1669
1670         attr->adapter_prop  = ntohl(attr->adapter_prop);
1671         attr->card_type     = ntohl(attr->card_type);
1672         attr->maxfrsize     = ntohs(attr->maxfrsize);
1673
1674         bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1675 }
1676
1677 /**
1678  * Attach time initialization of mbox logic.
1679  */
1680 static void
1681 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1682 {
1683         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1684         int     mc;
1685
1686         INIT_LIST_HEAD(&mod->cmd_q);
1687         for (mc = 0; mc < BFI_MC_MAX; mc++) {
1688                 mod->mbhdlr[mc].cbfn = NULL;
1689                 mod->mbhdlr[mc].cbarg = ioc->bfa;
1690         }
1691 }
1692
1693 /**
1694  * Mbox poll timer -- restarts any pending mailbox requests.
1695  */
1696 static void
1697 bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1698 {
1699         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1700         struct bfa_mbox_cmd *cmd;
1701         bfa_mbox_cmd_cbfn_t cbfn;
1702         void *cbarg;
1703         u32 stat;
1704
1705         /**
1706          * If no command pending, do nothing
1707          */
1708         if (list_empty(&mod->cmd_q))
1709                 return;
1710
1711         /**
1712          * If previous command is not yet fetched by firmware, do nothing
1713          */
1714         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1715         if (stat)
1716                 return;
1717
1718         /**
1719          * Enqueue command to firmware.
1720          */
1721         bfa_q_deq(&mod->cmd_q, &cmd);
1722         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1723
1724         /**
1725          * Give a callback to the client, indicating that the command is sent
1726          */
1727         if (cmd->cbfn) {
1728                 cbfn = cmd->cbfn;
1729                 cbarg = cmd->cbarg;
1730                 cmd->cbfn = NULL;
1731                 cbfn(cbarg);
1732         }
1733 }
1734
1735 /**
1736  * Cleanup any pending requests.
1737  */
1738 static void
1739 bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
1740 {
1741         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1742         struct bfa_mbox_cmd *cmd;
1743
1744         while (!list_empty(&mod->cmd_q))
1745                 bfa_q_deq(&mod->cmd_q, &cmd);
1746 }
1747
1748 /**
1749  * Read data from SMEM to host through PCI memmap
1750  *
1751  * @param[in]  ioc     memory for IOC
1752  * @param[in]  tbuf    app memory to store data from smem
1753  * @param[in]  soff    smem offset
1754  * @param[in]  sz      size of smem in bytes
1755  */
1756 static int
1757 bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
1758 {
1759         u32 pgnum, loff, r32;
1760         int i, len;
1761         u32 *buf = tbuf;
1762
1763         pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1764         loff = PSS_SMEM_PGOFF(soff);
1765
1766         /*
1767          *  Hold semaphore to serialize pll init and fwtrc.
1768         */
1769         if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
1770                 return 1;
1771
1772         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1773
1774         len = sz/sizeof(u32);
1775         for (i = 0; i < len; i++) {
1776                 r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1777                 buf[i] = be32_to_cpu(r32);
1778                 loff += sizeof(u32);
1779
1780                 /**
1781                  * handle page offset wrap around
1782                  */
1783                 loff = PSS_SMEM_PGOFF(loff);
1784                 if (loff == 0) {
1785                         pgnum++;
1786                         writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1787                 }
1788         }
1789
1790         writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1791                ioc->ioc_regs.host_page_num_fn);
1792
1793         /*
1794          * release semaphore
1795          */
1796         readl(ioc->ioc_regs.ioc_init_sem_reg);
1797         writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1798         return 0;
1799 }
1800
1801 /**
1802  * Retrieve saved firmware trace from a prior IOC failure.
1803  */
1804 int
1805 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1806 {
1807         u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
1808         int tlen, status = 0;
1809
1810         tlen = *trclen;
1811         if (tlen > BNA_DBG_FWTRC_LEN)
1812                 tlen = BNA_DBG_FWTRC_LEN;
1813
1814         status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
1815         *trclen = tlen;
1816         return status;
1817 }
1818
1819 /**
1820  * Save firmware trace if configured.
1821  */
1822 static void
1823 bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
1824 {
1825         int tlen;
1826
1827         if (ioc->dbg_fwsave_once) {
1828                 ioc->dbg_fwsave_once = 0;
1829                 if (ioc->dbg_fwsave_len) {
1830                         tlen = ioc->dbg_fwsave_len;
1831                         bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
1832                 }
1833         }
1834 }
1835
1836 /**
1837  * Retrieve saved firmware trace from a prior IOC failure.
1838  */
1839 int
1840 bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
1841 {
1842         int tlen;
1843
1844         if (ioc->dbg_fwsave_len == 0)
1845                 return BFA_STATUS_ENOFSAVE;
1846
1847         tlen = *trclen;
1848         if (tlen > ioc->dbg_fwsave_len)
1849                 tlen = ioc->dbg_fwsave_len;
1850
1851         memcpy(trcdata, ioc->dbg_fwsave, tlen);
1852         *trclen = tlen;
1853         return BFA_STATUS_OK;
1854 }
1855
/**
 * Handle an IOC failure: invoke the driver's heartbeat-fail callback,
 * notify registered modules, and save the firmware trace.
 */
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
        /**
         * Notify driver and common modules registered for notification.
         */
        ioc->cbfn->hbfail_cbfn(ioc->bfa);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
        bfa_nw_ioc_debug_save_ftrc(ioc);
}
1866
1867 /**
1868  * IOCPF to IOC interface
1869  */
/* IOCPF -> IOC: report enable completion to the IOC state machine. */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}
1875
/* IOCPF -> IOC: report disable completion to the IOC state machine. */
static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}
1881
/* IOCPF -> IOC: report a PF-level failure to the IOC state machine. */
static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}
1887
/* IOCPF -> IOC: report an unrecoverable hardware failure. */
static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}
1893
/* IOCPF -> IOC: firmware version mismatch; fail the pending enable. */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
        /**
         * Provide enable completion callback and AEN notification.
         */
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
1902
1903 /**
1904  * IOC public
1905  */
1906 static enum bfa_status
1907 bfa_ioc_pll_init(struct bfa_ioc *ioc)
1908 {
1909         /*
1910          *  Hold semaphore so that nobody can access the chip during init.
1911          */
1912         bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1913
1914         bfa_ioc_pll_init_asic(ioc);
1915
1916         ioc->pllinit = true;
1917         /*
1918          *  release semaphore.
1919          */
1920         bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1921
1922         return BFA_STATUS_OK;
1923 }
1924
1925 /**
1926  * Interface used by diag module to do firmware boot with memory test
1927  * as the entry vector.
1928  */
1929 static void
1930 bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
1931                 u32 boot_env)
1932 {
1933         bfa_ioc_stats(ioc, ioc_boots);
1934
1935         if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1936                 return;
1937
1938         /**
1939          * Initialize IOC state of all functions on a chip reset.
1940          */
1941         if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
1942                 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
1943                 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
1944         } else {
1945                 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
1946                 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
1947         }
1948
1949         bfa_ioc_msgflush(ioc);
1950         bfa_ioc_download_fw(ioc, boot_type, boot_env);
1951         bfa_ioc_lpu_start(ioc);
1952 }
1953
1954 /**
1955  * Enable/disable IOC failure auto recovery.
1956  */
1957 void
1958 bfa_nw_ioc_auto_recover(bool auto_recover)
1959 {
1960         bfa_nw_auto_recover = auto_recover;
1961 }
1962
/**
 * Fetch a pending firmware-to-host mailbox message into @mbmsg.
 *
 * Returns false if no message is pending; otherwise copies the
 * message words, acknowledges the mailbox, and returns true.
 */
static bool
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
        u32     *msgp = mbmsg;
        u32     r32;
        int             i;

        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
        if ((r32 & 1) == 0)
                return false;

        /**
         * read the MBOX msg
         */
        for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
             i++) {
                r32 = readl(ioc->ioc_regs.lpu_mbox +
                                   i * sizeof(u32));
                /* NOTE(review): htonl on a readl value -- confirm the
                 * firmware mailbox byte order this is compensating for. */
                msgp[i] = htonl(r32);
        }

        /**
         * turn off mailbox interrupt by clearing mailbox status
         */
        writel(1, ioc->ioc_regs.lpu_mbox_cmd);
        readl(ioc->ioc_regs.lpu_mbox_cmd);

        return true;
}
1992
/**
 * Dispatch a received BFI_MC_IOC class mailbox message to the matching
 * reply handler or IOC/IOCPF state machine event.
 */
static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u *msg;
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		/* No action needed for heartbeat messages. */
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_ioc_enable_reply(ioc,
			(enum bfa_mode)msg->fw_event.port_mode,
			msg->fw_event.cap_bm);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		/* An unknown message id from firmware is a protocol error. */
		BUG_ON(1);
	}
}
2025
2026 /**
2027  * IOC attach time initialization and setup.
2028  *
2029  * @param[in]   ioc     memory for IOC
2030  * @param[in]   bfa     driver instance structure
2031  */
2032 void
2033 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
2034 {
2035         ioc->bfa        = bfa;
2036         ioc->cbfn       = cbfn;
2037         ioc->fcmode     = false;
2038         ioc->pllinit    = false;
2039         ioc->dbg_fwsave_once = true;
2040         ioc->iocpf.ioc  = ioc;
2041
2042         bfa_ioc_mbox_attach(ioc);
2043         INIT_LIST_HEAD(&ioc->notify_q);
2044
2045         bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2046         bfa_fsm_send_event(ioc, IOC_E_RESET);
2047 }
2048
2049 /**
2050  * Driver detach time IOC cleanup.
2051  */
2052 void
2053 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
2054 {
2055         bfa_fsm_send_event(ioc, IOC_E_DETACH);
2056
2057         /* Done with detach, empty the notify_q. */
2058         INIT_LIST_HEAD(&ioc->notify_q);
2059 }
2060
2061 /**
2062  * Setup IOC PCI properties.
2063  *
2064  * @param[in]   pcidev  PCI device information for this IOC
2065  */
2066 void
2067 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
2068                  enum bfi_pcifn_class clscode)
2069 {
2070         ioc->clscode    = clscode;
2071         ioc->pcidev     = *pcidev;
2072
2073         /**
2074          * Initialize IOC and device personality
2075          */
2076         ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2077         ioc->asic_mode  = BFI_ASIC_MODE_FC;
2078
2079         switch (pcidev->device_id) {
2080         case PCI_DEVICE_ID_BROCADE_CT:
2081                 ioc->asic_gen = BFI_ASIC_GEN_CT;
2082                 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2083                 ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2084                 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2085                 ioc->ad_cap_bm = BFA_CM_CNA;
2086                 break;
2087
2088         case BFA_PCI_DEVICE_ID_CT2:
2089                 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2090                 if (clscode == BFI_PCIFN_CLASS_FC &&
2091                         pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2092                         ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2093                         ioc->fcmode = true;
2094                         ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2095                         ioc->ad_cap_bm = BFA_CM_HBA;
2096                 } else {
2097                         ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2098                         ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2099                         if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2100                                 ioc->port_mode =
2101                                 ioc->port_mode_cfg = BFA_MODE_CNA;
2102                                 ioc->ad_cap_bm = BFA_CM_CNA;
2103                         } else {
2104                                 ioc->port_mode =
2105                                 ioc->port_mode_cfg = BFA_MODE_NIC;
2106                                 ioc->ad_cap_bm = BFA_CM_NIC;
2107                         }
2108                 }
2109                 break;
2110
2111         default:
2112                 BUG_ON(1);
2113         }
2114
2115         /**
2116          * Set asic specific interfaces.
2117          */
2118         if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2119                 bfa_nw_ioc_set_ct_hwif(ioc);
2120         else {
2121                 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2122                 bfa_nw_ioc_set_ct2_hwif(ioc);
2123                 bfa_nw_ioc_ct2_poweron(ioc);
2124         }
2125
2126         bfa_ioc_map_port(ioc);
2127         bfa_ioc_reg_init(ioc);
2128 }
2129
2130 /**
2131  * Initialize IOC dma memory
2132  *
2133  * @param[in]   dm_kva  kernel virtual address of IOC dma memory
2134  * @param[in]   dm_pa   physical address of IOC dma memory
2135  */
2136 void
2137 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
2138 {
2139         /**
2140          * dma memory for firmware attribute
2141          */
2142         ioc->attr_dma.kva = dm_kva;
2143         ioc->attr_dma.pa = dm_pa;
2144         ioc->attr = (struct bfi_ioc_attr *) dm_kva;
2145 }
2146
2147 /**
2148  * Return size of dma memory required.
2149  */
2150 u32
2151 bfa_nw_ioc_meminfo(void)
2152 {
2153         return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
2154 }
2155
/* Request IOC enable: bump stats and kick the state machine. */
void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	/* Re-arm one-shot firmware trace saving for this enable cycle. */
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
2164
/* Request IOC disable: bump stats and kick the state machine. */
void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
2171
2172 /**
2173  * Initialize memory for saving firmware trace.
2174  */
2175 void
2176 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
2177 {
2178         ioc->dbg_fwsave = dbg_fwsave;
2179         ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
2180 }
2181
/* Compute the shared-memory page number for @fmaddr, relative to page 0. */
static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
2187
2188 /**
2189  * Register mailbox message handler function, to be called by common modules
2190  */
2191 void
2192 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2193                     bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2194 {
2195         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2196
2197         mod->mbhdlr[mc].cbfn    = cbfn;
2198         mod->mbhdlr[mc].cbarg = cbarg;
2199 }
2200
2201 /**
2202  * Queue a mailbox command request to firmware. Waits if mailbox is busy.
2203  * Responsibility of caller to serialize
2204  *
2205  * @param[in]   ioc     IOC instance
2206  * @param[i]    cmd     Mailbox command
2207  */
2208 bool
2209 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2210                         bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
2211 {
2212         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2213         u32                     stat;
2214
2215         cmd->cbfn = cbfn;
2216         cmd->cbarg = cbarg;
2217
2218         /**
2219          * If a previous command is pending, queue new command
2220          */
2221         if (!list_empty(&mod->cmd_q)) {
2222                 list_add_tail(&cmd->qe, &mod->cmd_q);
2223                 return true;
2224         }
2225
2226         /**
2227          * If mailbox is busy, queue command for poll timer
2228          */
2229         stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2230         if (stat) {
2231                 list_add_tail(&cmd->qe, &mod->cmd_q);
2232                 return true;
2233         }
2234
2235         /**
2236          * mailbox is free -- queue command to firmware
2237          */
2238         bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2239
2240         return false;
2241 }
2242
2243 /**
2244  * Handle mailbox interrupts
2245  */
2246 void
2247 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
2248 {
2249         struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2250         struct bfi_mbmsg m;
2251         int                             mc;
2252
2253         if (bfa_ioc_msgget(ioc, &m)) {
2254                 /**
2255                  * Treat IOC message class as special.
2256                  */
2257                 mc = m.mh.msg_class;
2258                 if (mc == BFI_MC_IOC) {
2259                         bfa_ioc_isr(ioc, &m);
2260                         return;
2261                 }
2262
2263                 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2264                         return;
2265
2266                 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2267         }
2268
2269         bfa_ioc_lpu_read_stat(ioc);
2270
2271         /**
2272          * Try to send pending mailbox commands
2273          */
2274         bfa_ioc_mbox_poll(ioc);
2275 }
2276
/* Hardware error interrupt: record stats and raise IOC_E_HWERROR. */
void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
2284
2285 /**
2286  * return true if IOC is disabled
2287  */
2288 bool
2289 bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2290 {
2291         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2292                 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2293 }
2294
2295 /**
2296  * return true if IOC is operational
2297  */
2298 bool
2299 bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
2300 {
2301         return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2302 }
2303
2304 /**
2305  * Add to IOC heartbeat failure notification queue. To be used by common
2306  * modules such as cee, port, diag.
2307  */
2308 void
2309 bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
2310                         struct bfa_ioc_notify *notify)
2311 {
2312         list_add_tail(&notify->qe, &ioc->notify_q);
2313 }
2314
2315 #define BFA_MFG_NAME "Brocade"
2316 static void
2317 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2318                          struct bfa_adapter_attr *ad_attr)
2319 {
2320         struct bfi_ioc_attr *ioc_attr;
2321
2322         ioc_attr = ioc->attr;
2323
2324         bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2325         bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2326         bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2327         bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2328         memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2329                       sizeof(struct bfa_mfg_vpd));
2330
2331         ad_attr->nports = bfa_ioc_get_nports(ioc);
2332         ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2333
2334         bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2335         /* For now, model descr uses same model string */
2336         bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2337
2338         ad_attr->card_type = ioc_attr->card_type;
2339         ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2340
2341         if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2342                 ad_attr->prototype = 1;
2343         else
2344                 ad_attr->prototype = 0;
2345
2346         ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2347         ad_attr->mac  = bfa_nw_ioc_get_mac(ioc);
2348
2349         ad_attr->pcie_gen = ioc_attr->pcie_gen;
2350         ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2351         ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2352         ad_attr->asic_rev = ioc_attr->asic_rev;
2353
2354         bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2355 }
2356
/*
 * Classify the IOC: an Ethernet class function is link-layer (LL); an
 * FC class function is FC or FCoE depending on the firmware-reported
 * port mode.  Any other class code is a bug.
 */
static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}
2368
2369 static void
2370 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2371 {
2372         memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2373         memcpy(serial_num,
2374                         (void *)ioc->attr->brcd_serialnum,
2375                         BFA_ADAPTER_SERIAL_NUM_LEN);
2376 }
2377
2378 static void
2379 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2380 {
2381         memset(fw_ver, 0, BFA_VERSION_LEN);
2382         memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2383 }
2384
2385 static void
2386 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2387 {
2388         BUG_ON(!(chip_rev));
2389
2390         memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2391
2392         chip_rev[0] = 'R';
2393         chip_rev[1] = 'e';
2394         chip_rev[2] = 'v';
2395         chip_rev[3] = '-';
2396         chip_rev[4] = ioc->attr->asic_rev;
2397         chip_rev[5] = '\0';
2398 }
2399
2400 static void
2401 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2402 {
2403         memset(optrom_ver, 0, BFA_VERSION_LEN);
2404         memcpy(optrom_ver, ioc->attr->optrom_version,
2405                       BFA_VERSION_LEN);
2406 }
2407
2408 static void
2409 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2410 {
2411         memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2412         memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2413 }
2414
/* Format the adapter model as "Brocade-<card_type>" into @model. */
static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!(model));
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}
2428
/*
 * Return the externally visible IOC state.  While the IOC is enabling,
 * failed, or init-failed, the coarse IOC state is refined using the
 * IOCPF sub-state machine.
 */
static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			/* Other IOCPF states do not refine the IOC state. */
			break;
		}
	}
	return ioc_st;
}
2467
/* Fill @ioc_attr with the full IOC attribute set (state, modes, PCI info). */
void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;
	ioc_attr->port_mode = ioc->port_mode;

	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
2488
2489 /**
2490  * WWN public
2491  */
2492 static u64
2493 bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2494 {
2495         return ioc->attr->pwwn;
2496 }
2497
/* Return the MAC address reported by firmware. */
mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	return ioc->attr->mac;
}
2503
2504 /**
2505  * Firmware failure detected. Start recovery actions.
2506  */
2507 static void
2508 bfa_ioc_recover(struct bfa_ioc *ioc)
2509 {
2510         pr_crit("Heart Beat of IOC has failed\n");
2511         bfa_ioc_stats(ioc, ioc_hbfails);
2512         bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2513         bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2514 }
2515
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
{
	/*
	 * NOTE(review): this function currently has no effect -- both the
	 * LL branch and the fall-through path simply return.  It looks like
	 * a vestige of the FC driver's WWN validation; confirm before
	 * removing it or its call sites.
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}
2522
2523 /**
2524  * @dg hal_iocpf_pvt BFA IOC PF private functions
2525  * @{
2526  */
2527
/* Forward an enable request to the IOCPF state machine. */
static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
2533
/* Forward a disable request to the IOCPF state machine. */
static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}
2539
/* Report a failure to the IOCPF state machine. */
static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}
2545
/* Report an initialization failure to the IOCPF state machine. */
static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}
2551
/* Report a get-attributes failure to the IOCPF state machine. */
static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}
2557
/* Forward a stop request to the IOCPF state machine. */
static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}
2563
/*
 * IOCPF timer expiry.  While hardware init is in progress the timer is
 * used to poll for firmware readiness; in every other state expiry is a
 * real timeout event for the IOCPF state machine.
 */
void
bfa_nw_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc	*ioc = (struct bfa_ioc *) ioc_arg;
	enum bfa_iocpf_state iocpf_st;

	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

	if (iocpf_st == BFA_IOCPF_HWINIT)
		bfa_ioc_poll_fwinit(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}
2577
/* Semaphore poll timer expiry: retry acquiring the HW semaphore. */
void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc	*ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
2585
/*
 * Poll the firmware state register during hardware init.  Raises
 * IOCPF_E_FWREADY once firmware reports DISABLED; otherwise either
 * re-arms the poll timer or, after BFA_IOC_TOV total, escalates to
 * the regular IOCPF timeout path.
 */
static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		bfa_nw_iocpf_timeout(ioc);
	} else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		mod_timer(&ioc->iocpf_timer, jiffies +
			msecs_to_jiffies(BFA_IOC_POLL_TOV));
	}
}
2604
2605 /*
2606  *      Flash module specific
2607  */
2608
2609 /*
2610  * FLASH DMA buffer should be big enough to hold both MFG block and
2611  * asic block(64k) at the same time and also should be 2k aligned to
2612  * avoid write segement to cross sector boundary.
2613  */
2614 #define BFA_FLASH_SEG_SZ        2048
2615 #define BFA_FLASH_DMA_BUF_SZ    \
2616         roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
2617
2618 static void
2619 bfa_flash_cb(struct bfa_flash *flash)
2620 {
2621         flash->op_busy = 0;
2622         if (flash->cbfn)
2623                 flash->cbfn(flash->cbarg, flash->status);
2624 }
2625
/*
 * IOC event notification for the flash module: on IOC disable or failure,
 * fail any in-flight flash operation with BFA_STATUS_IOC_FAILURE.
 */
static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_flash *flash = cbarg;

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;
	default:
		/* Other IOC events do not affect flash operations. */
		break;
	}
}
2644
2645 /*
2646  * Send flash write request.
2647  *
2648  * @param[in] cbarg - callback argument
2649  */
2650 static void
2651 bfa_flash_write_send(struct bfa_flash *flash)
2652 {
2653         struct bfi_flash_write_req *msg =
2654                         (struct bfi_flash_write_req *) flash->mb.msg;
2655         u32     len;
2656
2657         msg->type = be32_to_cpu(flash->type);
2658         msg->instance = flash->instance;
2659         msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
2660         len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
2661                flash->residue : BFA_FLASH_DMA_BUF_SZ;
2662         msg->length = be32_to_cpu(len);
2663
2664         /* indicate if it's the last msg of the whole write operation */
2665         msg->last = (len == flash->residue) ? 1 : 0;
2666
2667         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
2668                     bfa_ioc_portid(flash->ioc));
2669         bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
2670         memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
2671         bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2672
2673         flash->residue -= len;
2674         flash->offset += len;
2675 }
2676
2677 /*
2678  * Send flash read request.
2679  *
2680  * @param[in] cbarg - callback argument
2681  */
2682 static void
2683 bfa_flash_read_send(void *cbarg)
2684 {
2685         struct bfa_flash *flash = cbarg;
2686         struct bfi_flash_read_req *msg =
2687                         (struct bfi_flash_read_req *) flash->mb.msg;
2688         u32     len;
2689
2690         msg->type = be32_to_cpu(flash->type);
2691         msg->instance = flash->instance;
2692         msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
2693         len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
2694                flash->residue : BFA_FLASH_DMA_BUF_SZ;
2695         msg->length = be32_to_cpu(len);
2696         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
2697                     bfa_ioc_portid(flash->ioc));
2698         bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
2699         bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2700 }
2701
2702 /*
2703  * Process flash response messages upon receiving interrupts.
2704  *
2705  * @param[in] flasharg - flash structure
2706  * @param[in] msg - message structure
2707  */
2708 static void
2709 bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
2710 {
2711         struct bfa_flash *flash = flasharg;
2712         u32     status;
2713
2714         union {
2715                 struct bfi_flash_query_rsp *query;
2716                 struct bfi_flash_write_rsp *write;
2717                 struct bfi_flash_read_rsp *read;
2718                 struct bfi_mbmsg   *msg;
2719         } m;
2720
2721         m.msg = msg;
2722
2723         /* receiving response after ioc failure */
2724         if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
2725                 return;
2726
2727         switch (msg->mh.msg_id) {
2728         case BFI_FLASH_I2H_QUERY_RSP:
2729                 status = be32_to_cpu(m.query->status);
2730                 if (status == BFA_STATUS_OK) {
2731                         u32     i;
2732                         struct bfa_flash_attr *attr, *f;
2733
2734                         attr = (struct bfa_flash_attr *) flash->ubuf;
2735                         f = (struct bfa_flash_attr *) flash->dbuf_kva;
2736                         attr->status = be32_to_cpu(f->status);
2737                         attr->npart = be32_to_cpu(f->npart);
2738                         for (i = 0; i < attr->npart; i++) {
2739                                 attr->part[i].part_type =
2740                                         be32_to_cpu(f->part[i].part_type);
2741                                 attr->part[i].part_instance =
2742                                         be32_to_cpu(f->part[i].part_instance);
2743                                 attr->part[i].part_off =
2744                                         be32_to_cpu(f->part[i].part_off);
2745                                 attr->part[i].part_size =
2746                                         be32_to_cpu(f->part[i].part_size);
2747                                 attr->part[i].part_len =
2748                                         be32_to_cpu(f->part[i].part_len);
2749                                 attr->part[i].part_status =
2750                                         be32_to_cpu(f->part[i].part_status);
2751                         }
2752                 }
2753                 flash->status = status;
2754                 bfa_flash_cb(flash);
2755                 break;
2756         case BFI_FLASH_I2H_WRITE_RSP:
2757                 status = be32_to_cpu(m.write->status);
2758                 if (status != BFA_STATUS_OK || flash->residue == 0) {
2759                         flash->status = status;
2760                         bfa_flash_cb(flash);
2761                 } else
2762                         bfa_flash_write_send(flash);
2763                 break;
2764         case BFI_FLASH_I2H_READ_RSP:
2765                 status = be32_to_cpu(m.read->status);
2766                 if (status != BFA_STATUS_OK) {
2767                         flash->status = status;
2768                         bfa_flash_cb(flash);
2769                 } else {
2770                         u32 len = be32_to_cpu(m.read->length);
2771                         memcpy(flash->ubuf + flash->offset,
2772                                flash->dbuf_kva, len);
2773                         flash->residue -= len;
2774                         flash->offset += len;
2775                         if (flash->residue == 0) {
2776                                 flash->status = status;
2777                                 bfa_flash_cb(flash);
2778                         } else
2779                                 bfa_flash_read_send(flash);
2780                 }
2781                 break;
2782         case BFI_FLASH_I2H_BOOT_VER_RSP:
2783         case BFI_FLASH_I2H_EVENT:
2784                 break;
2785         default:
2786                 WARN_ON(1);
2787         }
2788 }
2789
2790 /*
2791  * Flash memory info API.
2792  */
2793 u32
2794 bfa_nw_flash_meminfo(void)
2795 {
2796         return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2797 }
2798
2799 /*
2800  * Flash attach API.
2801  *
2802  * @param[in] flash - flash structure
2803  * @param[in] ioc  - ioc structure
2804  * @param[in] dev  - device structure
2805  */
2806 void
2807 bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
2808 {
2809         flash->ioc = ioc;
2810         flash->cbfn = NULL;
2811         flash->cbarg = NULL;
2812         flash->op_busy = 0;
2813
2814         bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
2815         bfa_q_qe_init(&flash->ioc_notify);
2816         bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
2817         list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
2818 }
2819
2820 /*
2821  * Claim memory for flash
2822  *
2823  * @param[in] flash - flash structure
2824  * @param[in] dm_kva - pointer to virtual memory address
2825  * @param[in] dm_pa - physical memory address
2826  */
2827 void
2828 bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
2829 {
2830         flash->dbuf_kva = dm_kva;
2831         flash->dbuf_pa = dm_pa;
2832         memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
2833         dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2834         dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
2835 }
2836
2837 /*
2838  * Get flash attribute.
2839  *
2840  * @param[in] flash - flash structure
2841  * @param[in] attr - flash attribute structure
2842  * @param[in] cbfn - callback function
2843  * @param[in] cbarg - callback argument
2844  *
2845  * Return status.
2846  */
2847 enum bfa_status
2848 bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
2849                       bfa_cb_flash cbfn, void *cbarg)
2850 {
2851         struct bfi_flash_query_req *msg =
2852                         (struct bfi_flash_query_req *) flash->mb.msg;
2853
2854         if (!bfa_nw_ioc_is_operational(flash->ioc))
2855                 return BFA_STATUS_IOC_NON_OP;
2856
2857         if (flash->op_busy)
2858                 return BFA_STATUS_DEVBUSY;
2859
2860         flash->op_busy = 1;
2861         flash->cbfn = cbfn;
2862         flash->cbarg = cbarg;
2863         flash->ubuf = (u8 *) attr;
2864
2865         bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
2866                     bfa_ioc_portid(flash->ioc));
2867         bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
2868         bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
2869
2870         return BFA_STATUS_OK;
2871 }
2872
2873 /*
2874  * Update flash partition.
2875  *
2876  * @param[in] flash - flash structure
2877  * @param[in] type - flash partition type
2878  * @param[in] instance - flash partition instance
2879  * @param[in] buf - update data buffer
2880  * @param[in] len - data buffer length
2881  * @param[in] offset - offset relative to the partition starting address
2882  * @param[in] cbfn - callback function
2883  * @param[in] cbarg - callback argument
2884  *
2885  * Return status.
2886  */
2887 enum bfa_status
2888 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
2889                          void *buf, u32 len, u32 offset,
2890                          bfa_cb_flash cbfn, void *cbarg)
2891 {
2892         if (!bfa_nw_ioc_is_operational(flash->ioc))
2893                 return BFA_STATUS_IOC_NON_OP;
2894
2895         /*
2896          * 'len' must be in word (4-byte) boundary
2897          */
2898         if (!len || (len & 0x03))
2899                 return BFA_STATUS_FLASH_BAD_LEN;
2900
2901         if (type == BFA_FLASH_PART_MFG)
2902                 return BFA_STATUS_EINVAL;
2903
2904         if (flash->op_busy)
2905                 return BFA_STATUS_DEVBUSY;
2906
2907         flash->op_busy = 1;
2908         flash->cbfn = cbfn;
2909         flash->cbarg = cbarg;
2910         flash->type = type;
2911         flash->instance = instance;
2912         flash->residue = len;
2913         flash->offset = 0;
2914         flash->addr_off = offset;
2915         flash->ubuf = buf;
2916
2917         bfa_flash_write_send(flash);
2918
2919         return BFA_STATUS_OK;
2920 }
2921
2922 /*
2923  * Read flash partition.
2924  *
2925  * @param[in] flash - flash structure
2926  * @param[in] type - flash partition type
2927  * @param[in] instance - flash partition instance
2928  * @param[in] buf - read data buffer
2929  * @param[in] len - data buffer length
2930  * @param[in] offset - offset relative to the partition starting address
2931  * @param[in] cbfn - callback function
2932  * @param[in] cbarg - callback argument
2933  *
2934  * Return status.
2935  */
2936 enum bfa_status
2937 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
2938                        void *buf, u32 len, u32 offset,
2939                        bfa_cb_flash cbfn, void *cbarg)
2940 {
2941         if (!bfa_nw_ioc_is_operational(flash->ioc))
2942                 return BFA_STATUS_IOC_NON_OP;
2943
2944         /*
2945          * 'len' must be in word (4-byte) boundary
2946          */
2947         if (!len || (len & 0x03))
2948                 return BFA_STATUS_FLASH_BAD_LEN;
2949
2950         if (flash->op_busy)
2951                 return BFA_STATUS_DEVBUSY;
2952
2953         flash->op_busy = 1;
2954         flash->cbfn = cbfn;
2955         flash->cbarg = cbarg;
2956         flash->type = type;
2957         flash->instance = instance;
2958         flash->residue = len;
2959         flash->offset = 0;
2960         flash->addr_off = offset;
2961         flash->ubuf = buf;
2962
2963         bfa_flash_read_send(flash);
2964
2965         return BFA_STATUS_OK;
2966 }