Commit | Line | Data |
---|---|---|
8b230ed8 RM |
1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | |
6 | * published by the Free Software Foundation | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | */ | |
13 | /* | |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | |
15 | * All rights reserved | |
16 | * www.brocade.com | |
17 | */ | |
18 | ||
19 | #include "bfa_ioc.h" | |
20 | #include "cna.h" | |
21 | #include "bfi.h" | |
22 | #include "bfi_ctreg.h" | |
23 | #include "bfa_defs.h" | |
24 | ||
/**
 * IOC local definitions
 */

/* Arm the IOC state-machine timer for BFA_IOC_TOV ms. */
#define bfa_ioc_timer_start(__ioc) \
	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
			msecs_to_jiffies(BFA_IOC_TOV))
#define bfa_ioc_timer_stop(__ioc)	del_timer(&(__ioc)->ioc_timer)

/* Re-arm ioc_timer with the longer auto-recovery delay. */
#define bfa_ioc_recovery_timer_start(__ioc) \
	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
			msecs_to_jiffies(BFA_IOC_TOV_RECOVER))

/* Timer used while polling for the h/w semaphore. */
#define bfa_sem_timer_start(__ioc) \
	mod_timer(&(__ioc)->sem_timer, jiffies +	\
			msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
#define bfa_sem_timer_stop(__ioc)	del_timer(&(__ioc)->sem_timer)

/* Firmware heartbeat monitoring timer. */
#define bfa_hb_timer_start(__ioc) \
	mod_timer(&(__ioc)->hb_timer, jiffies +	\
			msecs_to_jiffies(BFA_IOC_HB_TOV))
#define bfa_hb_timer_stop(__ioc)	del_timer(&(__ioc)->hb_timer)
47 | ||
/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 * These dispatch through the per-ASIC ops table (ioc->ioc_hwif).
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))

/* True when the firmware image is smaller than the minimum size,
 * i.e. the adapter boots from option-ROM flash. */
#define bfa_ioc_is_optrom(__ioc) \
	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)

/* A mailbox command is pending if one is queued or the h/w cmd
 * register is still set. */
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

/* Module-wide policy: attempt automatic recovery on heartbeat failure. */
bool bfa_auto_recover = true;
69 | ||
70 | /* | |
71 | * forward declarations | |
72 | */ | |
73 | static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc); | |
74 | static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc); | |
75 | static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force); | |
76 | static void bfa_ioc_send_enable(struct bfa_ioc *ioc); | |
77 | static void bfa_ioc_send_disable(struct bfa_ioc *ioc); | |
78 | static void bfa_ioc_send_getattr(struct bfa_ioc *ioc); | |
79 | static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc); | |
80 | static void bfa_ioc_hb_stop(struct bfa_ioc *ioc); | |
81 | static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force); | |
82 | static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc); | |
83 | static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc); | |
84 | static void bfa_ioc_recover(struct bfa_ioc *ioc); | |
85 | static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc); | |
86 | static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); | |
87 | static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); | |
88 | ||
/**
 * IOC state machine events, delivered via bfa_fsm_send_event().
 */
enum ioc_event {
	IOC_E_ENABLE		= 1,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 2,	/*!< IOC disable request	*/
	IOC_E_TIMEOUT		= 3,	/*!< f/w response timeout	*/
	IOC_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOC_E_FWRSP_GETATTR	= 5,	/*!< IOC get attribute response	*/
	IOC_E_FWRSP_ENABLE	= 6,	/*!< enable f/w response	*/
	IOC_E_FWRSP_DISABLE	= 7,	/*!< disable f/w response	*/
	IOC_E_HBFAIL		= 8,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 9,	/*!< hardware error interrupt	*/
	IOC_E_SEMLOCKED		= 10,	/*!< h/w semaphore is locked	*/
	IOC_E_DETACH		= 11,	/*!< driver detach cleanup	*/
};
105 | ||
106 | bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event); | |
107 | bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event); | |
108 | bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event); | |
109 | bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event); | |
110 | bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event); | |
111 | bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event); | |
112 | bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event); | |
113 | bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event); | |
114 | bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event); | |
115 | bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event); | |
116 | bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event); | |
117 | bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event); | |
118 | ||
/* Maps each state handler to the externally visible BFA_IOC_* state
 * reported to management/ioctl queries. */
static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
133 | ||
134 | /** | |
135 | * Reset entry actions -- initialize state machine | |
136 | */ | |
137 | static void | |
138 | bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) | |
139 | { | |
140 | ioc->retry_count = 0; | |
141 | ioc->auto_recover = bfa_auto_recover; | |
142 | } | |
143 | ||
144 | /** | |
145 | * Beginning state. IOC is in reset state. | |
146 | */ | |
147 | static void | |
148 | bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) | |
149 | { | |
150 | switch (event) { | |
151 | case IOC_E_ENABLE: | |
152 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); | |
153 | break; | |
154 | ||
155 | case IOC_E_DISABLE: | |
156 | bfa_ioc_disable_comp(ioc); | |
157 | break; | |
158 | ||
159 | case IOC_E_DETACH: | |
160 | break; | |
161 | ||
162 | default: | |
163 | bfa_sm_fault(ioc, event); | |
164 | } | |
165 | } | |
166 | ||
/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
{
	/* Request the h/w semaphore; IOC_E_SEMLOCKED is sent when granted. */
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			/* Firmware is usable: proceed to h/w init,
			 * keeping the semaphore held. */
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			/* Version mismatch: drop the semaphore and wait
			 * in the mismatch state. */
			bfa_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/* fall through */

	case IOC_E_DETACH:
		/* Stop the pending semaphore poll before leaving. */
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		/* No action in this state. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
209 | ||
/**
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0)
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	ioc->retry_count++;
	/* Re-check the firmware version periodically via IOC_E_TIMEOUT. */
	bfa_ioc_timer_start(ioc);
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_TIMEOUT:
		/* Timer fired: go back and re-check the firmware. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/* fall through */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		/* No action in this state. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
252 | ||
253 | /** | |
254 | * Request for semaphore. | |
255 | */ | |
256 | static void | |
257 | bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc) | |
258 | { | |
259 | bfa_ioc_hw_sem_get(ioc); | |
260 | } | |
261 | ||
262 | /** | |
263 | * Awaiting semaphore for h/w initialzation. | |
264 | */ | |
265 | static void | |
266 | bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event) | |
267 | { | |
268 | switch (event) { | |
269 | case IOC_E_SEMLOCKED: | |
270 | ioc->retry_count = 0; | |
271 | bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); | |
272 | break; | |
273 | ||
274 | case IOC_E_DISABLE: | |
275 | bfa_ioc_hw_sem_get_cancel(ioc); | |
276 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | |
277 | break; | |
278 | ||
279 | default: | |
280 | bfa_sm_fault(ioc, event); | |
281 | } | |
282 | } | |
283 | ||
/* Hwinit entry: arm the init timeout and kick off (non-forced) h/w init. */
static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, false);
}

/**
 * @brief
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		/* Retry a forced reset up to BFA_IOC_HWINIT_MAX times. */
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, true);
			break;
		}

		/* Retries exhausted: give up the semaphore and fail. */
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
331 | ||
/* Enabling entry: arm the response timeout and send the enable request. */
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		/* Firmware acknowledged enable: release semaphore and
		 * fetch IOC attributes. */
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		/* Retry full h/w init up to BFA_IOC_HWINIT_MAX times;
		 * force firmware back to UNINIT first. */
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			writel(BFI_IOC_UNINIT,
				      ioc->ioc_regs.ioc_fwstate);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		/* Firmware restarted: resend the enable request. */
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
384 | ||
/* Getattr entry: arm the response timeout and request IOC attributes. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/**
 * @brief
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
423 | ||
/* Operational entry: report success to the driver and begin
 * heartbeat monitoring. */
static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
}

/* IOC is operational; watch for disable requests and failures. */
static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		/* Already enabled: nothing to do. */
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
460 | ||
/* Disabling entry: arm the response timeout and send the disable request. */
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		/* No response from firmware: force the failed state in
		 * h/w and complete the disable anyway. */
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
495 | ||
496 | /** | |
497 | * IOC disable completion entry. | |
498 | */ | |
499 | static void | |
500 | bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) | |
501 | { | |
502 | bfa_ioc_disable_comp(ioc); | |
503 | } | |
504 | ||
505 | static void | |
506 | bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) | |
507 | { | |
508 | switch (event) { | |
509 | case IOC_E_ENABLE: | |
510 | bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); | |
511 | break; | |
512 | ||
513 | case IOC_E_DISABLE: | |
514 | ioc->cbfn->disable_cbfn(ioc->bfa); | |
515 | break; | |
516 | ||
517 | case IOC_E_FWREADY: | |
518 | break; | |
519 | ||
520 | case IOC_E_DETACH: | |
521 | bfa_ioc_firmware_unlock(ioc); | |
522 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | |
523 | break; | |
524 | ||
525 | default: | |
526 | bfa_sm_fault(ioc, event); | |
527 | } | |
528 | } | |
529 | ||
/* Initfail entry: report the failure to the driver, then wait before
 * retrying initialization (IOC_E_TIMEOUT). */
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}

/**
 * @brief
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		/* Retry: re-acquire the semaphore and re-init. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
564 | ||
565 | static void | |
566 | bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc) | |
567 | { | |
568 | struct list_head *qe; | |
569 | struct bfa_ioc_hbfail_notify *notify; | |
570 | ||
571 | /** | |
572 | * Mark IOC as failed in hardware and stop firmware. | |
573 | */ | |
574 | bfa_ioc_lpu_stop(ioc); | |
575 | writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); | |
576 | ||
577 | /** | |
578 | * Notify other functions on HB failure. | |
579 | */ | |
580 | bfa_ioc_notify_hbfail(ioc); | |
581 | ||
582 | /** | |
583 | * Notify driver and common modules registered for notification. | |
584 | */ | |
585 | ioc->cbfn->hbfail_cbfn(ioc->bfa); | |
586 | list_for_each(qe, &ioc->hb_notify_q) { | |
587 | notify = (struct bfa_ioc_hbfail_notify *) qe; | |
588 | notify->cbfn(notify->cbarg); | |
589 | } | |
590 | ||
591 | /** | |
592 | * Flush any queued up mailbox requests. | |
593 | */ | |
594 | bfa_ioc_mbox_hbfail(ioc); | |
595 | ||
596 | /** | |
597 | * Trigger auto-recovery after a delay. | |
598 | */ | |
599 | if (ioc->auto_recover) | |
600 | mod_timer(&ioc->ioc_timer, jiffies + | |
601 | msecs_to_jiffies(BFA_IOC_TOV_RECOVER)); | |
602 | } | |
603 | ||
604 | /** | |
605 | * @brief | |
606 | * IOC heartbeat failure. | |
607 | */ | |
608 | static void | |
609 | bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event) | |
610 | { | |
611 | switch (event) { | |
612 | ||
613 | case IOC_E_ENABLE: | |
614 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | |
615 | break; | |
616 | ||
617 | case IOC_E_DISABLE: | |
618 | if (ioc->auto_recover) | |
619 | bfa_ioc_timer_stop(ioc); | |
620 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | |
621 | break; | |
622 | ||
623 | case IOC_E_TIMEOUT: | |
624 | bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); | |
625 | break; | |
626 | ||
627 | case IOC_E_FWREADY: | |
628 | /** | |
629 | * Recovery is already initiated by other function. | |
630 | */ | |
631 | break; | |
632 | ||
633 | case IOC_E_HWERROR: | |
634 | /* | |
635 | * HB failure notification, ignore. | |
636 | */ | |
637 | break; | |
638 | default: | |
639 | bfa_sm_fault(ioc, event); | |
640 | } | |
641 | } | |
642 | ||
643 | /** | |
644 | * BFA IOC private functions | |
645 | */ | |
646 | ||
/**
 * Invoke the driver's disable-complete callback and notify every module
 * registered on the heartbeat-notify queue.
 */
static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/**
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		/* Cast assumes the list hook is the struct's first member. */
		notify = (struct bfa_ioc_hbfail_notify *) qe;
		notify->cbfn(notify->cbarg);
	}
}
663 | ||
/**
 * sem_timer callback: retry acquiring the h/w semaphore.
 */
void
bfa_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
671 | ||
672 | bool | |
673 | bfa_ioc_sem_get(void __iomem *sem_reg) | |
674 | { | |
675 | u32 r32; | |
676 | int cnt = 0; | |
677 | #define BFA_SEM_SPINCNT 3000 | |
678 | ||
679 | r32 = readl(sem_reg); | |
680 | ||
681 | while (r32 && (cnt < BFA_SEM_SPINCNT)) { | |
682 | cnt++; | |
683 | udelay(2); | |
684 | r32 = readl(sem_reg); | |
685 | } | |
686 | ||
687 | if (r32 == 0) | |
688 | return true; | |
689 | ||
690 | BUG_ON(!(cnt < BFA_SEM_SPINCNT)); | |
691 | return false; | |
692 | } | |
693 | ||
/**
 * Release a raw h/w semaphore register: writing 1 releases it.
 */
void
bfa_ioc_sem_release(void __iomem *sem_reg)
{
	writel(1, sem_reg);
}
699 | ||
700 | static void | |
701 | bfa_ioc_hw_sem_get(struct bfa_ioc *ioc) | |
702 | { | |
703 | u32 r32; | |
704 | ||
705 | /** | |
706 | * First read to the semaphore register will return 0, subsequent reads | |
707 | * will return 1. Semaphore is released by writing 1 to the register | |
708 | */ | |
709 | r32 = readl(ioc->ioc_regs.ioc_sem_reg); | |
710 | if (r32 == 0) { | |
711 | bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED); | |
712 | return; | |
713 | } | |
714 | ||
715 | mod_timer(&ioc->sem_timer, jiffies + | |
716 | msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); | |
717 | } | |
718 | ||
/**
 * Release the IOC h/w semaphore (write 1 to release).
 */
void
bfa_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
724 | ||
/**
 * Cancel a pending h/w semaphore poll.
 */
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	/* Use the stop helper for symmetry with bfa_sem_timer_start(). */
	bfa_sem_timer_stop(ioc);
}
730 | ||
/**
 * @brief
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME  10000

	/* Take LMEM out of reset and enable its init engine. */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	/* Clear the done/enable bits now that init has completed. */
	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
770 | ||
/* Release LPU0 from reset so the firmware processor starts running. */
static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
784 | ||
/* Hold both LPU processors in reset, stopping firmware execution. */
static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
798 | ||
/**
 * Get driver and firmware versions.
 *
 * Reads the firmware image header out of adapter shared memory into
 * @fwhdr, byte-swapping each 32-bit word.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32 pgnum, pgoff;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	/* Select the smem page holding the header. NOTE(review): pgoff is
	 * computed but unused here. */
	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		loff += sizeof(u32);
	}
}
821 | ||
822 | /** | |
823 | * Returns TRUE if same. | |
824 | */ | |
825 | bool | |
826 | bfa_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) | |
827 | { | |
828 | struct bfi_ioc_image_hdr *drv_fwhdr; | |
829 | int i; | |
830 | ||
831 | drv_fwhdr = (struct bfi_ioc_image_hdr *) | |
832 | bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); | |
833 | ||
834 | for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { | |
835 | if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) | |
836 | return false; | |
837 | } | |
838 | ||
839 | return true; | |
840 | } | |
841 | ||
842 | /** | |
843 | * Return true if current running version is valid. Firmware signature and | |
844 | * execution context (driver/bios) must match. | |
845 | */ | |
846 | static bool | |
847 | bfa_ioc_fwver_valid(struct bfa_ioc *ioc) | |
848 | { | |
849 | struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr; | |
850 | ||
851 | /** | |
852 | * If bios/efi boot (flash based) -- return true | |
853 | */ | |
854 | if (bfa_ioc_is_optrom(ioc)) | |
855 | return true; | |
856 | ||
857 | bfa_ioc_fwver_get(ioc, &fwhdr); | |
858 | drv_fwhdr = (struct bfi_ioc_image_hdr *) | |
859 | bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); | |
860 | ||
861 | if (fwhdr.signature != drv_fwhdr->signature) | |
862 | return false; | |
863 | ||
864 | if (fwhdr.exec != drv_fwhdr->exec) | |
865 | return false; | |
866 | ||
867 | return bfa_ioc_fwver_cmp(ioc, &fwhdr); | |
868 | } | |
869 | ||
/**
 * Conditionally flush any pending message from firmware at start.
 *
 * If the LPU-to-host mailbox command register is set, acknowledge it by
 * writing 1, discarding the stale message.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
882 | ||
/**
 * @img ioc_init_logic.jpg
 *
 * Decide how to bring the IOC up based on the current firmware state:
 * boot fresh firmware, wait for an init in progress, or re-use the
 * already-running firmware.
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/* A forced init pretends firmware was never started. */
	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
	    (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}
942 | ||
943 | void | |
944 | bfa_ioc_timeout(void *ioc_arg) | |
945 | { | |
946 | struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; | |
947 | ||
948 | bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); | |
949 | } | |
950 | ||
/**
 * Copy a message into the host-to-firmware mailbox registers and ring
 * the doorbell. @len is in bytes, at most BFI_IOC_MSGLEN_MAX; unused
 * mailbox words are zero-filled.
 */
void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 * NOTE(review): writel() already converts to little-endian on BE
	 * hosts; combined with cpu_to_le32() this double-swaps there —
	 * verify intent against firmware byte order.
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			      ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
975 | ||
/* Build and send the firmware ENABLE request, stamped with the
 * current wall-clock seconds. */
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	do_gettimeofday(&tv);
	/* NOTE(review): ntohl used for host-to-wire conversion; equivalent
	 * to htonl in effect, but htonl would express the intent. */
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}
989 | ||
/* Build and send the firmware DISABLE request. */
static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}
999 | ||
/* Request IOC attributes; firmware DMAs them to ioc->attr_dma.pa. */
static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
1010 | ||
1011 | void | |
1012 | bfa_ioc_hb_check(void *cbarg) | |
1013 | { | |
1014 | struct bfa_ioc *ioc = cbarg; | |
1015 | u32 hb_count; | |
1016 | ||
1017 | hb_count = readl(ioc->ioc_regs.heartbeat); | |
1018 | if (ioc->hb_count == hb_count) { | |
1019 | pr_crit("Firmware heartbeat failure at %d", hb_count); | |
1020 | bfa_ioc_recover(ioc); | |
1021 | return; | |
1022 | } else { | |
1023 | ioc->hb_count = hb_count; | |
1024 | } | |
1025 | ||
1026 | bfa_ioc_mbox_poll(ioc); | |
1027 | mod_timer(&ioc->hb_timer, jiffies + | |
1028 | msecs_to_jiffies(BFA_IOC_HB_TOV)); | |
1029 | } | |
1030 | ||
1031 | static void | |
1032 | bfa_ioc_hb_monitor(struct bfa_ioc *ioc) | |
1033 | { | |
1034 | ioc->hb_count = readl(ioc->ioc_regs.heartbeat); | |
1035 | mod_timer(&ioc->hb_timer, jiffies + | |
1036 | msecs_to_jiffies(BFA_IOC_HB_TOV)); | |
1037 | } | |
1038 | ||
/**
 * Stop heartbeat monitoring.
 */
static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	/* Symmetric with bfa_hb_timer_start(). */
	bfa_hb_timer_stop(ioc);
}
1044 | ||
1045 | /** | |
1046 | * @brief | |
1047 | * Initiate a full firmware download. | |
1048 | */ | |
1049 | static void | |
1050 | bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, | |
1051 | u32 boot_param) | |
1052 | { | |
1053 | u32 *fwimg; | |
1054 | u32 pgnum, pgoff; | |
1055 | u32 loff = 0; | |
1056 | u32 chunkno = 0; | |
1057 | u32 i; | |
1058 | ||
1059 | /** | |
1060 | * Initialize LMEM first before code download | |
1061 | */ | |
1062 | bfa_ioc_lmem_init(ioc); | |
1063 | ||
1064 | /** | |
1065 | * Flash based firmware boot | |
1066 | */ | |
1067 | if (bfa_ioc_is_optrom(ioc)) | |
1068 | boot_type = BFI_BOOT_TYPE_FLASH; | |
1069 | fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); | |
1070 | ||
1071 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); | |
1072 | pgoff = bfa_ioc_smem_pgoff(ioc, loff); | |
1073 | ||
1074 | writel(pgnum, ioc->ioc_regs.host_page_num_fn); | |
1075 | ||
1076 | for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) { | |
1077 | if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { | |
1078 | chunkno = BFA_IOC_FLASH_CHUNK_NO(i); | |
1079 | fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), | |
1080 | BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); | |
1081 | } | |
1082 | ||
1083 | /** | |
1084 | * write smem | |
1085 | */ | |
1086 | writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])), | |
1087 | ((ioc->ioc_regs.smem_page_start) + (loff))); | |
1088 | ||
1089 | loff += sizeof(u32); | |
1090 | ||
1091 | /** | |
1092 | * handle page offset wrap around | |
1093 | */ | |
1094 | loff = PSS_SMEM_PGOFF(loff); | |
1095 | if (loff == 0) { | |
1096 | pgnum++; | |
1097 | writel(pgnum, | |
1098 | ioc->ioc_regs.host_page_num_fn); | |
1099 | } | |
1100 | } | |
1101 | ||
1102 | writel(bfa_ioc_smem_pgnum(ioc, 0), | |
1103 | ioc->ioc_regs.host_page_num_fn); | |
1104 | ||
1105 | /* | |
1106 | * Set boot type and boot param at the end. | |
1107 | */ | |
1108 | writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start) | |
1109 | + (BFI_BOOT_TYPE_OFF))); | |
1110 | writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start) | |
1111 | + (BFI_BOOT_PARAM_OFF))); | |
1112 | } | |
1113 | ||
/**
 * Reset the IOC by re-running hardware initialization.
 *
 * @param[in] ioc	IOC instance
 * @param[in] force	passed through to bfa_ioc_hwinit()
 */
static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}
1119 | ||
1120 | /** | |
1121 | * @brief | |
1122 | * Update BFA configuration from firmware configuration. | |
1123 | */ | |
1124 | static void | |
1125 | bfa_ioc_getattr_reply(struct bfa_ioc *ioc) | |
1126 | { | |
1127 | struct bfi_ioc_attr *attr = ioc->attr; | |
1128 | ||
1129 | attr->adapter_prop = ntohl(attr->adapter_prop); | |
1130 | attr->card_type = ntohl(attr->card_type); | |
1131 | attr->maxfrsize = ntohs(attr->maxfrsize); | |
1132 | ||
1133 | bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); | |
1134 | } | |
1135 | ||
1136 | /** | |
1137 | * Attach time initialization of mbox logic. | |
1138 | */ | |
1139 | static void | |
1140 | bfa_ioc_mbox_attach(struct bfa_ioc *ioc) | |
1141 | { | |
1142 | struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; | |
1143 | int mc; | |
1144 | ||
1145 | INIT_LIST_HEAD(&mod->cmd_q); | |
1146 | for (mc = 0; mc < BFI_MC_MAX; mc++) { | |
1147 | mod->mbhdlr[mc].cbfn = NULL; | |
1148 | mod->mbhdlr[mc].cbarg = ioc->bfa; | |
1149 | } | |
1150 | } | |
1151 | ||
1152 | /** | |
1153 | * Mbox poll timer -- restarts any pending mailbox requests. | |
1154 | */ | |
1155 | static void | |
1156 | bfa_ioc_mbox_poll(struct bfa_ioc *ioc) | |
1157 | { | |
1158 | struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; | |
1159 | struct bfa_mbox_cmd *cmd; | |
1160 | u32 stat; | |
1161 | ||
1162 | /** | |
1163 | * If no command pending, do nothing | |
1164 | */ | |
1165 | if (list_empty(&mod->cmd_q)) | |
1166 | return; | |
1167 | ||
1168 | /** | |
1169 | * If previous command is not yet fetched by firmware, do nothing | |
1170 | */ | |
1171 | stat = readl(ioc->ioc_regs.hfn_mbox_cmd); | |
1172 | if (stat) | |
1173 | return; | |
1174 | ||
1175 | /** | |
1176 | * Enqueue command to firmware. | |
1177 | */ | |
1178 | bfa_q_deq(&mod->cmd_q, &cmd); | |
1179 | bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); | |
1180 | } | |
1181 | ||
1182 | /** | |
1183 | * Cleanup any pending requests. | |
1184 | */ | |
1185 | static void | |
1186 | bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc) | |
1187 | { | |
1188 | struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; | |
1189 | struct bfa_mbox_cmd *cmd; | |
1190 | ||
1191 | while (!list_empty(&mod->cmd_q)) | |
1192 | bfa_q_deq(&mod->cmd_q, &cmd); | |
1193 | } | |
1194 | ||
1195 | /** | |
1196 | * IOC public | |
1197 | */ | |
1198 | enum bfa_status | |
1199 | bfa_ioc_pll_init(struct bfa_ioc *ioc) | |
1200 | { | |
1201 | /* | |
1202 | * Hold semaphore so that nobody can access the chip during init. | |
1203 | */ | |
1204 | bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); | |
1205 | ||
1206 | bfa_ioc_pll_init_asic(ioc); | |
1207 | ||
1208 | ioc->pllinit = true; | |
1209 | /* | |
1210 | * release semaphore. | |
1211 | */ | |
1212 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); | |
1213 | ||
1214 | return BFA_STATUS_OK; | |
1215 | } | |
1216 | ||
1217 | /** | |
1218 | * Interface used by diag module to do firmware boot with memory test | |
1219 | * as the entry vector. | |
1220 | */ | |
1221 | void | |
1222 | bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param) | |
1223 | { | |
1224 | void __iomem *rb; | |
1225 | ||
1226 | bfa_ioc_stats(ioc, ioc_boots); | |
1227 | ||
1228 | if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) | |
1229 | return; | |
1230 | ||
1231 | /** | |
1232 | * Initialize IOC state of all functions on a chip reset. | |
1233 | */ | |
1234 | rb = ioc->pcidev.pci_bar_kva; | |
1235 | if (boot_param == BFI_BOOT_TYPE_MEMTEST) { | |
1236 | writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG)); | |
1237 | writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG)); | |
1238 | } else { | |
1239 | writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG)); | |
1240 | writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG)); | |
1241 | } | |
1242 | ||
1243 | bfa_ioc_msgflush(ioc); | |
1244 | bfa_ioc_download_fw(ioc, boot_type, boot_param); | |
1245 | ||
1246 | /** | |
1247 | * Enable interrupts just before starting LPU | |
1248 | */ | |
1249 | ioc->cbfn->reset_cbfn(ioc->bfa); | |
1250 | bfa_ioc_lpu_start(ioc); | |
1251 | } | |
1252 | ||
1253 | /** | |
1254 | * Enable/disable IOC failure auto recovery. | |
1255 | */ | |
1256 | void | |
1257 | bfa_ioc_auto_recover(bool auto_recover) | |
1258 | { | |
1259 | bfa_auto_recover = auto_recover; | |
1260 | } | |
1261 | ||
/* Return true if the IOC state machine is in the operational state. */
bool
bfa_ioc_is_operational(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
1267 | ||
1268 | bool | |
1269 | bfa_ioc_is_initialized(struct bfa_ioc *ioc) | |
1270 | { | |
1271 | u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); | |
1272 | ||
1273 | return ((r32 != BFI_IOC_UNINIT) && | |
1274 | (r32 != BFI_IOC_INITING) && | |
1275 | (r32 != BFI_IOC_MEMTEST)); | |
1276 | } | |
1277 | ||
1278 | void | |
1279 | bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg) | |
1280 | { | |
1281 | u32 *msgp = mbmsg; | |
1282 | u32 r32; | |
1283 | int i; | |
1284 | ||
1285 | /** | |
1286 | * read the MBOX msg | |
1287 | */ | |
1288 | for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); | |
1289 | i++) { | |
1290 | r32 = readl(ioc->ioc_regs.lpu_mbox + | |
1291 | i * sizeof(u32)); | |
1292 | msgp[i] = htonl(r32); | |
1293 | } | |
1294 | ||
1295 | /** | |
1296 | * turn off mailbox interrupt by clearing mailbox status | |
1297 | */ | |
1298 | writel(1, ioc->ioc_regs.lpu_mbox_cmd); | |
1299 | readl(ioc->ioc_regs.lpu_mbox_cmd); | |
1300 | } | |
1301 | ||
1302 | void | |
1303 | bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) | |
1304 | { | |
1305 | union bfi_ioc_i2h_msg_u *msg; | |
1306 | ||
1307 | msg = (union bfi_ioc_i2h_msg_u *) m; | |
1308 | ||
1309 | bfa_ioc_stats(ioc, ioc_isrs); | |
1310 | ||
1311 | switch (msg->mh.msg_id) { | |
1312 | case BFI_IOC_I2H_HBEAT: | |
1313 | break; | |
1314 | ||
1315 | case BFI_IOC_I2H_READY_EVENT: | |
1316 | bfa_fsm_send_event(ioc, IOC_E_FWREADY); | |
1317 | break; | |
1318 | ||
1319 | case BFI_IOC_I2H_ENABLE_REPLY: | |
1320 | bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE); | |
1321 | break; | |
1322 | ||
1323 | case BFI_IOC_I2H_DISABLE_REPLY: | |
1324 | bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE); | |
1325 | break; | |
1326 | ||
1327 | case BFI_IOC_I2H_GETATTR_REPLY: | |
1328 | bfa_ioc_getattr_reply(ioc); | |
1329 | break; | |
1330 | ||
1331 | default: | |
1332 | BUG_ON(1); | |
1333 | } | |
1334 | } | |
1335 | ||
1336 | /** | |
1337 | * IOC attach time initialization and setup. | |
1338 | * | |
1339 | * @param[in] ioc memory for IOC | |
1340 | * @param[in] bfa driver instance structure | |
1341 | */ | |
1342 | void | |
1343 | bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) | |
1344 | { | |
1345 | ioc->bfa = bfa; | |
1346 | ioc->cbfn = cbfn; | |
1347 | ioc->fcmode = false; | |
1348 | ioc->pllinit = false; | |
1349 | ioc->dbg_fwsave_once = true; | |
1350 | ||
1351 | bfa_ioc_mbox_attach(ioc); | |
1352 | INIT_LIST_HEAD(&ioc->hb_notify_q); | |
1353 | ||
1354 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | |
1355 | } | |
1356 | ||
1357 | /** | |
1358 | * Driver detach time IOC cleanup. | |
1359 | */ | |
1360 | void | |
1361 | bfa_ioc_detach(struct bfa_ioc *ioc) | |
1362 | { | |
1363 | bfa_fsm_send_event(ioc, IOC_E_DETACH); | |
1364 | } | |
1365 | ||
1366 | /** | |
1367 | * Setup IOC PCI properties. | |
1368 | * | |
1369 | * @param[in] pcidev PCI device information for this IOC | |
1370 | */ | |
1371 | void | |
1372 | bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, | |
1373 | enum bfi_mclass mc) | |
1374 | { | |
1375 | ioc->ioc_mc = mc; | |
1376 | ioc->pcidev = *pcidev; | |
1377 | ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); | |
1378 | ioc->cna = ioc->ctdev && !ioc->fcmode; | |
1379 | ||
1380 | bfa_ioc_set_ct_hwif(ioc); | |
1381 | ||
1382 | bfa_ioc_map_port(ioc); | |
1383 | bfa_ioc_reg_init(ioc); | |
1384 | } | |
1385 | ||
1386 | /** | |
1387 | * Initialize IOC dma memory | |
1388 | * | |
1389 | * @param[in] dm_kva kernel virtual address of IOC dma memory | |
1390 | * @param[in] dm_pa physical address of IOC dma memory | |
1391 | */ | |
1392 | void | |
1393 | bfa_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) | |
1394 | { | |
1395 | /** | |
1396 | * dma memory for firmware attribute | |
1397 | */ | |
1398 | ioc->attr_dma.kva = dm_kva; | |
1399 | ioc->attr_dma.pa = dm_pa; | |
1400 | ioc->attr = (struct bfi_ioc_attr *) dm_kva; | |
1401 | } | |
1402 | ||
1403 | /** | |
1404 | * Return size of dma memory required. | |
1405 | */ | |
1406 | u32 | |
1407 | bfa_ioc_meminfo(void) | |
1408 | { | |
1409 | return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ); | |
1410 | } | |
1411 | ||
/* Request IOC enable: count the attempt and drive the state machine. */
void
bfa_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	/* re-arm one-shot firmware state save for debugging */
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
1420 | ||
/* Request IOC disable: count the attempt and drive the state machine. */
void
bfa_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
1427 | ||
/* Return the SMEM page number for firmware address @fmaddr. */
u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
1433 | ||
/* Return the offset of firmware address @fmaddr within its SMEM page. */
u32
bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}
1439 | ||
1440 | /** | |
1441 | * Register mailbox message handler functions | |
1442 | * | |
1443 | * @param[in] ioc IOC instance | |
1444 | * @param[in] mcfuncs message class handler functions | |
1445 | */ | |
1446 | void | |
1447 | bfa_ioc_mbox_register(struct bfa_ioc *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs) | |
1448 | { | |
1449 | struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; | |
1450 | int mc; | |
1451 | ||
1452 | for (mc = 0; mc < BFI_MC_MAX; mc++) | |
1453 | mod->mbhdlr[mc].cbfn = mcfuncs[mc]; | |
1454 | } | |
1455 | ||
1456 | /** | |
1457 | * Register mailbox message handler function, to be called by common modules | |
1458 | */ | |
1459 | void | |
1460 | bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, | |
1461 | bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) | |
1462 | { | |
1463 | struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; | |
1464 | ||
1465 | mod->mbhdlr[mc].cbfn = cbfn; | |
1466 | mod->mbhdlr[mc].cbarg = cbarg; | |
1467 | } | |
1468 | ||
1469 | /** | |
1470 | * Queue a mailbox command request to firmware. Waits if mailbox is busy. | |
1471 | * Responsibility of caller to serialize | |
1472 | * | |
1473 | * @param[in] ioc IOC instance | |
1474 | * @param[i] cmd Mailbox command | |
1475 | */ | |
1476 | void | |
1477 | bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd) | |
1478 | { | |
1479 | struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; | |
1480 | u32 stat; | |
1481 | ||
1482 | /** | |
1483 | * If a previous command is pending, queue new command | |
1484 | */ | |
1485 | if (!list_empty(&mod->cmd_q)) { | |
1486 | list_add_tail(&cmd->qe, &mod->cmd_q); | |
1487 | return; | |
1488 | } | |
1489 | ||
1490 | /** | |
1491 | * If mailbox is busy, queue command for poll timer | |
1492 | */ | |
1493 | stat = readl(ioc->ioc_regs.hfn_mbox_cmd); | |
1494 | if (stat) { | |
1495 | list_add_tail(&cmd->qe, &mod->cmd_q); | |
1496 | return; | |
1497 | } | |
1498 | ||
1499 | /** | |
1500 | * mailbox is free -- queue command to firmware | |
1501 | */ | |
1502 | bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); | |
1503 | } | |
1504 | ||
1505 | /** | |
1506 | * Handle mailbox interrupts | |
1507 | */ | |
1508 | void | |
1509 | bfa_ioc_mbox_isr(struct bfa_ioc *ioc) | |
1510 | { | |
1511 | struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; | |
1512 | struct bfi_mbmsg m; | |
1513 | int mc; | |
1514 | ||
1515 | bfa_ioc_msgget(ioc, &m); | |
1516 | ||
1517 | /** | |
1518 | * Treat IOC message class as special. | |
1519 | */ | |
1520 | mc = m.mh.msg_class; | |
1521 | if (mc == BFI_MC_IOC) { | |
1522 | bfa_ioc_isr(ioc, &m); | |
1523 | return; | |
1524 | } | |
1525 | ||
1526 | if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) | |
1527 | return; | |
1528 | ||
1529 | mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); | |
1530 | } | |
1531 | ||
/* Hardware error interrupt: notify the IOC state machine. */
void
bfa_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
1537 | ||
/* Put the IOC in FC mode and record the PCI function as the port id. */
void
bfa_ioc_set_fcmode(struct bfa_ioc *ioc)
{
	ioc->fcmode = true;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}
1544 | ||
1545 | /** | |
1546 | * return true if IOC is disabled | |
1547 | */ | |
1548 | bool | |
1549 | bfa_ioc_is_disabled(struct bfa_ioc *ioc) | |
1550 | { | |
1551 | return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || | |
1552 | bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); | |
1553 | } | |
1554 | ||
1555 | /** | |
1556 | * return true if IOC firmware is different. | |
1557 | */ | |
1558 | bool | |
1559 | bfa_ioc_fw_mismatch(struct bfa_ioc *ioc) | |
1560 | { | |
1561 | return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) || | |
1562 | bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) || | |
1563 | bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch); | |
1564 | } | |
1565 | ||
/*
 * True when a firmware state value indicates the IOC is not operational:
 * uninitialized, initializing, fresh out of h/w init, disabled, failed,
 * or disabled during configuration.
 */
#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
1573 | ||
1574 | /** | |
1575 | * Check if adapter is disabled -- both IOCs should be in a disabled | |
1576 | * state. | |
1577 | */ | |
1578 | bool | |
1579 | bfa_ioc_adapter_is_disabled(struct bfa_ioc *ioc) | |
1580 | { | |
1581 | u32 ioc_state; | |
1582 | void __iomem *rb = ioc->pcidev.pci_bar_kva; | |
1583 | ||
1584 | if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) | |
1585 | return false; | |
1586 | ||
1587 | ioc_state = readl(rb + BFA_IOC0_STATE_REG); | |
1588 | if (!bfa_ioc_state_disabled(ioc_state)) | |
1589 | return false; | |
1590 | ||
1591 | if (ioc->pcidev.device_id != PCI_DEVICE_ID_BROCADE_FC_8G1P) { | |
1592 | ioc_state = readl(rb + BFA_IOC1_STATE_REG); | |
1593 | if (!bfa_ioc_state_disabled(ioc_state)) | |
1594 | return false; | |
1595 | } | |
1596 | ||
1597 | return true; | |
1598 | } | |
1599 | ||
1600 | /** | |
1601 | * Add to IOC heartbeat failure notification queue. To be used by common | |
1602 | * modules such as cee, port, diag. | |
1603 | */ | |
1604 | void | |
1605 | bfa_ioc_hbfail_register(struct bfa_ioc *ioc, | |
1606 | struct bfa_ioc_hbfail_notify *notify) | |
1607 | { | |
1608 | list_add_tail(¬ify->qe, &ioc->hb_notify_q); | |
1609 | } | |
1610 | ||
1611 | #define BFA_MFG_NAME "Brocade" | |
1612 | void | |
1613 | bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, | |
1614 | struct bfa_adapter_attr *ad_attr) | |
1615 | { | |
1616 | struct bfi_ioc_attr *ioc_attr; | |
1617 | ||
1618 | ioc_attr = ioc->attr; | |
1619 | ||
1620 | bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); | |
1621 | bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); | |
1622 | bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); | |
1623 | bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); | |
1624 | memcpy(&ad_attr->vpd, &ioc_attr->vpd, | |
1625 | sizeof(struct bfa_mfg_vpd)); | |
1626 | ||
1627 | ad_attr->nports = bfa_ioc_get_nports(ioc); | |
1628 | ad_attr->max_speed = bfa_ioc_speed_sup(ioc); | |
1629 | ||
1630 | bfa_ioc_get_adapter_model(ioc, ad_attr->model); | |
1631 | /* For now, model descr uses same model string */ | |
1632 | bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); | |
1633 | ||
1634 | ad_attr->card_type = ioc_attr->card_type; | |
1635 | ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); | |
1636 | ||
1637 | if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) | |
1638 | ad_attr->prototype = 1; | |
1639 | else | |
1640 | ad_attr->prototype = 0; | |
1641 | ||
1642 | ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); | |
1643 | ad_attr->mac = bfa_ioc_get_mac(ioc); | |
1644 | ||
1645 | ad_attr->pcie_gen = ioc_attr->pcie_gen; | |
1646 | ad_attr->pcie_lanes = ioc_attr->pcie_lanes; | |
1647 | ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; | |
1648 | ad_attr->asic_rev = ioc_attr->asic_rev; | |
1649 | ||
1650 | bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); | |
1651 | ||
1652 | ad_attr->cna_capable = ioc->cna; | |
1653 | ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna; | |
1654 | } | |
1655 | ||
/*
 * Classify the IOC: non-CT or FC-mode devices are FC; CT devices are
 * FCoE for the IOCFC message class and LL for the LL class.
 */
enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		/* can't happen: ioc_mc != BFI_MC_LL here, so this BUG_ON
		 * always fires for any unexpected message class */
		BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
		return BFA_IOC_TYPE_LL;
	}
}
1670 | ||
1671 | void | |
1672 | bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num) | |
1673 | { | |
1674 | memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); | |
1675 | memcpy(serial_num, | |
1676 | (void *)ioc->attr->brcd_serialnum, | |
1677 | BFA_ADAPTER_SERIAL_NUM_LEN); | |
1678 | } | |
1679 | ||
/* Copy the firmware version string (BFA_VERSION_LEN bytes) to @fw_ver. */
void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memset(fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
1686 | ||
1687 | void | |
1688 | bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev) | |
1689 | { | |
1690 | BUG_ON(!(chip_rev)); | |
1691 | ||
1692 | memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN); | |
1693 | ||
1694 | chip_rev[0] = 'R'; | |
1695 | chip_rev[1] = 'e'; | |
1696 | chip_rev[2] = 'v'; | |
1697 | chip_rev[3] = '-'; | |
1698 | chip_rev[4] = ioc->attr->asic_rev; | |
1699 | chip_rev[5] = '\0'; | |
1700 | } | |
1701 | ||
/* Copy the option ROM version string (BFA_VERSION_LEN bytes). */
void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memset(optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
	       BFA_VERSION_LEN);
}
1709 | ||
1710 | void | |
1711 | bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) | |
1712 | { | |
1713 | memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); | |
1714 | memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); | |
1715 | } | |
1716 | ||
/*
 * Format the adapter model name as "<mfg name>-<card type>" into @model
 * (BFA_ADAPTER_MODEL_NAME_LEN bytes).
 */
void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!(model));
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/**
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}
1733 | ||
/* Map the current FSM state to its external bfa_ioc_state value. */
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}
1739 | ||
/*
 * Fill @ioc_attr with IOC-level attributes: state, port id, IOC type,
 * adapter attributes and PCI attributes.
 */
void
bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
1756 | ||
1757 | /** | |
1758 | * WWN public | |
1759 | */ | |
1760 | u64 | |
1761 | bfa_ioc_get_pwwn(struct bfa_ioc *ioc) | |
1762 | { | |
1763 | return ioc->attr->pwwn; | |
1764 | } | |
1765 | ||
/* Return the node WWN from the firmware attribute block. */
u64
bfa_ioc_get_nwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->nwwn;
}
1771 | ||
/*
 * Return the adapter id.
 * NOTE(review): this returns the manufacturing port WWN (mfg_pwwn), the
 * same value as bfa_ioc_get_mfg_pwwn() -- confirm that is intended.
 */
u64
bfa_ioc_get_adid(struct bfa_ioc *ioc)
{
	return ioc->attr->mfg_pwwn;
}
1777 | ||
/*
 * Return the MAC address to use for this IOC: the manufacturing MAC for
 * FCoE, otherwise the MAC from the firmware attribute block.
 */
mac_t
bfa_ioc_get_mac(struct bfa_ioc *ioc)
{
	/*
	 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return bfa_ioc_get_mfg_mac(ioc);
	else
		return ioc->attr->mac;
}
1789 | ||
/* Return the manufacturing port WWN from the firmware attribute block. */
u64
bfa_ioc_get_mfg_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->mfg_pwwn;
}
1795 | ||
/* Return the manufacturing node WWN from the firmware attribute block. */
u64
bfa_ioc_get_mfg_nwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->mfg_nwwn;
}
1801 | ||
/*
 * Return the manufacturing MAC, made unique per PCI function: older
 * card models offset the last MAC byte by the function number, newer
 * ones increment the low three bytes via the mfg helper.
 */
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
{
	mac_t m;

	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	else
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
			bfa_ioc_pcifn(ioc));

	return m;
}
1816 | ||
/* True if this IOC runs in FC mode (explicitly set, or a non-CT device). */
bool
bfa_ioc_get_fcmode(struct bfa_ioc *ioc)
{
	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}
1822 | ||
1823 | /** | |
1824 | * Firmware failure detected. Start recovery actions. | |
1825 | */ | |
1826 | static void | |
1827 | bfa_ioc_recover(struct bfa_ioc *ioc) | |
1828 | { | |
1829 | bfa_ioc_stats(ioc, ioc_hbfails); | |
1830 | bfa_fsm_send_event(ioc, IOC_E_HBFAIL); | |
1831 | } | |
1832 | ||
/*
 * Validate the WWNs in the firmware attribute block. LL-type IOCs have
 * no WWNs to check; for all other types this is currently a no-op
 * placeholder.
 */
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;

}