/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
20 #include <bfi/bfi_pport.h>
21 #include <cs/bfa_debug.h>
22 #include <aen/bfa_aen.h>
23 #include <cs/bfa_plog.h>
24 #include <aen/bfa_aen_port.h>
26 BFA_TRC_FILE(HAL, PPORT);
/*
 * Deliver a port event to the FCS layer. If FCS is present the callback is
 * invoked synchronously; otherwise the event is deferred through the
 * completion-callback queue (__bfa_cb_port_event).
 */
#define bfa_pport_callback(__pport, __event) do {			\
	if ((__pport)->bfa->fcs) {					\
		(__pport)->event_cbfn((__pport)->event_cbarg, (__event)); \
	} else {							\
		(__pport)->hcb_event = (__event);			\
		bfa_cb_queue((__pport)->bfa, &(__pport)->hcb_qe,	\
				__bfa_cb_port_event, (__pport));	\
	}								\
} while (0)
/*
 * The port is considered disabled if corresponding physical port or IOC are
 * disabled.
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_pport_is_disabled(bfa) == BFA_TRUE) || \
	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
/*
 * forward declarations
 */
static bfa_boolean_t bfa_pport_send_enable(struct bfa_pport_s *port);
static bfa_boolean_t bfa_pport_send_disable(struct bfa_pport_s *port);
static void bfa_pport_update_linkinfo(struct bfa_pport_s *pport);
static void bfa_pport_reset_linkinfo(struct bfa_pport_s *pport);
static void bfa_pport_set_wwns(struct bfa_pport_s *port);
static void __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete);
static void bfa_port_stats_timeout(void *cbarg);
static void bfa_port_stats_clr_timeout(void *cbarg);
/*
 * BFA port state machine events
 */
enum bfa_pport_sm_event {
	BFA_PPORT_SM_START = 1,		/* start port state machine */
	BFA_PPORT_SM_STOP = 2,		/* stop port state machine */
	BFA_PPORT_SM_ENABLE = 3,	/* enable port */
	BFA_PPORT_SM_DISABLE = 4,	/* disable port state machine */
	BFA_PPORT_SM_FWRSP = 5,		/* firmware enable/disable rsp */
	BFA_PPORT_SM_LINKUP = 6,	/* firmware linkup event */
	BFA_PPORT_SM_LINKDOWN = 7,	/* firmware linkdown event */
	BFA_PPORT_SM_QRESUME = 8,	/* CQ space available */
	BFA_PPORT_SM_HWFAIL = 9,	/* IOC h/w failure */
};
/*
 * Per-state event handlers for the port state machine (one per state
 * listed in hal_pport_sm_table below).
 */
static void bfa_pport_sm_uninit(struct bfa_pport_s *pport,
			enum bfa_pport_sm_event event);
static void bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
			enum bfa_pport_sm_event event);
static void bfa_pport_sm_enabling(struct bfa_pport_s *pport,
			enum bfa_pport_sm_event event);
static void bfa_pport_sm_linkdown(struct bfa_pport_s *pport,
			enum bfa_pport_sm_event event);
static void bfa_pport_sm_linkup(struct bfa_pport_s *pport,
			enum bfa_pport_sm_event event);
static void bfa_pport_sm_disabling(struct bfa_pport_s *pport,
			enum bfa_pport_sm_event event);
static void bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
			enum bfa_pport_sm_event event);
static void bfa_pport_sm_disabled(struct bfa_pport_s *pport,
			enum bfa_pport_sm_event event);
static void bfa_pport_sm_stopped(struct bfa_pport_s *pport,
			enum bfa_pport_sm_event event);
static void bfa_pport_sm_iocdown(struct bfa_pport_s *pport,
			enum bfa_pport_sm_event event);
static void bfa_pport_sm_iocfail(struct bfa_pport_s *pport,
			enum bfa_pport_sm_event event);
103 static struct bfa_sm_table_s hal_pport_sm_table[] = {
104 {BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT},
105 {BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
106 {BFA_SM(bfa_pport_sm_enabling), BFA_PPORT_ST_ENABLING},
107 {BFA_SM(bfa_pport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
108 {BFA_SM(bfa_pport_sm_linkup), BFA_PPORT_ST_LINKUP},
109 {BFA_SM(bfa_pport_sm_disabling_qwait),
110 BFA_PPORT_ST_DISABLING_QWAIT},
111 {BFA_SM(bfa_pport_sm_disabling), BFA_PPORT_ST_DISABLING},
112 {BFA_SM(bfa_pport_sm_disabled), BFA_PPORT_ST_DISABLED},
113 {BFA_SM(bfa_pport_sm_stopped), BFA_PPORT_ST_STOPPED},
114 {BFA_SM(bfa_pport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
115 {BFA_SM(bfa_pport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
119 bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event)
121 union bfa_aen_data_u aen_data;
122 struct bfa_log_mod_s *logmod = pport->bfa->logm;
123 wwn_t pwwn = pport->pwwn;
124 char pwwn_ptr[BFA_STRING_32];
125 struct bfa_ioc_attr_s ioc_attr;
127 wwn2str(pwwn_ptr, pwwn);
129 case BFA_PORT_AEN_ONLINE:
130 bfa_log(logmod, BFA_AEN_PORT_ONLINE, pwwn_ptr);
132 case BFA_PORT_AEN_OFFLINE:
133 bfa_log(logmod, BFA_AEN_PORT_OFFLINE, pwwn_ptr);
135 case BFA_PORT_AEN_ENABLE:
136 bfa_log(logmod, BFA_AEN_PORT_ENABLE, pwwn_ptr);
138 case BFA_PORT_AEN_DISABLE:
139 bfa_log(logmod, BFA_AEN_PORT_DISABLE, pwwn_ptr);
141 case BFA_PORT_AEN_DISCONNECT:
142 bfa_log(logmod, BFA_AEN_PORT_DISCONNECT, pwwn_ptr);
144 case BFA_PORT_AEN_QOS_NEG:
145 bfa_log(logmod, BFA_AEN_PORT_QOS_NEG, pwwn_ptr);
151 bfa_ioc_get_attr(&pport->bfa->ioc, &ioc_attr);
152 aen_data.port.ioc_type = ioc_attr.ioc_type;
153 aen_data.port.pwwn = pwwn;
157 bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
159 bfa_trc(pport->bfa, event);
162 case BFA_PPORT_SM_START:
164 * Start event after IOC is configured and BFA is started.
166 if (bfa_pport_send_enable(pport))
167 bfa_sm_set_state(pport, bfa_pport_sm_enabling);
169 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
172 case BFA_PPORT_SM_ENABLE:
174 * Port is persistently configured to be in enabled state. Do
175 * not change state. Port enabling is done when START event is
180 case BFA_PPORT_SM_DISABLE:
182 * If a port is persistently configured to be disabled, the
183 * first event will a port disable request.
185 bfa_sm_set_state(pport, bfa_pport_sm_disabled);
188 case BFA_PPORT_SM_HWFAIL:
189 bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
193 bfa_sm_fault(pport->bfa, event);
198 bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
199 enum bfa_pport_sm_event event)
201 bfa_trc(pport->bfa, event);
204 case BFA_PPORT_SM_QRESUME:
205 bfa_sm_set_state(pport, bfa_pport_sm_enabling);
206 bfa_pport_send_enable(pport);
209 case BFA_PPORT_SM_STOP:
210 bfa_reqq_wcancel(&pport->reqq_wait);
211 bfa_sm_set_state(pport, bfa_pport_sm_stopped);
214 case BFA_PPORT_SM_ENABLE:
216 * Already enable is in progress.
220 case BFA_PPORT_SM_DISABLE:
222 * Just send disable request to firmware when room becomes
223 * available in request queue.
225 bfa_sm_set_state(pport, bfa_pport_sm_disabled);
226 bfa_reqq_wcancel(&pport->reqq_wait);
227 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
228 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
229 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
232 case BFA_PPORT_SM_LINKUP:
233 case BFA_PPORT_SM_LINKDOWN:
235 * Possible to get link events when doing back-to-back
240 case BFA_PPORT_SM_HWFAIL:
241 bfa_reqq_wcancel(&pport->reqq_wait);
242 bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
246 bfa_sm_fault(pport->bfa, event);
251 bfa_pport_sm_enabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
253 bfa_trc(pport->bfa, event);
256 case BFA_PPORT_SM_FWRSP:
257 case BFA_PPORT_SM_LINKDOWN:
258 bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
261 case BFA_PPORT_SM_LINKUP:
262 bfa_pport_update_linkinfo(pport);
263 bfa_sm_set_state(pport, bfa_pport_sm_linkup);
265 bfa_assert(pport->event_cbfn);
266 bfa_pport_callback(pport, BFA_PPORT_LINKUP);
269 case BFA_PPORT_SM_ENABLE:
271 * Already being enabled.
275 case BFA_PPORT_SM_DISABLE:
276 if (bfa_pport_send_disable(pport))
277 bfa_sm_set_state(pport, bfa_pport_sm_disabling);
279 bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
281 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
282 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
283 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
286 case BFA_PPORT_SM_STOP:
287 bfa_sm_set_state(pport, bfa_pport_sm_stopped);
290 case BFA_PPORT_SM_HWFAIL:
291 bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
295 bfa_sm_fault(pport->bfa, event);
300 bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
302 bfa_trc(pport->bfa, event);
305 case BFA_PPORT_SM_LINKUP:
306 bfa_pport_update_linkinfo(pport);
307 bfa_sm_set_state(pport, bfa_pport_sm_linkup);
308 bfa_assert(pport->event_cbfn);
309 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
310 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
311 bfa_pport_callback(pport, BFA_PPORT_LINKUP);
312 bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE);
314 * If QoS is enabled and it is not online,
315 * Send a separate event.
317 if ((pport->cfg.qos_enabled)
318 && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE))
319 bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG);
323 case BFA_PPORT_SM_LINKDOWN:
325 * Possible to get link down event.
329 case BFA_PPORT_SM_ENABLE:
335 case BFA_PPORT_SM_DISABLE:
336 if (bfa_pport_send_disable(pport))
337 bfa_sm_set_state(pport, bfa_pport_sm_disabling);
339 bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
341 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
342 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
343 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
346 case BFA_PPORT_SM_STOP:
347 bfa_sm_set_state(pport, bfa_pport_sm_stopped);
350 case BFA_PPORT_SM_HWFAIL:
351 bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
355 bfa_sm_fault(pport->bfa, event);
360 bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
362 bfa_trc(pport->bfa, event);
365 case BFA_PPORT_SM_ENABLE:
371 case BFA_PPORT_SM_DISABLE:
372 if (bfa_pport_send_disable(pport))
373 bfa_sm_set_state(pport, bfa_pport_sm_disabling);
375 bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
377 bfa_pport_reset_linkinfo(pport);
378 bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
379 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
380 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
381 bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
382 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
385 case BFA_PPORT_SM_LINKDOWN:
386 bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
387 bfa_pport_reset_linkinfo(pport);
388 bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
389 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
390 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
391 if (BFA_PORT_IS_DISABLED(pport->bfa))
392 bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
394 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
397 case BFA_PPORT_SM_STOP:
398 bfa_sm_set_state(pport, bfa_pport_sm_stopped);
399 bfa_pport_reset_linkinfo(pport);
400 if (BFA_PORT_IS_DISABLED(pport->bfa))
401 bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
403 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
406 case BFA_PPORT_SM_HWFAIL:
407 bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
408 bfa_pport_reset_linkinfo(pport);
409 bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
410 if (BFA_PORT_IS_DISABLED(pport->bfa))
411 bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
413 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
417 bfa_sm_fault(pport->bfa, event);
422 bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
423 enum bfa_pport_sm_event event)
425 bfa_trc(pport->bfa, event);
428 case BFA_PPORT_SM_QRESUME:
429 bfa_sm_set_state(pport, bfa_pport_sm_disabling);
430 bfa_pport_send_disable(pport);
433 case BFA_PPORT_SM_STOP:
434 bfa_sm_set_state(pport, bfa_pport_sm_stopped);
435 bfa_reqq_wcancel(&pport->reqq_wait);
438 case BFA_PPORT_SM_DISABLE:
440 * Already being disabled.
444 case BFA_PPORT_SM_LINKUP:
445 case BFA_PPORT_SM_LINKDOWN:
447 * Possible to get link events when doing back-to-back
452 case BFA_PPORT_SM_HWFAIL:
453 bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
454 bfa_reqq_wcancel(&pport->reqq_wait);
458 bfa_sm_fault(pport->bfa, event);
463 bfa_pport_sm_disabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
465 bfa_trc(pport->bfa, event);
468 case BFA_PPORT_SM_FWRSP:
469 bfa_sm_set_state(pport, bfa_pport_sm_disabled);
472 case BFA_PPORT_SM_DISABLE:
474 * Already being disabled.
478 case BFA_PPORT_SM_ENABLE:
479 if (bfa_pport_send_enable(pport))
480 bfa_sm_set_state(pport, bfa_pport_sm_enabling);
482 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
484 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
485 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
486 bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
489 case BFA_PPORT_SM_STOP:
490 bfa_sm_set_state(pport, bfa_pport_sm_stopped);
493 case BFA_PPORT_SM_LINKUP:
494 case BFA_PPORT_SM_LINKDOWN:
496 * Possible to get link events when doing back-to-back
501 case BFA_PPORT_SM_HWFAIL:
502 bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
506 bfa_sm_fault(pport->bfa, event);
511 bfa_pport_sm_disabled(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
513 bfa_trc(pport->bfa, event);
516 case BFA_PPORT_SM_START:
518 * Ignore start event for a port that is disabled.
522 case BFA_PPORT_SM_STOP:
523 bfa_sm_set_state(pport, bfa_pport_sm_stopped);
526 case BFA_PPORT_SM_ENABLE:
527 if (bfa_pport_send_enable(pport))
528 bfa_sm_set_state(pport, bfa_pport_sm_enabling);
530 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
532 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
533 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
534 bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
537 case BFA_PPORT_SM_DISABLE:
543 case BFA_PPORT_SM_HWFAIL:
544 bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
548 bfa_sm_fault(pport->bfa, event);
553 bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
555 bfa_trc(pport->bfa, event);
558 case BFA_PPORT_SM_START:
559 if (bfa_pport_send_enable(pport))
560 bfa_sm_set_state(pport, bfa_pport_sm_enabling);
562 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
567 * Ignore all other events.
574 * Port is enabled. IOC is down/failed.
577 bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
579 bfa_trc(pport->bfa, event);
582 case BFA_PPORT_SM_START:
583 if (bfa_pport_send_enable(pport))
584 bfa_sm_set_state(pport, bfa_pport_sm_enabling);
586 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
598 * Port is disabled. IOC is down/failed.
601 bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
603 bfa_trc(pport->bfa, event);
606 case BFA_PPORT_SM_START:
607 bfa_sm_set_state(pport, bfa_pport_sm_disabled);
610 case BFA_PPORT_SM_ENABLE:
611 bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
629 __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete)
631 struct bfa_pport_s *pport = cbarg;
634 pport->event_cbfn(pport->event_cbarg, pport->hcb_event);
637 #define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \
641 bfa_pport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
644 *dm_len += PPORT_STATS_DMA_SZ;
648 bfa_pport_qresume(void *cbarg)
650 struct bfa_pport_s *port = cbarg;
652 bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME);
656 bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
661 dm_kva = bfa_meminfo_dma_virt(meminfo);
662 dm_pa = bfa_meminfo_dma_phys(meminfo);
664 pport->stats_kva = dm_kva;
665 pport->stats_pa = dm_pa;
666 pport->stats = (union bfa_pport_stats_u *)dm_kva;
668 dm_kva += PPORT_STATS_DMA_SZ;
669 dm_pa += PPORT_STATS_DMA_SZ;
671 bfa_meminfo_dma_virt(meminfo) = dm_kva;
672 bfa_meminfo_dma_phys(meminfo) = dm_pa;
676 * Memory initialization.
679 bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
680 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
682 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
683 struct bfa_pport_cfg_s *port_cfg = &pport->cfg;
685 bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s));
688 bfa_pport_mem_claim(pport, meminfo);
690 bfa_sm_set_state(pport, bfa_pport_sm_uninit);
693 * initialize and set default configuration
695 port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P;
696 port_cfg->speed = BFA_PPORT_SPEED_AUTO;
697 port_cfg->trunked = BFA_FALSE;
698 port_cfg->maxfrsize = 0;
700 port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;
702 bfa_reqq_winit(&pport->reqq_wait, bfa_pport_qresume, pport);
706 bfa_pport_initdone(struct bfa_s *bfa)
708 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
711 * Initialize port attributes from IOC hardware data.
713 bfa_pport_set_wwns(pport);
714 if (pport->cfg.maxfrsize == 0)
715 pport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
716 pport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
717 pport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
719 bfa_assert(pport->cfg.maxfrsize);
720 bfa_assert(pport->cfg.rx_bbcredit);
721 bfa_assert(pport->speed_sup);
725 bfa_pport_detach(struct bfa_s *bfa)
730 * Called when IOC is ready.
733 bfa_pport_start(struct bfa_s *bfa)
735 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_START);
739 * Called before IOC is stopped.
742 bfa_pport_stop(struct bfa_s *bfa)
744 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_STOP);
748 * Called when IOC failure is detected.
751 bfa_pport_iocdisable(struct bfa_s *bfa)
753 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_HWFAIL);
757 bfa_pport_update_linkinfo(struct bfa_pport_s *pport)
759 struct bfi_pport_event_s *pevent = pport->event_arg.i2hmsg.event;
761 pport->speed = pevent->link_state.speed;
762 pport->topology = pevent->link_state.topology;
764 if (pport->topology == BFA_PPORT_TOPOLOGY_LOOP)
765 pport->myalpa = pevent->link_state.tl.loop_info.myalpa;
770 bfa_os_assign(pport->qos_attr, pevent->link_state.qos_attr);
771 bfa_os_assign(pport->qos_vc_attr, pevent->link_state.qos_vc_attr);
773 bfa_trc(pport->bfa, pport->speed);
774 bfa_trc(pport->bfa, pport->topology);
778 bfa_pport_reset_linkinfo(struct bfa_pport_s *pport)
780 pport->speed = BFA_PPORT_SPEED_UNKNOWN;
781 pport->topology = BFA_PPORT_TOPOLOGY_NONE;
785 * Send port enable message to firmware.
788 bfa_pport_send_enable(struct bfa_pport_s *port)
790 struct bfi_pport_enable_req_s *m;
793 * Increment message tag before queue check, so that responses to old
794 * requests are discarded.
799 * check for room in queue to send request now
801 m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
803 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
807 bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ,
808 bfa_lpuid(port->bfa));
809 m->nwwn = port->nwwn;
810 m->pwwn = port->pwwn;
811 m->port_cfg = port->cfg;
812 m->msgtag = port->msgtag;
813 m->port_cfg.maxfrsize = bfa_os_htons(port->cfg.maxfrsize);
814 bfa_dma_be_addr_set(m->stats_dma_addr, port->stats_pa);
815 bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_lo);
816 bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_hi);
819 * queue I/O message to firmware
821 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
826 * Send port disable message to firmware.
829 bfa_pport_send_disable(struct bfa_pport_s *port)
831 bfi_pport_disable_req_t *m;
834 * Increment message tag before queue check, so that responses to old
835 * requests are discarded.
840 * check for room in queue to send request now
842 m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
844 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
848 bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ,
849 bfa_lpuid(port->bfa));
850 m->msgtag = port->msgtag;
853 * queue I/O message to firmware
855 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
861 bfa_pport_set_wwns(struct bfa_pport_s *port)
863 port->pwwn = bfa_ioc_get_pwwn(&port->bfa->ioc);
864 port->nwwn = bfa_ioc_get_nwwn(&port->bfa->ioc);
866 bfa_trc(port->bfa, port->pwwn);
867 bfa_trc(port->bfa, port->nwwn);
871 bfa_port_send_txcredit(void *port_cbarg)
874 struct bfa_pport_s *port = port_cbarg;
875 struct bfi_pport_set_svc_params_req_s *m;
878 * check for room in queue to send request now
880 m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
882 bfa_trc(port->bfa, port->cfg.tx_bbcredit);
886 bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ,
887 bfa_lpuid(port->bfa));
888 m->tx_bbcredit = bfa_os_htons((u16) port->cfg.tx_bbcredit);
891 * queue I/O message to firmware
893 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
903 * Firmware message handler.
906 bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
908 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
909 union bfi_pport_i2h_msg_u i2hmsg;
912 pport->event_arg.i2hmsg = i2hmsg;
914 switch (msg->mhdr.msg_id) {
915 case BFI_PPORT_I2H_ENABLE_RSP:
916 if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
917 bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
920 case BFI_PPORT_I2H_DISABLE_RSP:
921 if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
922 bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
925 case BFI_PPORT_I2H_EVENT:
926 switch (i2hmsg.event->link_state.linkstate) {
927 case BFA_PPORT_LINKUP:
928 bfa_sm_send_event(pport, BFA_PPORT_SM_LINKUP);
930 case BFA_PPORT_LINKDOWN:
931 bfa_sm_send_event(pport, BFA_PPORT_SM_LINKDOWN);
933 case BFA_PPORT_TRUNK_LINKDOWN:
934 /** todo: event notification */
939 case BFI_PPORT_I2H_GET_STATS_RSP:
940 case BFI_PPORT_I2H_GET_QOS_STATS_RSP:
942 * check for timer pop before processing the rsp
944 if (pport->stats_busy == BFA_FALSE
945 || pport->stats_status == BFA_STATUS_ETIMER)
948 bfa_timer_stop(&pport->timer);
949 pport->stats_status = i2hmsg.getstats_rsp->status;
950 bfa_cb_queue(pport->bfa, &pport->hcb_qe, __bfa_cb_port_stats,
953 case BFI_PPORT_I2H_CLEAR_STATS_RSP:
954 case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP:
956 * check for timer pop before processing the rsp
958 if (pport->stats_busy == BFA_FALSE
959 || pport->stats_status == BFA_STATUS_ETIMER)
962 bfa_timer_stop(&pport->timer);
963 pport->stats_status = BFA_STATUS_OK;
964 bfa_cb_queue(pport->bfa, &pport->hcb_qe,
965 __bfa_cb_port_stats_clr, pport);
980 * Registered callback for port events.
983 bfa_pport_event_register(struct bfa_s *bfa,
984 void (*cbfn) (void *cbarg, bfa_pport_event_t event),
987 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
989 pport->event_cbfn = cbfn;
990 pport->event_cbarg = cbarg;
994 bfa_pport_enable(struct bfa_s *bfa)
996 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
998 if (pport->diag_busy)
999 return BFA_STATUS_DIAG_BUSY;
1000 else if (bfa_sm_cmp_state
1001 (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait))
1002 return BFA_STATUS_DEVBUSY;
1004 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE);
1005 return BFA_STATUS_OK;
1009 bfa_pport_disable(struct bfa_s *bfa)
1011 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_DISABLE);
1012 return BFA_STATUS_OK;
1016 * Configure port speed.
1019 bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1021 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1023 bfa_trc(bfa, speed);
1025 if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > pport->speed_sup)) {
1026 bfa_trc(bfa, pport->speed_sup);
1027 return BFA_STATUS_UNSUPP_SPEED;
1030 pport->cfg.speed = speed;
1032 return BFA_STATUS_OK;
1036 * Get current speed.
1038 enum bfa_pport_speed
1039 bfa_pport_get_speed(struct bfa_s *bfa)
1041 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1047 * Configure port topology.
1050 bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
1052 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1054 bfa_trc(bfa, topology);
1055 bfa_trc(bfa, pport->cfg.topology);
1058 case BFA_PPORT_TOPOLOGY_P2P:
1059 case BFA_PPORT_TOPOLOGY_LOOP:
1060 case BFA_PPORT_TOPOLOGY_AUTO:
1064 return BFA_STATUS_EINVAL;
1067 pport->cfg.topology = topology;
1068 return BFA_STATUS_OK;
1072 * Get current topology.
1074 enum bfa_pport_topology
1075 bfa_pport_get_topology(struct bfa_s *bfa)
1077 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1079 return port->topology;
1083 bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
1085 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1088 bfa_trc(bfa, pport->cfg.cfg_hardalpa);
1089 bfa_trc(bfa, pport->cfg.hardalpa);
1091 pport->cfg.cfg_hardalpa = BFA_TRUE;
1092 pport->cfg.hardalpa = alpa;
1094 return BFA_STATUS_OK;
1098 bfa_pport_clr_hardalpa(struct bfa_s *bfa)
1100 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1102 bfa_trc(bfa, pport->cfg.cfg_hardalpa);
1103 bfa_trc(bfa, pport->cfg.hardalpa);
1105 pport->cfg.cfg_hardalpa = BFA_FALSE;
1106 return BFA_STATUS_OK;
1110 bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
1112 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1114 *alpa = port->cfg.hardalpa;
1115 return port->cfg.cfg_hardalpa;
1119 bfa_pport_get_myalpa(struct bfa_s *bfa)
1121 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1123 return port->myalpa;
1127 bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
1129 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1131 bfa_trc(bfa, maxfrsize);
1132 bfa_trc(bfa, pport->cfg.maxfrsize);
1137 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
1138 return BFA_STATUS_INVLD_DFSZ;
1141 * power of 2, if not the max frame size of 2112
1143 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
1144 return BFA_STATUS_INVLD_DFSZ;
1146 pport->cfg.maxfrsize = maxfrsize;
1147 return BFA_STATUS_OK;
1151 bfa_pport_get_maxfrsize(struct bfa_s *bfa)
1153 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1155 return port->cfg.maxfrsize;
1159 bfa_pport_mypid(struct bfa_s *bfa)
1161 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1167 bfa_pport_get_rx_bbcredit(struct bfa_s *bfa)
1169 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1171 return port->cfg.rx_bbcredit;
1175 bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
1177 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1179 port->cfg.tx_bbcredit = (u8) tx_bbcredit;
1180 bfa_port_send_txcredit(port);
1184 * Get port attributes.
1188 bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
1190 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1198 bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
1200 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1202 bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));
1204 attr->nwwn = pport->nwwn;
1205 attr->pwwn = pport->pwwn;
1207 bfa_os_memcpy(&attr->pport_cfg, &pport->cfg,
1208 sizeof(struct bfa_pport_cfg_s));
1212 attr->pport_cfg.speed = pport->cfg.speed;
1213 attr->speed_supported = pport->speed_sup;
1214 attr->speed = pport->speed;
1215 attr->cos_supported = FC_CLASS_3;
1218 * topology attributes
1220 attr->pport_cfg.topology = pport->cfg.topology;
1221 attr->topology = pport->topology;
1226 attr->beacon = pport->beacon;
1227 attr->link_e2e_beacon = pport->link_e2e_beacon;
1228 attr->plog_enabled = bfa_plog_get_setting(pport->bfa->plog);
1230 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
1231 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
1232 attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm);
1233 if (bfa_ioc_is_disabled(&pport->bfa->ioc))
1234 attr->port_state = BFA_PPORT_ST_IOCDIS;
1235 else if (bfa_ioc_fw_mismatch(&pport->bfa->ioc))
1236 attr->port_state = BFA_PPORT_ST_FWMISMATCH;
1240 bfa_port_stats_query(void *cbarg)
1242 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1243 bfi_pport_get_stats_req_t *msg;
1245 msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
1248 port->stats_qfull = BFA_TRUE;
1249 bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_query,
1251 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
1254 port->stats_qfull = BFA_FALSE;
1256 bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t));
1257 bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ,
1258 bfa_lpuid(port->bfa));
1259 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
1265 bfa_port_stats_clear(void *cbarg)
1267 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1268 bfi_pport_clear_stats_req_t *msg;
1270 msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
1273 port->stats_qfull = BFA_TRUE;
1274 bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_clear,
1276 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
1279 port->stats_qfull = BFA_FALSE;
1281 bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t));
1282 bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ,
1283 bfa_lpuid(port->bfa));
1284 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
1289 bfa_port_qos_stats_clear(void *cbarg)
1291 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1292 bfi_pport_clear_qos_stats_req_t *msg;
1294 msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
1297 port->stats_qfull = BFA_TRUE;
1298 bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear,
1300 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
1303 port->stats_qfull = BFA_FALSE;
1305 bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t));
1306 bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ,
1307 bfa_lpuid(port->bfa));
1308 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
1313 bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s)
1315 u32 *dip = (u32 *) d;
1316 u32 *sip = (u32 *) s;
1320 * Do 64 bit fields swap first
1324 ((sizeof(union bfa_pport_stats_u) -
1325 sizeof(struct bfa_qos_stats_s)) / sizeof(u32)); i = i + 2) {
1327 dip[i] = bfa_os_ntohl(sip[i]);
1328 dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
1330 dip[i] = bfa_os_ntohl(sip[i + 1]);
1331 dip[i + 1] = bfa_os_ntohl(sip[i]);
1336 * Now swap the 32 bit fields
1338 for (; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); ++i)
1339 dip[i] = bfa_os_ntohl(sip[i]);
1343 __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete)
1345 struct bfa_pport_s *port = cbarg;
1348 port->stats_cbfn(port->stats_cbarg, port->stats_status);
1350 port->stats_busy = BFA_FALSE;
1351 port->stats_status = BFA_STATUS_OK;
1356 bfa_port_stats_clr_timeout(void *cbarg)
1358 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1360 bfa_trc(port->bfa, port->stats_qfull);
1362 if (port->stats_qfull) {
1363 bfa_reqq_wcancel(&port->stats_reqq_wait);
1364 port->stats_qfull = BFA_FALSE;
1367 port->stats_status = BFA_STATUS_ETIMER;
1368 bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port);
1372 __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete)
1374 struct bfa_pport_s *port = cbarg;
1377 if (port->stats_status == BFA_STATUS_OK)
1378 bfa_pport_stats_swap(port->stats_ret, port->stats);
1379 port->stats_cbfn(port->stats_cbarg, port->stats_status);
1381 port->stats_busy = BFA_FALSE;
1382 port->stats_status = BFA_STATUS_OK;
1387 bfa_port_stats_timeout(void *cbarg)
1389 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1391 bfa_trc(port->bfa, port->stats_qfull);
1393 if (port->stats_qfull) {
1394 bfa_reqq_wcancel(&port->stats_reqq_wait);
1395 port->stats_qfull = BFA_FALSE;
1398 port->stats_status = BFA_STATUS_ETIMER;
1399 bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats, port);
1402 #define BFA_PORT_STATS_TOV 1000
1405 * Fetch port attributes.
1408 bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
1409 bfa_cb_pport_t cbfn, void *cbarg)
1411 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1413 if (port->stats_busy) {
1414 bfa_trc(bfa, port->stats_busy);
1415 return BFA_STATUS_DEVBUSY;
1418 port->stats_busy = BFA_TRUE;
1419 port->stats_ret = stats;
1420 port->stats_cbfn = cbfn;
1421 port->stats_cbarg = cbarg;
1423 bfa_port_stats_query(port);
1425 bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port,
1426 BFA_PORT_STATS_TOV);
1427 return BFA_STATUS_OK;
1431 bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1433 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1435 if (port->stats_busy) {
1436 bfa_trc(bfa, port->stats_busy);
1437 return BFA_STATUS_DEVBUSY;
1440 port->stats_busy = BFA_TRUE;
1441 port->stats_cbfn = cbfn;
1442 port->stats_cbarg = cbarg;
1444 bfa_port_stats_clear(port);
1446 bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
1447 BFA_PORT_STATS_TOV);
1448 return BFA_STATUS_OK;
1452 bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
1454 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1456 bfa_trc(bfa, bitmap);
1457 bfa_trc(bfa, pport->cfg.trunked);
1458 bfa_trc(bfa, pport->cfg.trunk_ports);
1460 if (!bitmap || (bitmap & (bitmap - 1)))
1461 return BFA_STATUS_EINVAL;
1463 pport->cfg.trunked = BFA_TRUE;
1464 pport->cfg.trunk_ports = bitmap;
1466 return BFA_STATUS_OK;
1470 bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
1472 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1474 qos_attr->state = bfa_os_ntohl(pport->qos_attr.state);
1475 qos_attr->total_bb_cr = bfa_os_ntohl(pport->qos_attr.total_bb_cr);
1479 bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
1480 struct bfa_qos_vc_attr_s *qos_vc_attr)
1482 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1483 struct bfa_qos_vc_attr_s *bfa_vc_attr = &pport->qos_vc_attr;
1486 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
1487 qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
1488 qos_vc_attr->elp_opmode_flags =
1489 bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
1492 * Individual VC info
1494 while (i < qos_vc_attr->total_vc_count) {
1495 qos_vc_attr->vc_info[i].vc_credit =
1496 bfa_vc_attr->vc_info[i].vc_credit;
1497 qos_vc_attr->vc_info[i].borrow_credit =
1498 bfa_vc_attr->vc_info[i].borrow_credit;
1499 qos_vc_attr->vc_info[i].priority =
1500 bfa_vc_attr->vc_info[i].priority;
1509 bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
1510 bfa_cb_pport_t cbfn, void *cbarg)
1513 * QoS stats is embedded in port stats
1515 return bfa_pport_get_stats(bfa, stats, cbfn, cbarg);
1519 bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1521 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1523 if (port->stats_busy) {
1524 bfa_trc(bfa, port->stats_busy);
1525 return BFA_STATUS_DEVBUSY;
1528 port->stats_busy = BFA_TRUE;
1529 port->stats_cbfn = cbfn;
1530 port->stats_cbarg = cbarg;
1532 bfa_port_qos_stats_clear(port);
1534 bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
1535 BFA_PORT_STATS_TOV);
1536 return BFA_STATUS_OK;
1540 * Fetch port attributes.
1543 bfa_pport_trunk_disable(struct bfa_s *bfa)
1545 return BFA_STATUS_OK;
1549 bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
1551 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1553 *bitmap = port->cfg.trunk_ports;
1554 return port->cfg.trunked;
1558 bfa_pport_is_disabled(struct bfa_s *bfa)
1560 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1562 return bfa_sm_to_state(hal_pport_sm_table, port->sm) ==
1563 BFA_PPORT_ST_DISABLED;
1568 bfa_pport_is_ratelim(struct bfa_s *bfa)
1570 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1572 return pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
1577 bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
1579 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1581 bfa_trc(bfa, on_off);
1582 bfa_trc(bfa, pport->cfg.qos_enabled);
1584 pport->cfg.qos_enabled = on_off;
1588 bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
1590 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1592 bfa_trc(bfa, on_off);
1593 bfa_trc(bfa, pport->cfg.ratelimit);
1595 pport->cfg.ratelimit = on_off;
1596 if (pport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
1597 pport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
1601 * Configure default minimum ratelim speed
1604 bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1606 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1608 bfa_trc(bfa, speed);
1611 * Auto and speeds greater than the supported speed, are invalid
1613 if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) {
1614 bfa_trc(bfa, pport->speed_sup);
1615 return BFA_STATUS_UNSUPP_SPEED;
1618 pport->cfg.trl_def_speed = speed;
1620 return BFA_STATUS_OK;
1624 * Get default minimum ratelim speed
1626 enum bfa_pport_speed
1627 bfa_pport_get_ratelim_speed(struct bfa_s *bfa)
1629 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1631 bfa_trc(bfa, pport->cfg.trl_def_speed);
1632 return pport->cfg.trl_def_speed;
1637 bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status)
1639 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1641 bfa_trc(bfa, status);
1642 bfa_trc(bfa, pport->diag_busy);
1644 pport->diag_busy = status;
1648 bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
1649 bfa_boolean_t link_e2e_beacon)
1651 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
1653 bfa_trc(bfa, beacon);
1654 bfa_trc(bfa, link_e2e_beacon);
1655 bfa_trc(bfa, pport->beacon);
1656 bfa_trc(bfa, pport->link_e2e_beacon);
1658 pport->beacon = beacon;
1659 pport->link_e2e_beacon = link_e2e_beacon;
1663 bfa_pport_is_linkup(struct bfa_s *bfa)
1665 return bfa_sm_cmp_state(BFA_PORT_MOD(bfa), bfa_pport_sm_linkup);