2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/device.h>
33 #include <linux/netdevice.h>
36 #define MLX5E_MAX_PRIORITY 8
38 #define MLX5E_100MB (100000)
39 #define MLX5E_1GB (1000000)
41 #define MLX5E_CEE_STATE_UP 1
42 #define MLX5E_CEE_STATE_DOWN 0
45 MLX5E_VENDOR_TC_GROUP_NUM = 7,
46 MLX5E_LOWEST_PRIO_GROUP = 0,
49 #define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
50 MLX5_CAP_QCAM_REG(mdev, qpts) && \
51 MLX5_CAP_QCAM_REG(mdev, qpdpm))
53 static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
54 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
56 /* If dcbx mode is non-host, set the dcbx mode to host. */
58 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
59 enum mlx5_dcbx_oper_mode mode)
61 struct mlx5_core_dev *mdev = priv->mdev;
62 u32 param[MLX5_ST_SZ_DW(dcbx_param)];
/* Read the current DCBX parameters so unrelated fields are preserved
 * in the read-modify-write below.
 */
65 err = mlx5_query_port_dcbx_param(mdev, param);
69 MLX5_SET(dcbx_param, param, version_admin, mode);
/* Only a firmware-controlled (non-host) mode advertises "willing". */
70 if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
71 MLX5_SET(dcbx_param, param, willing_admin, 1);
73 return mlx5_set_port_dcbx_param(mdev, param);
/* Switch DCBX from firmware-controlled (auto) to host-controlled mode
 * and cache the new mode in priv->dcbx on success.
 */
76 static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
78 struct mlx5e_dcbx *dcbx = &priv->dcbx;
/* Nothing to do if DCBX is unsupported or already in host mode. */
81 if (!MLX5_CAP_GEN(priv->mdev, dcbx))
84 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
87 err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
/* Record the new mode only after the device accepted it. */
91 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
/* dcbnl .ieee_getets: report the ETS configuration (prio->TC mapping,
 * per-TC TX bandwidth and TSA type) read back from firmware.
 */
95 static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
98 struct mlx5e_priv *priv = netdev_priv(netdev);
99 struct mlx5_core_dev *mdev = priv->mdev;
100 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
101 bool is_tc_group_6_exist = false;
102 bool is_zero_bw_ets_tc = false;
106 if (!MLX5_CAP_GEN(priv->mdev, ets))
109 ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
110 for (i = 0; i < ets->ets_cap; i++) {
111 err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
115 err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
119 err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
/* A TC in group (LOWEST+1) with partial BW marks the "ETS zero BW"
 * layout programmed by setets_core.
 */
123 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
124 tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
125 is_zero_bw_ets_tc = true;
/* If group 6 (VENDOR - 1) is in use, group 7 is not reported as vendor. */
127 if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
128 is_tc_group_6_exist = true;
131 /* Report 0% ets tc if exists */
132 if (is_zero_bw_ets_tc) {
133 for (i = 0; i < ets->ets_cap; i++)
134 if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
135 ets->tc_tx_bw[i] = 0;
138 /* Update tc_tsa based on fw setting */
139 for (i = 0; i < ets->ets_cap; i++) {
140 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
141 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
142 else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
143 !is_tc_group_6_exist)
144 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
146 memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
/* Map each TC's TSA type to a hardware priority group:
 * vendor TCs -> group 7, strict TCs -> ascending groups above the ETS
 * group(s), ETS TCs -> group 0 (or group 1 for non-zero-BW ETS TCs when
 * some ETS TC has 0% BW).
 */
151 static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
153 bool any_tc_mapped_to_ets = false;
154 bool ets_zero_bw = false;
158 for (i = 0; i <= max_tc; i++) {
159 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
160 any_tc_mapped_to_ets = true;
161 if (!ets->tc_tx_bw[i])
166 /* strict group has higher priority than ets group */
167 strict_group = MLX5E_LOWEST_PRIO_GROUP;
168 if (any_tc_mapped_to_ets)
173 for (i = 0; i <= max_tc; i++) {
174 switch (ets->tc_tsa[i]) {
175 case IEEE_8021QAZ_TSA_VENDOR:
176 tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
178 case IEEE_8021QAZ_TSA_STRICT:
/* Each strict TC gets its own, successively higher group. */
179 tc_group[i] = strict_group++;
181 case IEEE_8021QAZ_TSA_ETS:
182 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
/* With zero-BW ETS TCs present, non-zero-BW ETS TCs move to group 1. */
183 if (ets->tc_tx_bw[i] && ets_zero_bw)
184 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
/* Compute the per-TC TX bandwidth to program: strict/vendor TCs get 100%
 * of their group; zero-BW ETS TCs split 100% equally among themselves,
 * with the division remainder added to the last such TC.
 */
190 static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
191 u8 *tc_group, int max_tc)
193 int bw_for_ets_zero_bw_tc = 0;
194 int last_ets_zero_bw_tc = -1;
195 int num_ets_zero_bw = 0;
/* First pass: count the ETS TCs configured with 0% BW. */
198 for (i = 0; i <= max_tc; i++) {
199 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
202 last_ets_zero_bw_tc = i;
207 bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
209 for (i = 0; i <= max_tc; i++) {
210 switch (ets->tc_tsa[i]) {
211 case IEEE_8021QAZ_TSA_VENDOR:
212 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
214 case IEEE_8021QAZ_TSA_STRICT:
215 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
217 case IEEE_8021QAZ_TSA_ETS:
218 tc_tx_bw[i] = ets->tc_tx_bw[i] ?
220 bw_for_ets_zero_bw_tc;
225 /* Make sure the total bw for ets zero bw group is 100% */
226 if (last_ets_zero_bw_tc != -1)
227 tc_tx_bw[last_ets_zero_bw_tc] +=
228 MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
231 /* If there are ETS BW 0,
232 * Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
233 * Set group #0 to all the ETS BW 0 tcs and
234 * equally splits the 100% BW between them
235 * Report both group #0 and #1 as ETS type.
236 * All the tcs in group #0 will be reported with 0% BW.
 */
238 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
240 struct mlx5_core_dev *mdev = priv->mdev;
241 u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
242 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
243 int max_tc = mlx5_max_tc(mdev);
/* Translate the IEEE ETS request into HW groups and BW allocations. */
246 mlx5e_build_tc_group(ets, tc_group, max_tc);
247 mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
/* Program the port: prio->TC map, TC groups, then per-TC BW. */
249 err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
253 err = mlx5_set_port_tc_group(mdev, tc_group);
257 err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
/* Cache the accepted TSA configuration for later getets reporting. */
262 memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
264 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
265 mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
266 __func__, i, ets->prio_tc[i]);
267 mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
268 __func__, i, tc_tx_bw[i], tc_group[i]);
/* Validate an IEEE ETS request: every priority must map to a TC below
 * MLX5E_MAX_PRIORITY, and if any TC uses the ETS TSA the ETS bandwidths
 * must sum to exactly 100%.
 */
274 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
275 struct ieee_ets *ets)
277 bool have_ets_tc = false;
281 /* Validate Priority */
282 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
283 if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
285 "Failed to validate ETS: priority value greater than max(%d)\n",
291 /* Validate Bandwidth Sum */
292 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
293 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
295 bw_sum += ets->tc_tx_bw[i];
299 if (have_ets_tc && bw_sum != 100) {
301 "Failed to validate ETS: BW sum is illegal\n");
/* dcbnl .ieee_setets: validate the request, then apply it via
 * mlx5e_dcbnl_ieee_setets_core(). Requires the ETS capability.
 */
307 static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
308 struct ieee_ets *ets)
310 struct mlx5e_priv *priv = netdev_priv(netdev);
313 if (!MLX5_CAP_GEN(priv->mdev, ets))
316 err = mlx5e_dbcnl_validate_ets(netdev, ets);
320 err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
/* dcbnl .ieee_getpfc: report PFC capability, per-priority pause counters
 * (from cached port stats) and the enabled-priority bitmap from firmware.
 */
327 static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
328 struct ieee_pfc *pfc)
330 struct mlx5e_priv *priv = netdev_priv(dev);
331 struct mlx5_core_dev *mdev = priv->mdev;
332 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
335 pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
336 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
/* tx_pause/rx_pause counters double as PFC requests/indications. */
337 pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
338 pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
341 return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
/* dcbnl .ieee_setpfc: program the PFC enable bitmap if it changed, then
 * toggle the port link so the new setting takes effect.
 */
344 static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
345 struct ieee_pfc *pfc)
347 struct mlx5e_priv *priv = netdev_priv(dev);
348 struct mlx5_core_dev *mdev = priv->mdev;
/* Skip the firmware command when nothing changes. */
352 mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
354 if (pfc->pfc_en == curr_pfc_en)
357 ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
358 mlx5_toggle_port_link(mdev);
362 "%s: PFC per priority bit mask: 0x%x\n",
363 __func__, pfc->pfc_en);
/* dcbnl .getdcbx: return the cached DCBX capability flags. */
368 static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
370 struct mlx5e_priv *priv = netdev_priv(dev);
372 return priv->dcbx.cap;
/* dcbnl .setdcbx: change the DCBX operating mode. LLD-managed mode is
 * rejected; mode == 0 hands control back to firmware (auto); otherwise a
 * host-managed mode is required and the device is switched to host mode.
 */
375 static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
377 struct mlx5e_priv *priv = netdev_priv(dev);
378 struct mlx5e_dcbx *dcbx = &priv->dcbx;
380 if (mode & DCB_CAP_DCBX_LLD_MANAGED)
383 if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
384 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
387 /* set dcbx to fw controlled */
388 if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
389 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
390 dcbx->cap &= ~DCB_CAP_DCBX_HOST;
397 if (!(mode & DCB_CAP_DCBX_HOST))
400 if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
/* dcbnl .ieee_setapp: install a DSCP->priority mapping. Only the DSCP
 * app selector is supported; the first entry switches the port to DSCP
 * trust state, and the old app-table entry is replaced by the new one.
 */
408 static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
410 struct mlx5e_priv *priv = netdev_priv(dev);
415 if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
418 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
421 if (!MLX5_DSCP_SUPPORTED(priv->mdev))
424 if (app->protocol >= MLX5E_MAX_DSCP)
427 /* Save the old entry info */
428 temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
429 temp.protocol = app->protocol;
430 temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];
432 /* Check if need to switch to dscp trust state */
433 if (!priv->dcbx.dscp_app_cnt) {
434 err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
439 /* Skip the fw command if new and old mapping are the same */
440 if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
441 err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
446 /* Delete the old entry if exists */
448 err = dcb_ieee_delapp(dev, &temp);
452 /* Add new entry and update counter */
453 err = dcb_ieee_setapp(dev, app);
458 priv->dcbx.dscp_app_cnt++;
/* Error path: fall back to PCP trust state (best effort). */
463 mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
/* dcbnl .ieee_delapp: remove a DSCP->priority mapping, reset that DSCP's
 * priority to 0 in firmware, and switch back to PCP trust state when the
 * last DSCP app entry is removed.
 */
467 static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
469 struct mlx5e_priv *priv = netdev_priv(dev);
472 if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
475 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
478 if (!MLX5_DSCP_SUPPORTED(priv->mdev))
481 if (app->protocol >= MLX5E_MAX_DSCP)
484 /* Skip if no dscp app entry */
485 if (!priv->dcbx.dscp_app_cnt)
488 /* Check if the entry matches fw setting */
489 if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
492 /* Delete the app entry */
493 err = dcb_ieee_delapp(dev, app);
497 /* Reset the priority mapping back to zero */
498 err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
502 priv->dcbx.dscp_app_cnt--;
504 /* Check if need to switch to pcp trust state */
505 if (!priv->dcbx.dscp_app_cnt)
506 err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
/* Error path: fall back to PCP trust state (best effort). */
511 mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
/* dcbnl .ieee_getmaxrate: report the per-TC rate limit, converting the
 * firmware's (value, unit) pairs into kbps (100 Mbps or 1 Gbps units).
 */
515 static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
516 struct ieee_maxrate *maxrate)
518 struct mlx5e_priv *priv = netdev_priv(netdev);
519 struct mlx5_core_dev *mdev = priv->mdev;
520 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
521 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
525 err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
/* 0 (left by memset) means "no limit" for TCs with MLX5_BW_NO_LIMIT. */
529 memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
531 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
532 switch (max_bw_unit[i]) {
533 case MLX5_100_MBPS_UNIT:
534 maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
537 maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
539 case MLX5_BW_NO_LIMIT:
/* Any other unit is unexpected from firmware. */
542 WARN(true, "non-supported BW unit");
/* dcbnl .ieee_setmaxrate: program per-TC rate limits. Rates below the
 * 255 * 100 Mbps ceiling use 100 Mbps units for finer granularity;
 * larger rates use 1 Gbps units; a zero rate means "no limit".
 */
550 static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
551 struct ieee_maxrate *maxrate)
553 struct mlx5e_priv *priv = netdev_priv(netdev);
554 struct mlx5_core_dev *mdev = priv->mdev;
555 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
556 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
/* Largest rate representable in 100 Mbps units (8-bit value field). */
557 __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
560 memset(max_bw_value, 0, sizeof(max_bw_value));
561 memset(max_bw_unit, 0, sizeof(max_bw_unit));
563 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
564 if (!maxrate->tc_maxrate[i]) {
565 max_bw_unit[i] = MLX5_BW_NO_LIMIT;
568 if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
569 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
/* Round a non-zero request up to at least one unit. */
571 max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
572 max_bw_unit[i] = MLX5_100_MBPS_UNIT;
574 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
576 max_bw_unit[i] = MLX5_GBPS_UNIT;
580 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
581 mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
582 __func__, i, max_bw_value[i]);
585 return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
/* dcbnl .setall (CEE): commit the cached CEE configuration — build an
 * IEEE ETS struct from the priority-group settings and an IEEE PFC
 * struct from the per-priority PFC flags, then apply both.
 * Returns MLX5_DCB_CHG_RESET on success, MLX5_DCB_NO_CHG on failure.
 */
588 static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
590 struct mlx5e_priv *priv = netdev_priv(netdev);
591 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
592 struct mlx5_core_dev *mdev = priv->mdev;
595 int err = -EOPNOTSUPP;
598 if (!MLX5_CAP_GEN(mdev, ets))
601 memset(&ets, 0, sizeof(ets));
602 memset(&pfc, 0, sizeof(pfc));
/* CEE priority groups map 1:1 onto IEEE ETS traffic classes. */
604 ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
605 for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
606 ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
607 ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
608 ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
609 ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
611 "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
612 __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
616 err = mlx5e_dbcnl_validate_ets(netdev, &ets);
619 "%s, Failed to validate ETS: %d\n", __func__, err);
623 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
626 "%s, Failed to set ETS: %d\n", __func__, err);
631 pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
632 if (!cee_cfg->pfc_enable)
/* Collapse the per-priority PFC settings into the enable bitmap. */
635 for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
636 pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;
638 err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
641 "%s, Failed to set PFC: %d\n", __func__, err);
645 return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
/* dcbnl .getstate (CEE): DCB is always reported as enabled. */
648 static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
650 return MLX5E_CEE_STATE_UP;
/* dcbnl .getpermhwaddr (CEE): return the NIC vport MAC address; the
 * buffer is pre-filled with 0xff in case the query fails.
 */
653 static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
656 struct mlx5e_priv *priv = netdev_priv(netdev);
661 memset(perm_addr, 0xff, MAX_ADDR_LEN);
663 mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
/* dcbnl .setpgtccfgtx (CEE): cache a priority -> priority-group mapping;
 * it is applied to hardware later, by mlx5e_dcbnl_setall().
 */
666 static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
667 int priority, u8 prio_type,
668 u8 pgid, u8 bw_pct, u8 up_map)
670 struct mlx5e_priv *priv = netdev_priv(netdev);
671 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
673 if (priority >= CEE_DCBX_MAX_PRIO) {
675 "%s, priority is out of range\n", __func__);
679 if (pgid >= CEE_DCBX_MAX_PGS) {
681 "%s, priority group is out of range\n", __func__);
685 cee_cfg->prio_to_pg_map[priority] = pgid;
/* dcbnl .setpgbwgcfgtx (CEE): cache a priority group's bandwidth
 * percentage; applied to hardware by mlx5e_dcbnl_setall().
 */
688 static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
691 struct mlx5e_priv *priv = netdev_priv(netdev);
692 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
694 if (pgid >= CEE_DCBX_MAX_PGS) {
696 "%s, priority group is out of range\n", __func__);
700 cee_cfg->pg_bw_pct[pgid] = bw_pct;
/* dcbnl .getpgtccfgtx (CEE): report the priority group (TC) currently
 * assigned to @priority, read back from firmware.
 */
703 static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
704 int priority, u8 *prio_type,
705 u8 *pgid, u8 *bw_pct, u8 *up_map)
707 struct mlx5e_priv *priv = netdev_priv(netdev);
708 struct mlx5_core_dev *mdev = priv->mdev;
710 if (!MLX5_CAP_GEN(priv->mdev, ets)) {
711 netdev_err(netdev, "%s, ets is not supported\n", __func__);
715 if (priority >= CEE_DCBX_MAX_PRIO) {
717 "%s, priority is out of range\n", __func__);
725 if (mlx5_query_port_prio_tc(mdev, priority, pgid))
/* dcbnl .getpgbwgcfgtx (CEE): report a priority group's TX bandwidth by
 * reusing the IEEE getets path and indexing its tc_tx_bw table.
 */
729 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
730 int pgid, u8 *bw_pct)
734 if (pgid >= CEE_DCBX_MAX_PGS) {
736 "%s, priority group is out of range\n", __func__);
740 mlx5e_dcbnl_ieee_getets(netdev, &ets);
741 *bw_pct = ets.tc_tx_bw[pgid];
/* dcbnl .setpfccfg (CEE): cache the PFC setting for one priority;
 * applied to hardware by mlx5e_dcbnl_setall().
 */
744 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
745 int priority, u8 setting)
747 struct mlx5e_priv *priv = netdev_priv(netdev);
748 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
750 if (priority >= CEE_DCBX_MAX_PRIO) {
752 "%s, priority is out of range\n", __func__);
759 cee_cfg->pfc_setting[priority] = setting;
/* Helper: extract the PFC-enabled bit for one priority from the IEEE
 * PFC state queried via mlx5e_dcbnl_ieee_getpfc().
 */
763 mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
764 int priority, u8 *setting)
769 err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
774 *setting = (pfc.pfc_en >> priority) & 0x01;
/* dcbnl .getpfccfg (CEE): report whether PFC is enabled for @priority. */
779 static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
780 int priority, u8 *setting)
782 if (priority >= CEE_DCBX_MAX_PRIO) {
784 "%s, priority is out of range\n", __func__);
791 mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
/* dcbnl .getcap (CEE): report device DCB capabilities per attribute.
 * TC counts are encoded as a bitmask (1 << max_tc); the DCBX attribute
 * combines the cached cap with both CEE and IEEE version flags.
 */
794 static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
797 struct mlx5e_priv *priv = netdev_priv(netdev);
798 struct mlx5_core_dev *mdev = priv->mdev;
802 case DCB_CAP_ATTR_PG:
805 case DCB_CAP_ATTR_PFC:
808 case DCB_CAP_ATTR_UP2TC:
811 case DCB_CAP_ATTR_PG_TCS:
812 *cap = 1 << mlx5_max_tc(mdev);
814 case DCB_CAP_ATTR_PFC_TCS:
815 *cap = 1 << mlx5_max_tc(mdev);
817 case DCB_CAP_ATTR_GSP:
820 case DCB_CAP_ATTR_BCN:
823 case DCB_CAP_ATTR_DCBX:
824 *cap = priv->dcbx.cap |
825 DCB_CAP_DCBX_VER_CEE |
826 DCB_CAP_DCBX_VER_IEEE;
/* dcbnl .getnumtcs (CEE): report the number of traffic classes for both
 * the PG and PFC attributes (max TC index + 1).
 */
837 static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
840 struct mlx5e_priv *priv = netdev_priv(netdev);
841 struct mlx5_core_dev *mdev = priv->mdev;
844 case DCB_NUMTCS_ATTR_PG:
845 case DCB_NUMTCS_ATTR_PFC:
846 *num = mlx5_max_tc(mdev) + 1;
/* dcbnl .getpfcstate (CEE): UP if any priority has PFC enabled,
 * DOWN otherwise (or if the query fails).
 */
855 static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
859 if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
860 return MLX5E_CEE_STATE_DOWN;
862 return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
/* dcbnl .setpfcstate (CEE): cache the global PFC enable flag; applied
 * to hardware by mlx5e_dcbnl_setall(). Invalid states are ignored.
 */
865 static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
867 struct mlx5e_priv *priv = netdev_priv(netdev);
868 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
870 if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
873 cee_cfg->pfc_enable = state;
/* DCB netlink callbacks: IEEE 802.1Qaz handlers first, then the legacy
 * CEE handlers.
 */
876 const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
877 .ieee_getets = mlx5e_dcbnl_ieee_getets,
878 .ieee_setets = mlx5e_dcbnl_ieee_setets,
879 .ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
880 .ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
881 .ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
882 .ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
883 .ieee_setapp = mlx5e_dcbnl_ieee_setapp,
884 .ieee_delapp = mlx5e_dcbnl_ieee_delapp,
885 .getdcbx = mlx5e_dcbnl_getdcbx,
886 .setdcbx = mlx5e_dcbnl_setdcbx,
889 .setall = mlx5e_dcbnl_setall,
890 .getstate = mlx5e_dcbnl_getstate,
891 .getpermhwaddr = mlx5e_dcbnl_getpermhwaddr,
893 .setpgtccfgtx = mlx5e_dcbnl_setpgtccfgtx,
894 .setpgbwgcfgtx = mlx5e_dcbnl_setpgbwgcfgtx,
895 .getpgtccfgtx = mlx5e_dcbnl_getpgtccfgtx,
896 .getpgbwgcfgtx = mlx5e_dcbnl_getpgbwgcfgtx,
898 .setpfccfg = mlx5e_dcbnl_setpfccfg,
899 .getpfccfg = mlx5e_dcbnl_getpfccfg,
900 .getcap = mlx5e_dcbnl_getcap,
901 .getnumtcs = mlx5e_dcbnl_getnumtcs,
902 .getpfcstate = mlx5e_dcbnl_getpfcstate,
903 .setpfcstate = mlx5e_dcbnl_setpfcstate,
/* Query the operational DCBX version and normalize it to HOST or AUTO.
 * Defaults to HOST if the firmware query fails.
 */
906 static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
907 enum mlx5_dcbx_oper_mode *mode)
909 u32 out[MLX5_ST_SZ_DW(dcbx_param)];
911 *mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
913 if (!mlx5_query_port_dcbx_param(priv->mdev, out))
914 *mode = MLX5_GET(dcbx_param, out, version_oper);
916 /* From driver's point of view, we only care if the mode
917 * is host (HOST) or non-host (AUTO)
 */
919 if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
920 *mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
/* Initialize ETS defaults at driver load: every TC is vendor-type with
 * full bandwidth, plus a fixed default prio->tclass mapping.
 */
923 static void mlx5e_ets_init(struct mlx5e_priv *priv)
928 if (!MLX5_CAP_GEN(priv->mdev, ets))
931 memset(&ets, 0, sizeof(ets));
932 ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
933 for (i = 0; i < ets.ets_cap; i++) {
934 ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
935 ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
939 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
943 mlx5e_dcbnl_ieee_setets_core(priv, &ets);
/* Populate (INIT) or remove (DELETE) the kernel DCB app table entries
 * for all DSCP->priority mappings, and update the cached entry count.
 * Only runs when the port is in DSCP trust state.
 */
951 static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
956 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
959 if (!MLX5_DSCP_SUPPORTED(priv->mdev))
962 /* No SEL_DSCP entry in non DSCP state */
963 if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
966 temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
967 for (i = 0; i < MLX5E_MAX_DSCP; i++) {
969 temp.priority = priv->dcbx_dp.dscp2prio[i];
971 dcb_ieee_setapp(priv->netdev, &temp);
973 dcb_ieee_delapp(priv->netdev, &temp);
976 priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
/* Register all DSCP app table entries at netdev init. */
979 void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
981 mlx5e_dcbnl_dscp_app(priv, INIT);
/* Remove all DSCP app table entries at netdev teardown. */
984 void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
986 mlx5e_dcbnl_dscp_app(priv, DELETE);
/* Recompute the TX minimum inline mode: in DSCP trust state the device
 * must inline at least up to the IP header (so it can read the DSCP
 * field), so L2 inlining is promoted to IP.
 */
989 static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
990 struct mlx5e_params *params)
992 params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev);
993 if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
994 params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
995 params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
/* Apply a trust-state-dependent change of the TX min inline mode to the
 * running channels, switching to a fresh channel set if the mode
 * actually changed. Serialized by priv->state_lock.
 */
998 static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
1000 struct mlx5e_channels new_channels = {};
1002 mutex_lock(&priv->state_lock);
/* Nothing to reconfigure while the interface is closed. */
1004 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
1007 new_channels.params = priv->channels.params;
1008 mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
1010 /* Skip if tx_min_inline is the same */
1011 if (new_channels.params.tx_min_inline_mode ==
1012 priv->channels.params.tx_min_inline_mode)
1015 if (mlx5e_open_channels(priv, &new_channels))
1017 mlx5e_switch_priv_channels(priv, &new_channels, NULL);
1020 mutex_unlock(&priv->state_lock);
/* Program the port trust state (PCP vs DSCP) in firmware, cache it, and
 * refresh the SQ inline mode to match.
 */
1023 static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
1027 err = mlx5_set_trust_state(priv->mdev, trust_state);
1030 priv->dcbx_dp.trust_state = trust_state;
1031 mlx5e_trust_update_sq_inline_mode(priv);
/* Program one DSCP->priority mapping in firmware and mirror it in the
 * driver's dscp2prio cache.
 */
1036 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
1040 err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
1044 priv->dcbx_dp.dscp2prio[dscp] = prio;
/* Load the firmware's trust state and DSCP->priority table into the
 * driver cache at init, and derive the initial TX min inline mode.
 * No-op when QCAM/DSCP registers are not supported.
 */
1048 static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
1050 struct mlx5_core_dev *mdev = priv->mdev;
1053 if (!MLX5_DSCP_SUPPORTED(mdev))
1056 err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
1060 mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params);
1062 err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
/* Driver-load DCB init: read trust state, determine the DCBX mode,
 * advertise CEE+IEEE (plus HOST when host-controlled), and program the
 * ETS defaults. Requires the QoS capability.
 */
1069 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
1071 struct mlx5e_dcbx *dcbx = &priv->dcbx;
1073 mlx5e_trust_initialize(priv);
1075 if (!MLX5_CAP_GEN(priv->mdev, qos))
1078 if (MLX5_CAP_GEN(priv->mdev, dcbx))
1079 mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
1081 priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
1082 DCB_CAP_DCBX_VER_IEEE;
1083 if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
1084 priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
1086 mlx5e_ets_init(priv);