// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "eswitch.h"
#include "esw/qos.h"
#include "en/port.h"
#define CREATE_TRACE_POINTS
#include "diag/qos_tracepoint.h"

/* Minimum supported BW share value by the HW is 1 Mbit/sec */
#define MLX5_MIN_BW_SHARE 1

#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
	min_t(u32, max_t(u32, DIV_ROUND_UP(rate, divider), MLX5_MIN_BW_SHARE), limit)
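
/* Worked example (illustrative values, not taken from any device): with
 * divider = 100 and limit = 100, a 2500 Mbps guarantee maps to
 * DIV_ROUND_UP(2500, 100) = 25 shares; a 1 Mbps guarantee rounds up to
 * MLX5_MIN_BW_SHARE = 1; a 20000 Mbps guarantee computes to 200 and is
 * clamped down to the limit of 100.
 */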

struct mlx5_esw_rate_group {
	u32 tsar_ix;
	u32 max_rate;
	u32 min_rate;
	u32 bw_share;
	struct list_head list;
};
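
/* Scheduling hierarchy, as inferred from the parent_element_id wiring in
 * this file: one root TSAR per E-Switch, rate group TSARs parented to the
 * root, and vport scheduling elements parented to their group's TSAR (or
 * directly to the root TSAR when the vport has no group).
 */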

static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx,
			       u32 tsar_ix, u32 max_rate, u32 bw_share)
{
	u32 bitmask = 0;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return -EOPNOTSUPP;

	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;

	return mlx5_modify_scheduling_element_cmd(dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  sched_ctx,
						  tsar_ix,
						  bitmask);
}

static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group,
				u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
	struct mlx5_core_dev *dev = esw->dev;
	int err;

	err = esw_qos_tsar_config(dev, sched_ctx,
				  group->tsar_ix,
				  max_rate, bw_share);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed");

	trace_mlx5_esw_group_qos_config(dev, group, group->tsar_ix, bw_share, max_rate);

	return err;
}

static int esw_qos_vport_config(struct mlx5_eswitch *esw,
				struct mlx5_vport *vport,
				u32 max_rate, u32 bw_share,
				struct netlink_ext_ack *extack)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
	struct mlx5_core_dev *dev = esw->dev;
	int err;

	if (!vport->qos.enabled)
		return -EIO;

	err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix,
				  max_rate, bw_share);
	if (err) {
		esw_warn(esw->dev,
			 "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);
		NL_SET_ERR_MSG_MOD(extack, "E-Switch modify TSAR vport element failed");
		return err;
	}

	trace_mlx5_esw_vport_qos_config(vport, bw_share, max_rate);

	return 0;
}

static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
					      struct mlx5_esw_rate_group *group,
					      bool group_level)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_vport *evport;
	u32 max_guarantee = 0;
	unsigned long i;

	if (group_level) {
		struct mlx5_esw_rate_group *group;

		list_for_each_entry(group, &esw->qos.groups, list) {
			if (group->min_rate < max_guarantee)
				continue;
			max_guarantee = group->min_rate;
		}
	} else {
		mlx5_esw_for_each_vport(esw, i, evport) {
			if (!evport->enabled || !evport->qos.enabled ||
			    evport->qos.group != group || evport->qos.min_rate < max_guarantee)
				continue;
			max_guarantee = evport->qos.min_rate;
		}
	}

	if (max_guarantee)
		return max_t(u32, max_guarantee / fw_max_bw_share, 1);

	/* If the vports' min rate divider is 0 but their group has bw_share
	 * configured, then the vports' bw_share must be set to the minimal
	 * value.
	 */
	if (!group_level && !max_guarantee && group && group->bw_share)
		return 1;
	return 0;
}

static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max)
{
	if (divider)
		return MLX5_RATE_TO_BW_SHARE(min_rate, divider, fw_max);

	return 0;
}
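
/* Illustrative end-to-end example (numbers assumed, not device data): with
 * fw_max_bw_share = 100 and a largest guarantee of 10000 Mbps, the divider
 * is max(10000 / 100, 1) = 100, so vports with min_rate 10000/5000/1000
 * Mbps receive bw_share 100/50/10 respectively, i.e. guarantees are
 * normalized so the largest one uses the full HW share range.
 */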

static int esw_qos_normalize_vports_min_rate(struct mlx5_eswitch *esw,
					     struct mlx5_esw_rate_group *group,
					     struct netlink_ext_ack *extack)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	u32 divider = esw_qos_calculate_min_rate_divider(esw, group, false);
	struct mlx5_vport *evport;
	unsigned long i;
	u32 bw_share;
	int err;

	mlx5_esw_for_each_vport(esw, i, evport) {
		if (!evport->enabled || !evport->qos.enabled || evport->qos.group != group)
			continue;
		bw_share = esw_qos_calc_bw_share(evport->qos.min_rate, divider, fw_max_bw_share);

		if (bw_share == evport->qos.bw_share)
			continue;

		err = esw_qos_vport_config(esw, evport, evport->qos.max_rate, bw_share, extack);
		if (err)
			return err;

		evport->qos.bw_share = bw_share;
	}

	return 0;
}

static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divider,
					     struct netlink_ext_ack *extack)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_esw_rate_group *group;
	u32 bw_share;
	int err;

	list_for_each_entry(group, &esw->qos.groups, list) {
		bw_share = esw_qos_calc_bw_share(group->min_rate, divider, fw_max_bw_share);

		if (bw_share == group->bw_share)
			continue;

		err = esw_qos_group_config(esw, group, group->max_rate, bw_share, extack);
		if (err)
			return err;

		group->bw_share = bw_share;

		/* All the group's vports need to be set with the default
		 * bw_share to enable them with QoS.
		 */
		err = esw_qos_normalize_vports_min_rate(esw, group, extack);
		if (err)
			return err;
	}

	return 0;
}

static int esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
				      u32 min_rate, struct netlink_ext_ack *extack)
{
	u32 fw_max_bw_share, previous_min_rate;
	bool min_rate_supported;
	int err;

	lockdep_assert_held(&esw->state_lock);
	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
				fw_max_bw_share >= MLX5_MIN_BW_SHARE;
	if (min_rate && !min_rate_supported)
		return -EOPNOTSUPP;
	if (min_rate == evport->qos.min_rate)
		return 0;

	previous_min_rate = evport->qos.min_rate;
	evport->qos.min_rate = min_rate;
	err = esw_qos_normalize_vports_min_rate(esw, evport->qos.group, extack);
	if (err)
		evport->qos.min_rate = previous_min_rate;

	return err;
}

static int esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
				      u32 max_rate, struct netlink_ext_ack *extack)
{
	u32 act_max_rate = max_rate;
	bool max_rate_supported;
	int err;

	lockdep_assert_held(&esw->state_lock);
	max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);

	if (max_rate && !max_rate_supported)
		return -EOPNOTSUPP;
	if (max_rate == evport->qos.max_rate)
		return 0;

	/* If the parent group has a rate limit, the vport must fall back to
	 * the group's value when its own max rate is cleared (0).
	 */
	if (evport->qos.group && !max_rate)
		act_max_rate = evport->qos.group->max_rate;

	err = esw_qos_vport_config(esw, evport, act_max_rate, evport->qos.bw_share, extack);
	if (!err)
		evport->qos.max_rate = max_rate;

	return err;
}

static int esw_qos_set_group_min_rate(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group,
				      u32 min_rate, struct netlink_ext_ack *extack)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_core_dev *dev = esw->dev;
	u32 previous_min_rate, divider;
	int err;

	if (!(MLX5_CAP_QOS(dev, esw_bw_share) && fw_max_bw_share >= MLX5_MIN_BW_SHARE))
		return -EOPNOTSUPP;

	if (min_rate == group->min_rate)
		return 0;

	previous_min_rate = group->min_rate;
	group->min_rate = min_rate;
	divider = esw_qos_calculate_min_rate_divider(esw, group, true);
	err = esw_qos_normalize_groups_min_rate(esw, divider, extack);
	if (err) {
		group->min_rate = previous_min_rate;
		NL_SET_ERR_MSG_MOD(extack, "E-Switch group min rate setting failed");

		/* Attempt restoring previous configuration */
		divider = esw_qos_calculate_min_rate_divider(esw, group, true);
		if (esw_qos_normalize_groups_min_rate(esw, divider, extack))
			NL_SET_ERR_MSG_MOD(extack, "E-Switch BW share restore failed");
	}

	return err;
}

static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
				      struct mlx5_esw_rate_group *group,
				      u32 max_rate, struct netlink_ext_ack *extack)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	if (group->max_rate == max_rate)
		return 0;

	err = esw_qos_group_config(esw, group, max_rate, group->bw_share, extack);
	if (err)
		return err;

	group->max_rate = max_rate;

	/* Any unlimited vports in the group should be set to the value of
	 * the group.
	 */
	mlx5_esw_for_each_vport(esw, i, vport) {
		if (!vport->enabled || !vport->qos.enabled ||
		    vport->qos.group != group || vport->qos.max_rate)
			continue;

		err = esw_qos_vport_config(esw, vport, max_rate, vport->qos.bw_share, extack);
		if (err)
			NL_SET_ERR_MSG_MOD(extack,
					   "E-Switch vport implicit rate limit setting failed");
	}

	return err;
}

static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport,
					      u32 max_rate, u32 bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
	struct mlx5_esw_rate_group *group = vport->qos.group;
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	u32 parent_tsar_ix;
	int err;

	parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_tsar_ix);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 &vport->qos.esw_tsar_ix);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);
		return err;
	}

	return 0;
}

static int esw_qos_update_group_scheduling_element(struct mlx5_eswitch *esw,
						   struct mlx5_vport *vport,
						   struct mlx5_esw_rate_group *curr_group,
						   struct mlx5_esw_rate_group *new_group,
						   struct netlink_ext_ack *extack)
{
	u32 max_rate;
	int err;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  vport->qos.esw_tsar_ix);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR vport element failed");
		return err;
	}

	vport->qos.group = new_group;
	max_rate = vport->qos.max_rate ? vport->qos.max_rate : new_group->max_rate;

	/* If the vport is unlimited, we set the group's value.
	 * Therefore, if the group is limited it will apply to
	 * the vport as well; if not, the vport remains unlimited.
	 */
	err = esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "E-Switch vport group set failed.");
		goto err_sched;
	}

	return 0;

err_sched:
	vport->qos.group = curr_group;
	max_rate = vport->qos.max_rate ? vport->qos.max_rate : curr_group->max_rate;
	if (esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share))
		esw_warn(esw->dev, "E-Switch vport group restore failed (vport=%d)\n",
			 vport->vport);

	return err;
}

static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport,
				      struct mlx5_esw_rate_group *group,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_esw_rate_group *new_group, *curr_group;
	int err;

	if (!vport->enabled)
		return -EINVAL;

	curr_group = vport->qos.group;
	new_group = group ?: esw->qos.group0;
	if (curr_group == new_group)
		return 0;

	err = esw_qos_update_group_scheduling_element(esw, vport, curr_group, new_group, extack);
	if (err)
		return err;

	/* Recalculate bw share weights of old and new groups */
	if (vport->qos.bw_share || new_group->bw_share) {
		esw_qos_normalize_vports_min_rate(esw, curr_group, extack);
		esw_qos_normalize_vports_min_rate(esw, new_group, extack);
	}

	return 0;
}

static struct mlx5_esw_rate_group *
__esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
	struct mlx5_esw_rate_group *group;
	u32 divider;
	int err;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
		 esw->qos.root_tsar_ix);
	err = mlx5_create_scheduling_element_cmd(esw->dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 tsar_ctx,
						 &group->tsar_ix);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for group failed");
		goto err_sched_elem;
	}

	list_add_tail(&group->list, &esw->qos.groups);

	divider = esw_qos_calculate_min_rate_divider(esw, group, true);
	if (divider) {
		err = esw_qos_normalize_groups_min_rate(esw, divider, extack);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "E-Switch groups normalization failed");
			goto err_min_rate;
		}
	}
	trace_mlx5_esw_group_qos_create(esw->dev, group, group->tsar_ix);

	return group;

err_min_rate:
	list_del(&group->list);
	if (mlx5_destroy_scheduling_element_cmd(esw->dev,
						SCHEDULING_HIERARCHY_E_SWITCH,
						group->tsar_ix))
		NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
err_sched_elem:
	kfree(group);
	return ERR_PTR(err);
}

static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack);
static void esw_qos_put(struct mlx5_eswitch *esw);

static struct mlx5_esw_rate_group *
esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
	struct mlx5_esw_rate_group *group;
	int err;

	if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
		return ERR_PTR(-EOPNOTSUPP);

	err = esw_qos_get(esw, extack);
	if (err)
		return ERR_PTR(err);

	group = __esw_qos_create_rate_group(esw, extack);
	if (IS_ERR(group))
		esw_qos_put(esw);

	return group;
}

static int __esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
					struct mlx5_esw_rate_group *group,
					struct netlink_ext_ack *extack)
{
	u32 divider;
	int err;

	list_del(&group->list);

	divider = esw_qos_calculate_min_rate_divider(esw, NULL, true);
	err = esw_qos_normalize_groups_min_rate(esw, divider, extack);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "E-Switch groups' normalization failed");

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  group->tsar_ix);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed");

	trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix);

	kfree(group);

	return err;
}

static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
				      struct mlx5_esw_rate_group *group,
				      struct netlink_ext_ack *extack)
{
	int err;

	err = __esw_qos_destroy_rate_group(esw, group, extack);
	esw_qos_put(esw);

	return err;
}

static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
{
	switch (type) {
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_TASR;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
	}
	return false;
}

static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
	struct mlx5_core_dev *dev = esw->dev;
	__be32 *attr;
	int err;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return -EOPNOTSUPP;

	if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
		return -EOPNOTSUPP;

	MLX5_SET(scheduling_context, tsar_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);

	/* The TSAR type is carried in the upper bits of the first
	 * element_attributes dword.
	 */
	attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
	*attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 tsar_ctx,
						 &esw->qos.root_tsar_ix);
	if (err) {
		esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
		return err;
	}

	INIT_LIST_HEAD(&esw->qos.groups);
	if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
		esw->qos.group0 = __esw_qos_create_rate_group(esw, extack);
		if (IS_ERR(esw->qos.group0)) {
			esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n",
				 PTR_ERR(esw->qos.group0));
			err = PTR_ERR(esw->qos.group0);
			goto err_group0;
		}
	}
	refcount_set(&esw->qos.refcnt, 1);

	return 0;

err_group0:
	if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
						esw->qos.root_tsar_ix))
		esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");

	return err;
}

static void esw_qos_destroy(struct mlx5_eswitch *esw)
{
	int err;

	if (esw->qos.group0)
		__esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL);

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  esw->qos.root_tsar_ix);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
}

static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
	int err = 0;

	lockdep_assert_held(&esw->state_lock);

	if (!refcount_inc_not_zero(&esw->qos.refcnt)) {
		/* esw_qos_create() sets the refcount to 1 only on success.
		 * No need to decrement on failure.
		 */
		err = esw_qos_create(esw, extack);
	}

	return err;
}

static void esw_qos_put(struct mlx5_eswitch *esw)
{
	lockdep_assert_held(&esw->state_lock);
	if (refcount_dec_and_test(&esw->qos.refcnt))
		esw_qos_destroy(esw);
}
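
/* Callers pair esw_qos_get() with esw_qos_put() under esw->state_lock:
 * esw_qos_vport_enable() takes a reference that mlx5_esw_qos_vport_disable()
 * drops, and group create/destroy do the same, so the root TSAR exists
 * exactly as long as at least one group or QoS-enabled vport needs it.
 */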

static int esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
{
	int err;

	lockdep_assert_held(&esw->state_lock);
	if (vport->qos.enabled)
		return 0;

	err = esw_qos_get(esw, extack);
	if (err)
		return err;

	vport->qos.group = esw->qos.group0;

	err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share);
	if (err)
		goto err_out;

	vport->qos.enabled = true;
	trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate);

	return 0;

err_out:
	esw_qos_put(esw);

	return err;
}

void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int err;

	lockdep_assert_held(&esw->state_lock);
	if (!vport->qos.enabled)
		return;
	WARN(vport->qos.group && vport->qos.group != esw->qos.group0,
	     "Disabling QoS on port before detaching it from group");

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  vport->qos.esw_tsar_ix);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);

	memset(&vport->qos, 0, sizeof(vport->qos));
	trace_mlx5_esw_vport_qos_destroy(vport);

	esw_qos_put(esw);
}

int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				u32 max_rate, u32 min_rate)
{
	int err;

	lockdep_assert_held(&esw->state_lock);
	err = esw_qos_vport_enable(esw, vport, 0, 0, NULL);
	if (err)
		return err;

	err = esw_qos_set_vport_min_rate(esw, vport, min_rate, NULL);
	if (!err)
		err = esw_qos_set_vport_max_rate(esw, vport, max_rate, NULL);

	return err;
}

static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
{
	struct ethtool_link_ksettings lksettings;
	struct net_device *slave, *master;
	u32 speed = SPEED_UNKNOWN;

	/* Lock ensures a stable reference to master and slave netdevice
	 * while port speed of master is queried.
	 */
	ASSERT_RTNL();

	slave = mlx5_uplink_netdev_get(mdev);
	if (!slave)
		goto out;

	master = netdev_master_upper_dev_get(slave);
	if (master && !__ethtool_get_link_ksettings(master, &lksettings))
		speed = lksettings.base.speed;

out:
	return speed;
}

static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max,
					   bool hold_rtnl_lock, struct netlink_ext_ack *extack)
{
	int err;

	if (!mlx5_lag_is_active(mdev))
		goto skip_lag;

	if (hold_rtnl_lock)
		rtnl_lock();

	*link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev);

	if (hold_rtnl_lock)
		rtnl_unlock();

	if (*link_speed_max != (u32)SPEED_UNKNOWN)
		return 0;

skip_lag:
	err = mlx5_port_max_linkspeed(mdev, link_speed_max);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");

	return err;
}

static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev,
					  const char *name, u32 link_speed_max,
					  u64 value, struct netlink_ext_ack *extack)
{
	if (value > link_speed_max) {
		pr_err("%s rate value %lluMbps exceeds link maximum speed %u.\n",
		       name, value, link_speed_max);
		NL_SET_ERR_MSG_MOD(extack, "TX rate value exceeds link maximum speed");
		return -EINVAL;
	}

	return 0;
}

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
{
	u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
	struct mlx5_vport *vport;
	u32 link_speed_max;
	u32 bitmask;
	int err;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	if (rate_mbps) {
		err = mlx5_esw_qos_max_link_speed_get(esw->dev, &link_speed_max, false, NULL);
		if (err)
			return err;

		err = mlx5_esw_qos_link_speed_verify(esw->dev, "Police",
						     link_speed_max, rate_mbps, NULL);
		if (err)
			return err;
	}

	mutex_lock(&esw->state_lock);
	if (!vport->qos.enabled) {
		/* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */
		err = esw_qos_vport_enable(esw, vport, rate_mbps, vport->qos.bw_share, NULL);
	} else {
		MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);

		bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
		err = mlx5_modify_scheduling_element_cmd(esw->dev,
							 SCHEDULING_HIERARCHY_E_SWITCH,
							 ctx,
							 vport->qos.esw_tsar_ix,
							 bitmask);
	}
	mutex_unlock(&esw->state_lock);

	return err;
}

#define MLX5_LINKSPEED_UNIT 125000 /* 1Mbps in Bps */

/* Convert a bytes-per-second value passed by pointer into megabits per
 * second, rewriting it in place. Returns an error if the converted rate
 * exceeds the link speed or is not a whole multiple of 1 Mbps.
 */
static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *name,
					u64 *rate, struct netlink_ext_ack *extack)
{
	u32 link_speed_max, remainder;
	u64 value;
	int err;

	value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder);
	if (remainder) {
		pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
		       name, *rate);
		NL_SET_ERR_MSG_MOD(extack, "TX rate value not in link speed units of 1Mbps");
		return -EINVAL;
	}

	err = mlx5_esw_qos_max_link_speed_get(mdev, &link_speed_max, true, extack);
	if (err)
		return err;

	err = mlx5_esw_qos_link_speed_verify(mdev, name, link_speed_max, value, extack);
	if (err)
		return err;

	*rate = value;
	return 0;
}
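
/* Conversion example (illustrative values): devlink passes rates in
 * bytes/sec, so 1250000000 Bps / 125000 = 10000 Mbps with remainder 0 and
 * is accepted; a value such as 1250000100 Bps leaves a remainder of 100
 * and is rejected with -EINVAL.
 */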

/* Eswitch devlink rate API */
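
/* Typical usage from userspace (illustrative devlink invocations; the
 * device name, node name and values are assumptions, not taken from this
 * driver):
 *
 *   devlink port function rate add pci/0000:03:00.0/mygroup
 *   devlink port function rate set pci/0000:03:00.0/mygroup \
 *           tx_share 10Gbit tx_max 40Gbit
 *   devlink port function rate set pci/0000:03:00.0/1 parent mygroup
 *
 * These map to the node_new, node tx_share/tx_max set and parent_set
 * callbacks implemented below.
 */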

int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
					    u64 tx_share, struct netlink_ext_ack *extack)
{
	struct mlx5_vport *vport = priv;
	struct mlx5_eswitch *esw;
	int err;

	esw = vport->dev->priv.eswitch;
	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	err = esw_qos_devlink_rate_to_mbps(vport->dev, "tx_share", &tx_share, extack);
	if (err)
		return err;

	mutex_lock(&esw->state_lock);
	err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
	if (err)
		goto unlock;

	err = esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
					  u64 tx_max, struct netlink_ext_ack *extack)
{
	struct mlx5_vport *vport = priv;
	struct mlx5_eswitch *esw;
	int err;

	esw = vport->dev->priv.eswitch;
	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	err = esw_qos_devlink_rate_to_mbps(vport->dev, "tx_max", &tx_max, extack);
	if (err)
		return err;

	mutex_lock(&esw->state_lock);
	err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
	if (err)
		goto unlock;

	err = esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
					    u64 tx_share, struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(rate_node->devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_esw_rate_group *group = priv;
	int err;

	err = esw_qos_devlink_rate_to_mbps(dev, "tx_share", &tx_share, extack);
	if (err)
		return err;

	mutex_lock(&esw->state_lock);
	err = esw_qos_set_group_min_rate(esw, group, tx_share, extack);
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
					  u64 tx_max, struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(rate_node->devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_esw_rate_group *group = priv;
	int err;

	err = esw_qos_devlink_rate_to_mbps(dev, "tx_max", &tx_max, extack);
	if (err)
		return err;

	mutex_lock(&esw->state_lock);
	err = esw_qos_set_group_max_rate(esw, group, tx_max, extack);
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
				   struct netlink_ext_ack *extack)
{
	struct mlx5_esw_rate_group *group;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(rate_node->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_OFFLOADS) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Rate node creation supported only in switchdev mode");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	group = esw_qos_create_rate_group(esw, extack);
	if (IS_ERR(group)) {
		err = PTR_ERR(group);
		goto unlock;
	}

	*priv = group;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
				   struct netlink_ext_ack *extack)
{
	struct mlx5_esw_rate_group *group = priv;
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(rate_node->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->state_lock);
	err = esw_qos_destroy_rate_group(esw, group, extack);
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    struct mlx5_esw_rate_group *group,
				    struct netlink_ext_ack *extack)
{
	int err = 0;

	mutex_lock(&esw->state_lock);
	if (!vport->qos.enabled && !group)
		goto unlock;

	err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
	if (!err)
		err = esw_qos_vport_update_group(esw, vport, group, extack);
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
				     struct devlink_rate *parent,
				     void *priv, void *parent_priv,
				     struct netlink_ext_ack *extack)
{
	struct mlx5_esw_rate_group *group;
	struct mlx5_vport *vport = priv;

	if (!parent)
		return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch,
						       vport, NULL, extack);

	group = parent_priv;
	return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch, vport, group, extack);
}