1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/errno.h>
6 #include <linux/netdevice.h>
7 #include <net/pkt_cls.h>
11 #include "spectrum_span.h"
/* Map a PRIO/ETS band number to the hardware traffic class backing it.
 * Band numbering and traffic-class numbering run in opposite directions,
 * hence the reversal. Arguments are parenthesized so that expression
 * arguments (e.g. "a + b") expand correctly.
 */
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)
/* Same mapping for a 1-based child/class index. */
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS((child) - 1)
/* Kinds of qdisc this driver can offload. Compared on replace to decide
 * whether an existing offload may be reconfigured in place (see
 * mlxsw_sp_qdisc_replace()).
 */
enum mlxsw_sp_qdisc_type {
	MLXSW_SP_QDISC_NO_QDISC,
struct mlxsw_sp_qdisc;

/* Per-qdisc-type offload callbacks. A type implements only the callbacks
 * that apply to it; call sites check each pointer for NULL before invoking.
 */
struct mlxsw_sp_qdisc_ops {
	enum mlxsw_sp_qdisc_type type;
	/* Validate user parameters before any hardware change. */
	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
	/* Program the qdisc configuration into the device. */
	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* Fill generic qdisc statistics. */
	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr);
	/* Fill type-specific extended statistics. */
	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
	/* Re-base the cached statistics counters to the current HW values. */
	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* unoffload - to be used for a qdisc that stops being offloaded
	 * without being destroyed.
	 */
	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	/* Resolve a classid under this qdisc to the child qdisc slot. */
	struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
	unsigned int num_classes;

	/* Parent-side queries: which priorities / which traffic class a
	 * given child is bound to.
	 */
	u8 (*get_prio_bitmap)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct mlxsw_sp_qdisc *child);
	int (*get_tclass_num)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct mlxsw_sp_qdisc *child);
/* Per-band state of an offloaded ETS/PRIO qdisc. */
struct mlxsw_sp_qdisc_ets_band {

/* Band table for an ETS/PRIO qdisc; one slot per possible 802.1Q TC. */
struct mlxsw_sp_qdisc_ets_data {
	struct mlxsw_sp_qdisc_ets_band bands[IEEE_8021QAZ_MAX_TCS];
/* Driver-side shadow of one offloaded qdisc. */
struct mlxsw_sp_qdisc {

	/* Cached counter baselines; advanced as deltas are reported. */
	struct mlxsw_sp_qdisc_stats {

	/* Band data; only meaningful for ETS/PRIO qdiscs. */
	struct mlxsw_sp_qdisc_ets_data *ets_data;

	struct mlxsw_sp_qdisc_ops *ops; /* NULL when the slot is empty. */
	struct mlxsw_sp_qdisc *parent;
	struct mlxsw_sp_qdisc *qdiscs; /* kcalloc'ed array of num_classes children. */
	unsigned int num_classes;
/* Per-port qdisc offload state. */
struct mlxsw_sp_qdisc_state {
	struct mlxsw_sp_qdisc root_qdisc;

	/* When a PRIO or ETS are added, the invisible FIFOs in their bands are
	 * created first. When notifications for these FIFOs arrive, it is not
	 * known what qdisc their parent handle refers to. It could be a
	 * newly-created PRIO that will replace the currently-offloaded one, or
	 * it could be e.g. a RED that will be attached below it.
	 *
	 * As the notifications start to arrive, use them to note what the
	 * future parent handle is, and keep track of which child FIFOs were
	 * seen. Then when the parent is known, retroactively offload those
	 * FIFOs.
	 */
	bool future_fifos[IEEE_8021QAZ_MAX_TCS];
	struct mutex lock; /* Protects qdisc state. */
/* True iff this slot holds an offloaded qdisc with the given handle. */
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle)
	return mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->handle == handle;
/* Pre-order walk of the qdisc tree rooted at @qdisc. Returns the first
 * qdisc for which @pre returns non-NULL, recursing into child classes
 * otherwise.
 */
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_walk(struct mlxsw_sp_qdisc *qdisc,
		    struct mlxsw_sp_qdisc *(*pre)(struct mlxsw_sp_qdisc *,
	struct mlxsw_sp_qdisc *tmp;

	tmp = pre(qdisc, data);

	/* Recurse into each child class. */
	for (i = 0; i < qdisc->num_classes; i++) {
		tmp = &qdisc->qdiscs[i];
		tmp = mlxsw_sp_qdisc_walk(tmp, pre, data);
/* Walk callback: if this qdisc's handle carries the major number of the
 * sought classid, ask the qdisc to resolve the class (minor number) to a
 * child slot.
 */
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
	u32 parent = *(u32 *)data;

	if (qdisc->ops && TC_H_MAJ(qdisc->handle) == TC_H_MAJ(parent)) {
		if (qdisc->ops->find_class)
			return qdisc->ops->find_class(qdisc, parent);
/* Resolve a TC "parent" classid to the driver qdisc slot it denotes;
 * TC_H_ROOT maps directly to the root slot.
 */
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;

	if (parent == TC_H_ROOT)
		return &qdisc_state->root_qdisc;

	return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
				   mlxsw_sp_qdisc_walk_cb_find, &parent);
/* Walk callback: match an offloaded qdisc by its exact handle. */
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_walk_cb_find_by_handle(struct mlxsw_sp_qdisc *qdisc, void *data)
	u32 handle = *(u32 *)data;

	if (qdisc->ops && qdisc->handle == handle)
/* Find the offloaded qdisc with the given handle anywhere in the tree. */
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;

	return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
				   mlxsw_sp_qdisc_walk_cb_find_by_handle,
/* When a qdisc goes away, subtract its cached backlog from every
 * ancestor's baseline so their totals stay consistent.
 */
mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
	struct mlxsw_sp_qdisc *tmp;

	for (tmp = mlxsw_sp_qdisc->parent; tmp; tmp = tmp->parent)
		tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
/* Ask the parent which switch priorities are steered to this qdisc. */
static u8 mlxsw_sp_qdisc_get_prio_bitmap(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
	struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;

	return parent->ops->get_prio_bitmap(parent, mlxsw_sp_qdisc);
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0

/* Ask the parent which hardware traffic class backs this qdisc. */
static int mlxsw_sp_qdisc_get_tclass_num(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
	struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;

		/* Fall back to the default class when the parent cannot be
		 * consulted (guard condition precedes this return).
		 */
		return MLXSW_SP_PORT_DEFAULT_TCLASS;
	return parent->ops->get_tclass_num(parent, mlxsw_sp_qdisc);
/* Tear down an offloaded qdisc and all of its children, restoring the
 * default (DCB) headroom configuration when the root qdisc goes away,
 * and clear the slot for reuse.
 */
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;

	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;

		/* Root is going away: hand headroom back to DCB mode. */
		hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
		err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);

	if (!mlxsw_sp_qdisc->ops)

	/* Destroy children depth-first before this qdisc itself. */
	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++)
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
				       &mlxsw_sp_qdisc->qdiscs[i]);
	mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
	if (mlxsw_sp_qdisc->ops->destroy)
		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
	if (mlxsw_sp_qdisc->ops->clean_stats)
		mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);

	/* Mark the slot empty. */
	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	mlxsw_sp_qdisc->num_classes = 0;
	kfree(mlxsw_sp_qdisc->qdiscs);
	mlxsw_sp_qdisc->qdiscs = NULL;
	/* A headroom reconfiguration error takes precedence. */
	return err_hdroom ?: err;
/* Offload a qdisc into an empty slot: validate parameters, allocate the
 * child array, switch headroom to TC mode for a root qdisc, then invoke
 * the type's replace callback. Unwinds everything on failure.
 */
static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 struct mlxsw_sp_qdisc_ops *ops, void *params)
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	struct mlxsw_sp_hdroom orig_hdroom;

	err = ops->check_params(mlxsw_sp_port, params);

	if (ops->num_classes) {
		mlxsw_sp_qdisc->qdiscs = kcalloc(ops->num_classes,
						 sizeof(*mlxsw_sp_qdisc->qdiscs),
		if (!mlxsw_sp_qdisc->qdiscs)

		/* Wire each child slot back to its parent. */
		for (i = 0; i < ops->num_classes; i++)
			mlxsw_sp_qdisc->qdiscs[i].parent = mlxsw_sp_qdisc;

	/* Keep the original headroom so it can be restored on error. */
	orig_hdroom = *mlxsw_sp_port->hdroom;
	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = orig_hdroom;

		hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

		err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
			goto err_hdroom_configure;

	mlxsw_sp_qdisc->num_classes = ops->num_classes;
	mlxsw_sp_qdisc->ops = ops;
	mlxsw_sp_qdisc->handle = handle;
	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);

	/* Error unwind: empty the slot and restore headroom. */
	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	mlxsw_sp_qdisc->num_classes = 0;
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
err_hdroom_configure:
	kfree(mlxsw_sp_qdisc->qdiscs);
	mlxsw_sp_qdisc->qdiscs = NULL;
/* Reconfigure an already-offloaded qdisc of the same type in place.
 * On failure the qdisc is unoffloaded and the slot destroyed.
 */
mlxsw_sp_qdisc_change(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params)
	struct mlxsw_sp_qdisc_ops *ops = mlxsw_sp_qdisc->ops;

	err = ops->check_params(mlxsw_sp_port, params);

	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);

	/* Check if the Qdisc changed. That includes a situation where an
	 * invisible Qdisc replaces another one, or is being added for the
	 * first time.
	 */
	if (mlxsw_sp_qdisc->handle != handle) {
		if (ops->clean_stats)
			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);

	mlxsw_sp_qdisc->handle = handle;

	/* Error path: undo the offload. */
	ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
/* Top-level replace: reuse the slot in place when the offloaded type
 * matches, otherwise destroy the old qdisc and create the new one.
 */
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
		       struct mlxsw_sp_qdisc_ops *ops, void *params)
	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
		/* In case this location contained a different qdisc of the
		 * same type we can override the old qdisc configuration.
		 * Otherwise, we need to remove the old qdisc before setting
		 * the new one.
		 */
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);

	if (!mlxsw_sp_qdisc->ops)
		return mlxsw_sp_qdisc_create(mlxsw_sp_port, handle,
					     mlxsw_sp_qdisc, ops, params);

	return mlxsw_sp_qdisc_change(mlxsw_sp_port, handle,
				     mlxsw_sp_qdisc, params);
/* Dispatch a stats request to the qdisc's get_stats callback, if any. */
mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr)
	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	    mlxsw_sp_qdisc->ops->get_stats)
		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
/* Dispatch an xstats request to the qdisc's get_xstats callback, if any. */
mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	    mlxsw_sp_qdisc->ops->get_xstats)
		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
/* Total backlog for a traffic class; the two indices 8 apart presumably
 * cover the unicast and multicast queues of the class — TODO confirm.
 */
mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
	return xstats->backlog[tclass_num] +
	       xstats->backlog[tclass_num + 8];
/* Total tail drops for a traffic class; same dual-queue indexing as
 * mlxsw_sp_xstats_backlog() above.
 */
mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
	return xstats->tail_drop[tclass_num] +
	       xstats->tail_drop[tclass_num + 8];
/* Accumulate tx packet/byte counters over the priorities set in
 * @prio_bitmap into the caller-provided totals.
 */
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
				       u8 prio_bitmap, u64 *tx_packets,

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (prio_bitmap & BIT(i)) {
			*tx_packets += xstats->tx_packets[i];
			*tx_bytes += xstats->tx_bytes[i];
/* Gather the absolute hardware counters relevant to this qdisc's traffic
 * class and priorities, adding into the caller's accumulators.
 */
mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				u64 *p_tx_bytes, u64 *p_tx_packets,
				u64 *p_drops, u64 *p_backlog)
	struct mlxsw_sp_port_xstats *xstats;
	u64 tx_bytes, tx_packets;

	prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
					       &tx_packets, &tx_bytes);

	*p_tx_packets += tx_packets;
	*p_tx_bytes += tx_bytes;
	/* Drops are the sum of WRED drops and tail drops for the class. */
	*p_drops += xstats->wred_drop[tclass_num] +
		    mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
	*p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
/* Turn absolute hardware counters into deltas against the stored base,
 * report the deltas to TC, and advance the base so the next call reports
 * only new activity. Backlog is converted from cells to bytes.
 */
mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    u64 tx_bytes, u64 tx_packets,
			    u64 drops, u64 backlog,
			    struct tc_qopt_offload_stats *stats_ptr)
	struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;

	tx_bytes -= stats_base->tx_bytes;
	tx_packets -= stats_base->tx_packets;
	drops -= stats_base->drops;
	backlog -= stats_base->backlog;

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->drops += drops;
	stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);

	stats_base->backlog += backlog;
	stats_base->drops += drops;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
/* Collect this qdisc's class counters and report them as deltas. */
mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    struct tc_qopt_offload_stats *stats_ptr)

	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
					&tx_bytes, &tx_packets,
	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
				    tx_bytes, tx_packets, drops, backlog,
/* Program RED thresholds into the CWTP register and enable the profile
 * for the traffic class via CWTPM, selecting drop (WRED) and/or ECN
 * marking behavior.
 */
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
				  u32 probability, bool is_wred, bool is_ecn)
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
	/* Thresholds are rounded up to the register's minimum granularity. */
	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
/* Reset the congestion profile for a traffic class, disabling RED/ECN. */
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
/* Snapshot the current hardware counters as the zero point for future
 * RED statistics queries: both the generic base and the RED xstats base.
 */
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *red_base;

	prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;
	red_base = &mlxsw_sp_qdisc->xstats_base.red;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
					       &stats_base->tx_packets,
					       &stats_base->tx_bytes);
	red_base->prob_mark = xstats->tc_ecn[tclass_num];
	red_base->prob_drop = xstats->wred_drop[tclass_num];
	red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);

	/* Derived baselines: marks + early drops, and all drop kinds. */
	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
	stats_base->drops = red_base->prob_drop + red_base->pdrop;

	stats_base->backlog = 0;
/* RED destroy: disable the congestion profile on the backing class. */
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
	int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,

	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
593 mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
596 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
597 struct tc_red_qopt_offload_params *p = params;
599 if (p->min > p->max) {
600 dev_err(mlxsw_sp->bus_info->dev,
601 "spectrum: RED: min %u is bigger then max %u\n", p->min,
605 if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
606 GUARANTEED_SHARED_BUFFER)) {
607 dev_err(mlxsw_sp->bus_info->dev,
608 "spectrum: RED: max value %u is too big\n", p->max);
611 if (p->min == 0 || p->max == 0) {
612 dev_err(mlxsw_sp->bus_info->dev,
613 "spectrum: RED: 0 value is illegal for min and max\n");
/* Program the RED configuration for this qdisc's traffic class. */
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,

	/* calculate probability in percentage; p->probability appears to be
	 * a fixed-point fraction scaled by 2^32, reduced in two 2^16 steps
	 * to avoid 64-bit overflow — TODO confirm the scaling convention.
	 */
	prob = p->probability;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
	/* Thresholds are converted from bytes to buffer cells. */
	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
						 !p->is_nodrop, p->is_ecn);
/* On unoffload, hand the cached backlog (converted to bytes) back to the
 * software qdisc's qstats and reset our baseline.
 */
mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct gnet_stats_queue *qstats)

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	qstats->backlog -= backlog;
	mlxsw_sp_qdisc->stats_base.backlog = 0;
/* RED unoffload: delegate to the generic leaf unoffload helper. */
mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
	struct tc_red_qopt_offload_params *p = params;

	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
/* Report RED extended stats as deltas since the stored baselines, then
 * advance the baselines by the same amounts.
 */
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *res = xstats_ptr;
	int early_drops, marks, pdrops;

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
	marks = xstats->tc_ecn[tclass_num] - xstats_base->prob_mark;
	pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -

	res->pdrop += pdrops;
	res->prob_drop += early_drops;
	res->prob_mark += marks;

	xstats_base->pdrop += pdrops;
	xstats_base->prob_drop += early_drops;
	xstats_base->prob_mark += marks;
/* Generic RED stats: reuse the class stats helper, then add overlimits
 * (WRED drops + ECN marks) as a delta against the stored base.
 */
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	overlimits = xstats->wred_drop[tclass_num] +
		     xstats->tc_ecn[tclass_num] - stats_base->overlimits;

	stats_ptr->qstats->overlimits += overlimits;
	stats_base->overlimits += overlimits;
/* Leaf qdiscs (RED, TBF, FIFO) have no classes to resolve. */
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
/* Offload callbacks for the RED qdisc. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
	.find_class = mlxsw_sp_qdisc_leaf_find_class,
/* Forward declaration; the graft handler is defined later in the file. */
static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				u8 band, u32 child_handle);
/* Dispatch a TC RED offload request to the matching slot and command
 * handler. Must be called with the per-port qdisc lock held.
 */
static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_red_qopt_offload *p)
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);

	if (p->command == TC_RED_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      &mlxsw_sp_qdisc_ops_red,

	/* All other commands must address the currently offloaded handle. */
	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))

	switch (p->command) {
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
/* Entry point for TC RED offload; serializes all qdisc offload work
 * under the per-port qdisc lock.
 */
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_red_qopt_offload *p)

	mutex_lock(&mlxsw_sp_port->qdisc->lock);
	err = __mlxsw_sp_setup_tc_red(mlxsw_sp_port, p);
	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
/* Re-base a leaf qdisc's generic stats to the current hardware counters,
 * zeroing the backlog baseline.
 */
mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
	u64 backlog_cells = 0;

	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
					&tx_bytes, &tx_packets,
					&drops, &backlog_cells);

	mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
	mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
	mlxsw_sp_qdisc->stats_base.drops = drops;
	mlxsw_sp_qdisc->stats_base.backlog = 0;
/* TBF destroy: disable the subgroup-level maxrate shaper on the class. */
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
	int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,

	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					     MLXSW_REG_QEEC_HR_SUBGROUP,
					     MLXSW_REG_QEEC_MAS_DIS, 0);
/* Convert a TBF burst size in bytes into the ASIC's encoded burst size,
 * validating that it is an exact power of two within the device range.
 */
mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
		      u32 max_size, u8 *p_burst_size)
	/* TBF burst size is configured in bytes. The ASIC burst size value is
	 * (2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
	 */
	u32 bs512 = max_size / 64;

	/* Demand a power of two. */
	if ((1 << bs) != bs512)

	if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
	    bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
/* Inverse of mlxsw_sp_qdisc_tbf_bs(): encoded burst size back to bytes. */
mlxsw_sp_qdisc_tbf_max_size(u8 bs)
	return (1U << bs) * 64;
mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
	/* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
	 * Kbits/s — divide to KB/s first, then convert to bits.
	 */
	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
/* Validate TBF parameters: the rate must be below the "disabled" maxrate
 * sentinel, and the burst size must encode to a valid power of two.
 */
mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
	struct tc_tbf_qopt_offload_replace_params *p = params;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);

	if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
		dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
			"spectrum: TBF: rate of %lluKbps must be below %u\n",
			rate_kbps, MLXSW_REG_QEEC_MAS_DIS);

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
		u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;

		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
			mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
			mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
/* Program the TBF rate and burst size as a subgroup maxrate shaper on
 * the backing traffic class.
 */
mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
	struct tc_tbf_qopt_offload_replace_params *p = params;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);

	tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
	if (WARN_ON_ONCE(err))
		/* check_params above was supposed to reject this value. */

	/* Configure subgroup shaper, so that both UC and MC traffic is subject
	 * to shaping. That is unlike RED, however UC queue lengths are going to
	 * be different than MC ones due to different pool and quota
	 * configurations, so the configuration is not applicable. For shaper on
	 * the other hand, subjecting the overall stream to the configured
	 * shaper makes sense.
	 */
	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					     MLXSW_REG_QEEC_HR_SUBGROUP,
					     rate_kbps, burst_size);
/* TBF unoffload: delegate to the generic leaf unoffload helper. */
mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
	struct tc_tbf_qopt_offload_replace_params *p = params;

	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
/* TBF stats: the generic per-class stats are sufficient. */
mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
/* Offload callbacks for the TBF qdisc. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
	.type = MLXSW_SP_QDISC_TBF,
	.check_params = mlxsw_sp_qdisc_tbf_check_params,
	.replace = mlxsw_sp_qdisc_tbf_replace,
	.unoffload = mlxsw_sp_qdisc_tbf_unoffload,
	.destroy = mlxsw_sp_qdisc_tbf_destroy,
	.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
	.find_class = mlxsw_sp_qdisc_leaf_find_class,
/* Dispatch a TC TBF offload request. Must be called with the per-port
 * qdisc lock held.
 */
static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_tbf_qopt_offload *p)
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);

	if (p->command == TC_TBF_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      &mlxsw_sp_qdisc_ops_tbf,

	/* All other commands must address the currently offloaded handle. */
	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))

	switch (p->command) {
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
		return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
/* Entry point for TC TBF offload; serialized by the per-port qdisc lock. */
int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_tbf_qopt_offload *p)

	mutex_lock(&mlxsw_sp_port->qdisc->lock);
	err = __mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, p);
	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
/* FIFO has no offloadable parameters to validate. */
mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
/* Replace hook for FIFO; no dedicated hardware configuration is issued
 * here.
 */
mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
/* FIFO stats: the generic per-class stats are sufficient. */
mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
/* Offload callbacks for the FIFO qdisc. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
	.type = MLXSW_SP_QDISC_FIFO,
	.check_params = mlxsw_sp_qdisc_fifo_check_params,
	.replace = mlxsw_sp_qdisc_fifo_replace,
	.get_stats = mlxsw_sp_qdisc_get_fifo_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
/* If a FIFO notification for this band was cached before its parent was
 * known (see struct mlxsw_sp_qdisc_state), retroactively offload it now.
 */
mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
				   u32 handle, unsigned int band,
				   struct mlxsw_sp_qdisc *child_qdisc)
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;

	if (handle == qdisc_state->future_handle &&
	    qdisc_state->future_fifos[band])
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
					      &mlxsw_sp_qdisc_ops_fifo,
/* Start tracking FIFO notifications for a new (future) parent handle. */
mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;

	qdisc_state->future_handle = handle;
	memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
/* Dispatch a TC FIFO offload request. FIFOs whose parent qdisc is not
 * offloaded yet are remembered in the future-FIFO cache instead of being
 * offloaded immediately. Must be called with the per-port qdisc lock held.
 */
static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tc_fifo_qopt_offload *p)
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
	if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
		parent_handle = TC_H_MAJ(p->parent);
		if (parent_handle != qdisc_state->future_handle) {
			/* This notifications is for a different Qdisc than
			 * previously. Wipe the future cache.
			 */
			mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port,

		/* Record (or forget) this band's invisible FIFO. */
		band = TC_H_MIN(p->parent) - 1;
		if (band < IEEE_8021QAZ_MAX_TCS) {
			if (p->command == TC_FIFO_REPLACE)
				qdisc_state->future_fifos[band] = true;
			else if (p->command == TC_FIFO_DESTROY)
				qdisc_state->future_fifos[band] = false;

	if (!mlxsw_sp_qdisc)

	if (p->command == TC_FIFO_REPLACE) {
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      &mlxsw_sp_qdisc_ops_fifo, NULL);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))

	switch (p->command) {
	case TC_FIFO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
	case TC_FIFO_REPLACE: /* Handled above. */
/* Entry point for TC FIFO offload; serialized by the per-port qdisc lock. */
int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct tc_fifo_qopt_offload *p)

	mutex_lock(&mlxsw_sp_port->qdisc->lock);
	err = __mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, p);
	mutex_unlock(&mlxsw_sp_port->qdisc->lock);
/* Common ETS/PRIO teardown: point every priority back at the default
 * traffic class, reset the subgroup ETS elements, and free the band data.
 */
static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)

	for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
					  MLXSW_SP_PORT_DEFAULT_TCLASS);
		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,

	kfree(mlxsw_sp_qdisc->ets_data);
	mlxsw_sp_qdisc->ets_data = NULL;
/* PRIO destroy: shares the common ETS teardown path. */
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
/* The device supports at most IEEE_8021QAZ_MAX_TCS bands. */
__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
	if (nbands > IEEE_8021QAZ_MAX_TCS)
/* PRIO parameter check: only the band count needs validation. */
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
	struct tc_prio_qopt_offload_params *p = params;

	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
/* Walk callback: re-base a qdisc's counters while preserving the backlog
 * baseline, which clean_stats callbacks would otherwise zero.
 */
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_walk_cb_clean_stats(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				   void *mlxsw_sp_port)

	if (mlxsw_sp_qdisc->ops) {
		backlog = mlxsw_sp_qdisc->stats_base.backlog;
		if (mlxsw_sp_qdisc->ops->clean_stats)
			mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port,
		mlxsw_sp_qdisc->stats_base.backlog = backlog;
/* Re-base counters for a whole qdisc subtree. */
mlxsw_sp_qdisc_tree_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
	mlxsw_sp_qdisc_walk(mlxsw_sp_qdisc, mlxsw_sp_qdisc_walk_cb_clean_stats,
/* Common ETS/PRIO replace: allocate band data on first use, program the
 * per-band ETS elements and priority->TC mapping, offload any cached
 * future FIFOs, and tear down bands beyond the new band count.
 */
__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     u32 handle, unsigned int nbands,
			     const unsigned int *quanta,
			     const unsigned int *weights,
	struct mlxsw_sp_qdisc_ets_data *ets_data = mlxsw_sp_qdisc->ets_data;
	struct mlxsw_sp_qdisc_ets_band *ets_band;
	struct mlxsw_sp_qdisc *child_qdisc;
	u8 old_priomap, new_priomap;

	ets_data = kzalloc(sizeof(*ets_data), GFP_KERNEL);

	mlxsw_sp_qdisc->ets_data = ets_data;

	/* Fix each band's backing traffic class (reverse mapping). */
	for (band = 0; band < mlxsw_sp_qdisc->num_classes; band++) {
		int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);

		ets_band = &ets_data->bands[band];
		ets_band->tclass_num = tclass_num;

	/* Configure each active band. */
	for (band = 0; band < nbands; band++) {

		child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
		ets_band = &ets_data->bands[band];

		tclass_num = ets_band->tclass_num;
		old_priomap = ets_band->prio_bitmap;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP,
					    tclass_num, 0, !!quanta[band],

		/* Re-point each priority mapped to this band at its class. */
		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			if (priomap[i] == band) {
				new_priomap |= BIT(i);
				if (BIT(i) & old_priomap)

				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,

		ets_band->prio_bitmap = new_priomap;

		/* A changed priomap invalidates the subtree's counter bases. */
		if (old_priomap != new_priomap)
			mlxsw_sp_qdisc_tree_clean_stats(mlxsw_sp_port,

		err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle,

	/* Tear down any bands beyond the new band count. */
	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
		ets_band = &ets_data->bands[band];
		ets_band->prio_bitmap = 0;

		child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);

		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      ets_band->tclass_num, 0, false, 0);

	mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
/* PRIO replace: strict priority is ETS with zero quanta and weights. */
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
	struct tc_prio_qopt_offload_params *p = params;
	unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};

	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
					    handle, p->bands, zeroes,
					    zeroes, p->priomap);
/* On ETS/PRIO unoffload, hand the cached backlog (in bytes) back to the
 * software qdisc's qstats.
 */
__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			       struct gnet_stats_queue *qstats)

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	qstats->backlog -= backlog;
/* PRIO unoffload: shares the common ETS unoffload path. */
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
	struct tc_prio_qopt_offload_params *p = params;

	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
/* Aggregate per-band (child qdisc) TX/drop/backlog counters and fold them
 * into the PRIO/ETS root's reported stats.
 */
1319 mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1320 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1321 struct tc_qopt_offload_stats *stats_ptr)
1323 struct mlxsw_sp_qdisc *tc_qdisc;
/* Sum contributions from every child band. */
1330 for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
1331 tc_qdisc = &mlxsw_sp_qdisc->qdiscs[i];
1332 mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
1333 &tx_bytes, &tx_packets,
1337 mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
1338 tx_bytes, tx_packets, drops, backlog,
/* Reset the stats baseline for a PRIO/ETS qdisc: snapshot the current port
 * TX counters and accumulate tail-drop + WRED drops over all TCs so future
 * readouts report deltas from this point.
 */
1344 mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1345 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1347 struct mlxsw_sp_qdisc_stats *stats_base;
1348 struct mlxsw_sp_port_xstats *xstats;
1349 struct rtnl_link_stats64 *stats;
1352 xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
1353 stats = &mlxsw_sp_port->periodic_hw_stats.stats;
1354 stats_base = &mlxsw_sp_qdisc->stats_base;
1356 stats_base->tx_packets = stats->tx_packets;
1357 stats_base->tx_bytes = stats->tx_bytes;
1359 stats_base->drops = 0;
/* Drops baseline covers both tail drops and WRED drops per TC. */
1360 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1361 stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
1362 stats_base->drops += xstats->wred_drop[i];
1365 mlxsw_sp_qdisc->stats_base.backlog = 0;
/* Map a TC class handle (1-based minor) to the corresponding child qdisc,
 * or fail if the band index is out of range.
 */
1368 static struct mlxsw_sp_qdisc *
1369 mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1372 int child_index = TC_H_MIN(parent);
/* Classes are 1-based; bands are 0-based. */
1373 int band = child_index - 1;
1375 if (band < 0 || band >= mlxsw_sp_qdisc->num_classes)
1377 return &mlxsw_sp_qdisc->qdiscs[band];
/* Recover the band descriptor for a child by its position in the root's
 * qdiscs[] array; WARN (and bail) if the child is not one of our bands.
 */
1380 static struct mlxsw_sp_qdisc_ets_band *
1381 mlxsw_sp_qdisc_ets_get_band(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1382 struct mlxsw_sp_qdisc *child)
/* Pointer arithmetic: child must point into mlxsw_sp_qdisc->qdiscs[]. */
1384 unsigned int band = child - mlxsw_sp_qdisc->qdiscs;
1386 if (WARN_ON(band >= IEEE_8021QAZ_MAX_TCS))
1388 return &mlxsw_sp_qdisc->ets_data->bands[band];
/* get_prio_bitmap callback: priorities mapped to this child's band. */
1392 mlxsw_sp_qdisc_ets_get_prio_bitmap(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1393 struct mlxsw_sp_qdisc *child)
1395 return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->prio_bitmap;
/* get_tclass_num callback: HW traffic class backing this child's band. */
1399 mlxsw_sp_qdisc_ets_get_tclass_num(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1400 struct mlxsw_sp_qdisc *child)
1402 return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->tclass_num;
/* Ops table for offloaded PRIO; shares most callbacks with ETS since both
 * are realized as banded schedulers over IEEE_8021QAZ_MAX_TCS classes.
 */
1405 static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
1406 .type = MLXSW_SP_QDISC_PRIO,
1407 .check_params = mlxsw_sp_qdisc_prio_check_params,
1408 .replace = mlxsw_sp_qdisc_prio_replace,
1409 .unoffload = mlxsw_sp_qdisc_prio_unoffload,
1410 .destroy = mlxsw_sp_qdisc_prio_destroy,
1411 .get_stats = mlxsw_sp_qdisc_get_prio_stats,
1412 .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
1413 .find_class = mlxsw_sp_qdisc_prio_find_class,
1414 .num_classes = IEEE_8021QAZ_MAX_TCS,
1415 .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
1416 .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
/* ETS check_params callback: validate the requested band count via the
 * shared helper.
 */
1420 mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
1423 struct tc_ets_qopt_offload_replace_params *p = params;
1425 return __mlxsw_sp_qdisc_ets_check_params(p->bands);
/* ETS replace callback: unlike PRIO, pass the real quanta and weights so
 * DWRR bands are configured as requested.
 */
1429 mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
1430 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1433 struct tc_ets_qopt_offload_replace_params *p = params;
1435 return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
1436 handle, p->bands, p->quanta,
1437 p->weights, p->priomap);
/* ETS unoffload callback: delegate to the shared unoffload helper. */
1441 mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1442 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1445 struct tc_ets_qopt_offload_replace_params *p = params;
1447 __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
/* ETS destroy callback: thin wrapper over the shared teardown helper. */
1452 mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
1453 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1455 return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
/* Ops table for offloaded ETS; stats/find_class/getters are shared with
 * PRIO, only the params/replace/unoffload/destroy entry points differ.
 */
1458 static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
1459 .type = MLXSW_SP_QDISC_ETS,
1460 .check_params = mlxsw_sp_qdisc_ets_check_params,
1461 .replace = mlxsw_sp_qdisc_ets_replace,
1462 .unoffload = mlxsw_sp_qdisc_ets_unoffload,
1463 .destroy = mlxsw_sp_qdisc_ets_destroy,
1464 .get_stats = mlxsw_sp_qdisc_get_prio_stats,
1465 .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
1466 .find_class = mlxsw_sp_qdisc_prio_find_class,
1467 .num_classes = IEEE_8021QAZ_MAX_TCS,
1468 .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
1469 .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
1472 /* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
1473 * graph is free of cycles). These operations do not change the parent handle
1474 * though, which means it can be incomplete (if there is more than one class
1475 * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was
1476 * linked to a different class and then removed from the original class).
1478 * E.g. consider this sequence of operations:
1480 * # tc qdisc add dev swp1 root handle 1: prio
1481 * # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
1482 * RED: set bandwidth to 10Mbit
1483 * # tc qdisc link dev swp1 handle 13: parent 1:2
1485 * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
1486 * child. But RED will still only claim that 1:3 is its parent. If it's removed
1487 * from that band, its only parent will be 1:2, but it will continue to claim
1488 * that it is in fact 1:3.
1490 * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
1491 * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
1492 * notification to offload the child Qdisc, based on its parent handle, and use
1493 * the graft operation to validate that the class where the child is actually
1494 * grafted corresponds to the parent handle. If the two don't match, we
1495 * unoffload the child.
/* Validate a graft notification against the offloaded tree (see the long
 * comment above): if the grafted child handle does not match what is
 * offloaded at that band, the child is unoffloaded.
 */
1497 static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
1498 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1499 u8 band, u32 child_handle)
1501 struct mlxsw_sp_qdisc *old_qdisc;
/* Fast path: the offloaded child at this band already matches. */
1504 if (band < mlxsw_sp_qdisc->num_classes &&
1505 mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
1508 if (!child_handle) {
1509 /* This is an invisible FIFO replacing the original Qdisc.
1510 * Ignore it--the original Qdisc's destroy will follow.
1515 /* See if the grafted qdisc is already offloaded on any tclass. If so,
1518 old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
1521 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
/* Unoffload whatever is currently offloaded at the grafted class. */
1523 parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
1524 mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
1526 if (!WARN_ON(!mlxsw_sp_qdisc))
1527 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
/* Dispatch a TC_PRIO_* offload command (replace/destroy/stats/graft) for
 * the qdisc found at p->parent. Caller holds the port qdisc lock.
 */
1532 static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
1533 struct tc_prio_qopt_offload *p)
1535 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1537 mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
1538 if (!mlxsw_sp_qdisc)
/* REPLACE may install a new qdisc, so it is handled before the
 * handle-identity check below.
 */
1541 if (p->command == TC_PRIO_REPLACE)
1542 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
1544 &mlxsw_sp_qdisc_ops_prio,
1545 &p->replace_params);
1547 if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
1550 switch (p->command) {
1551 case TC_PRIO_DESTROY:
1552 return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1554 return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1557 return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
1558 p->graft_params.band,
1559 p->graft_params.child_handle);
/* Public entry point for PRIO offload: serialize on the per-port qdisc
 * lock around the real handler.
 */
1565 int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
1566 struct tc_prio_qopt_offload *p)
1570 mutex_lock(&mlxsw_sp_port->qdisc->lock);
1571 err = __mlxsw_sp_setup_tc_prio(mlxsw_sp_port, p);
1572 mutex_unlock(&mlxsw_sp_port->qdisc->lock);
/* Dispatch a TC_ETS_* offload command; mirrors __mlxsw_sp_setup_tc_prio
 * but with the ETS ops table. Caller holds the port qdisc lock.
 */
1577 static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
1578 struct tc_ets_qopt_offload *p)
1580 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1582 mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
1583 if (!mlxsw_sp_qdisc)
/* REPLACE handled before the handle-identity check, as for PRIO. */
1586 if (p->command == TC_ETS_REPLACE)
1587 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
1589 &mlxsw_sp_qdisc_ops_ets,
1590 &p->replace_params);
1592 if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
1595 switch (p->command) {
1596 case TC_ETS_DESTROY:
1597 return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1599 return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1602 return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
1603 p->graft_params.band,
1604 p->graft_params.child_handle);
/* Public entry point for ETS offload: take the per-port qdisc lock around
 * the real handler.
 */
1610 int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
1611 struct tc_ets_qopt_offload *p)
1615 mutex_lock(&mlxsw_sp_port->qdisc->lock);
1616 err = __mlxsw_sp_setup_tc_ets(mlxsw_sp_port, p);
1617 mutex_unlock(&mlxsw_sp_port->qdisc->lock);
/* A qevent flow block: its port bindings, installed matchall entries, and
 * the owning device instance.
 */
1622 struct mlxsw_sp_qevent_block {
1623 struct list_head binding_list;
1624 struct list_head mall_entry_list;
1625 struct mlxsw_sp *mlxsw_sp;
/* One binding of a qevent block to a port/qdisc: identifies the SPAN
 * trigger used and which mall action types are permitted (action_mask).
 */
1628 struct mlxsw_sp_qevent_binding {
1629 struct list_head list;
1630 struct mlxsw_sp_port *mlxsw_sp_port;
1633 enum mlxsw_sp_span_trigger span_trigger;
1634 unsigned int action_mask;
/* Driver-wide list of registered qevent flow block callbacks. */
1637 static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
/* Set up a SPAN session for a qevent binding: acquire an agent, mark the
 * port analyzed, bind the agent to the trigger, and enable the trigger on
 * the binding's traffic class. On success the agent id is returned via
 * the output pointer; errors unwind in reverse order via gotos.
 */
1639 static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
1640 struct mlxsw_sp_mall_entry *mall_entry,
1641 struct mlxsw_sp_qevent_binding *qevent_binding,
1642 const struct mlxsw_sp_span_agent_parms *agent_parms,
1645 enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
1646 struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
1647 struct mlxsw_sp_span_trigger_parms trigger_parms = {};
1652 err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
1656 ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
1657 err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
1659 goto err_analyzed_port_get;
1661 trigger_parms.span_id = span_id;
/* probability_rate of 1 means mirror every matching packet. */
1662 trigger_parms.probability_rate = 1;
1663 err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
1666 goto err_agent_bind;
1668 err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, span_trigger,
1669 qevent_binding->tclass_num);
1671 goto err_trigger_enable;
1673 *p_span_id = span_id;
/* Error unwind: undo in reverse order of acquisition. */
1677 mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
1680 mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
1681 err_analyzed_port_get:
1682 mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
/* Tear down a SPAN session set up by mlxsw_sp_qevent_span_configure(),
 * releasing resources in the reverse order of acquisition.
 */
1686 static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
1687 struct mlxsw_sp_qevent_binding *qevent_binding,
1690 enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
1691 struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
1692 struct mlxsw_sp_span_trigger_parms trigger_parms = {
1697 ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
1699 mlxsw_sp_span_trigger_disable(mlxsw_sp_port, span_trigger,
1700 qevent_binding->tclass_num);
1701 mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
1703 mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
1704 mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
/* Configure a mirror action: SPAN agent targeting the mirror destination
 * netdev; resulting span id stored in the mall entry.
 */
1707 static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
1708 struct mlxsw_sp_mall_entry *mall_entry,
1709 struct mlxsw_sp_qevent_binding *qevent_binding)
1711 struct mlxsw_sp_span_agent_parms agent_parms = {
1712 .to_dev = mall_entry->mirror.to_dev,
1715 return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
1716 &agent_parms, &mall_entry->mirror.span_id);
/* Undo mlxsw_sp_qevent_mirror_configure() for this binding. */
1719 static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
1720 struct mlxsw_sp_mall_entry *mall_entry,
1721 struct mlxsw_sp_qevent_binding *qevent_binding)
1723 mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
/* Configure a trap action: SPAN agent tied to the buffer-drops trap group,
 * inheriting that group's policer if one is set.
 */
1726 static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
1727 struct mlxsw_sp_mall_entry *mall_entry,
1728 struct mlxsw_sp_qevent_binding *qevent_binding)
1730 struct mlxsw_sp_span_agent_parms agent_parms = {
1731 .session_id = MLXSW_SP_SPAN_SESSION_ID_BUFFER,
1735 err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
1736 DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
1737 &agent_parms.policer_enable,
1738 &agent_parms.policer_id);
1742 return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
1743 &agent_parms, &mall_entry->trap.span_id);
/* Undo mlxsw_sp_qevent_trap_configure() for this binding. */
1746 static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
1747 struct mlxsw_sp_mall_entry *mall_entry,
1748 struct mlxsw_sp_qevent_binding *qevent_binding)
1750 mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
/* Configure one mall entry on one binding, after checking the entry's
 * action type against the binding's allowed action mask.
 */
1754 mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
1755 struct mlxsw_sp_mall_entry *mall_entry,
1756 struct mlxsw_sp_qevent_binding *qevent_binding,
1757 struct netlink_ext_ack *extack)
1759 if (!(BIT(mall_entry->type) & qevent_binding->action_mask)) {
1760 NL_SET_ERR_MSG(extack, "Action not supported at this qevent");
1764 switch (mall_entry->type) {
1765 case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
1766 return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
1767 case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
1768 return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
1770 /* This should have been validated away. */
/* Deconfigure one mall entry from one binding, dispatching on action
 * type; mirrors mlxsw_sp_qevent_entry_configure().
 */
1776 static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
1777 struct mlxsw_sp_mall_entry *mall_entry,
1778 struct mlxsw_sp_qevent_binding *qevent_binding)
1780 switch (mall_entry->type) {
1781 case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
1782 return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
1783 case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
1784 return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
/* Apply every mall entry of the block to one binding; on failure, roll
 * back the entries configured so far (reverse iteration).
 */
1792 mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
1793 struct mlxsw_sp_qevent_binding *qevent_binding,
1794 struct netlink_ext_ack *extack)
1796 struct mlxsw_sp_mall_entry *mall_entry;
1799 list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
1800 err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
1801 qevent_binding, extack);
1803 goto err_entry_configure;
/* Unwind partially-applied entries on error. */
1808 err_entry_configure:
1809 list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
1810 mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
/* Remove every mall entry of the block from one binding. */
1815 static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
1816 struct mlxsw_sp_qevent_binding *qevent_binding)
1818 struct mlxsw_sp_mall_entry *mall_entry;
1820 list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
1821 mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
/* Apply the block's entries to all of its bindings; roll back configured
 * bindings on failure.
 */
1826 mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block,
1827 struct netlink_ext_ack *extack)
1829 struct mlxsw_sp_qevent_binding *qevent_binding;
1832 list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
1833 err = mlxsw_sp_qevent_binding_configure(qevent_block,
1837 goto err_binding_configure;
/* Unwind partially-configured bindings on error. */
1842 err_binding_configure:
1843 list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
1844 mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
/* Remove the block's entries from all of its bindings. */
1848 static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
1850 struct mlxsw_sp_qevent_binding *qevent_binding;
1852 list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
1853 mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
/* Find the mall entry installed under the given TC cookie, if any. */
1856 static struct mlxsw_sp_mall_entry *
1857 mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
1859 struct mlxsw_sp_mall_entry *mall_entry;
1861 list_for_each_entry(mall_entry, &block->mall_entry_list, list)
1862 if (mall_entry->cookie == cookie)
/* Install a new matchall rule on a qevent block. Exactly one rule with
 * one singular action (mirred or trap) on chain 0 matching all protocols
 * is supported; HW stats must be disabled. On success the entry is added
 * to the block and pushed to all bindings.
 */
1868 static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
1869 struct mlxsw_sp_qevent_block *qevent_block,
1870 struct tc_cls_matchall_offload *f)
1872 struct mlxsw_sp_mall_entry *mall_entry;
1873 struct flow_action_entry *act;
1876 /* It should not currently be possible to replace a matchall rule. So
1877 * this must be a new rule.
1879 if (!list_empty(&qevent_block->mall_entry_list)) {
1880 NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
1883 if (f->rule->action.num_entries != 1) {
1884 NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
1887 if (f->common.chain_index) {
1888 NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
1891 if (f->common.protocol != htons(ETH_P_ALL)) {
1892 NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
1896 act = &f->rule->action.entries[0];
1897 if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
1898 NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
1902 mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
1905 mall_entry->cookie = f->cookie;
/* Translate the flow action into our internal mall entry type. */
1907 if (act->id == FLOW_ACTION_MIRRED) {
1908 mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
1909 mall_entry->mirror.to_dev = act->dev;
1910 } else if (act->id == FLOW_ACTION_TRAP) {
1911 mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
1913 NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
1915 goto err_unsupported_action;
1918 list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);
1920 err = mlxsw_sp_qevent_block_configure(qevent_block, f->common.extack);
1922 goto err_block_configure;
/* Error unwind. */
1926 err_block_configure:
1927 list_del(&mall_entry->list);
1928 err_unsupported_action:
/* Remove the matchall rule identified by its cookie: deconfigure the
 * whole block (it holds at most one entry) and free the entry.
 */
1933 static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
1934 struct tc_cls_matchall_offload *f)
1936 struct mlxsw_sp_mall_entry *mall_entry;
1938 mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
1942 mlxsw_sp_qevent_block_deconfigure(qevent_block);
1944 list_del(&mall_entry->list);
/* Dispatch a matchall offload command (replace/destroy) for a qevent
 * block.
 */
1948 static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
1949 struct tc_cls_matchall_offload *f)
1951 struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;
1953 switch (f->command) {
1954 case TC_CLSMATCHALL_REPLACE:
1955 return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
1956 case TC_CLSMATCHALL_DESTROY:
1957 mlxsw_sp_qevent_mall_destroy(qevent_block, f);
/* flow_block callback: only classifier type CLSMATCHALL is handled. */
1964 static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
1966 struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
1969 case TC_SETUP_CLSMATCHALL:
1970 return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
/* Allocate and initialize a new qevent block with empty binding and mall
 * entry lists.
 */
1976 static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
1979 struct mlxsw_sp_qevent_block *qevent_block;
1981 qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL);
1985 INIT_LIST_HEAD(&qevent_block->binding_list);
1986 INIT_LIST_HEAD(&qevent_block->mall_entry_list);
1987 qevent_block->mlxsw_sp = mlxsw_sp;
1988 return qevent_block;
/* Free a qevent block; it must have no remaining bindings or entries. */
1992 mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
1994 WARN_ON(!list_empty(&qevent_block->binding_list));
1995 WARN_ON(!list_empty(&qevent_block->mall_entry_list));
1996 kfree(qevent_block);
/* flow_block_cb release hook: destroy the block stored as cb_priv. */
1999 static void mlxsw_sp_qevent_block_release(void *cb_priv)
2001 struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
2003 mlxsw_sp_qevent_block_destroy(qevent_block);
/* Allocate a qevent binding tying a port/qdisc handle/tclass to a SPAN
 * trigger with a mask of permitted action types.
 */
2006 static struct mlxsw_sp_qevent_binding *
2007 mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
2008 enum mlxsw_sp_span_trigger span_trigger,
2009 unsigned int action_mask)
2011 struct mlxsw_sp_qevent_binding *binding;
2013 binding = kzalloc(sizeof(*binding), GFP_KERNEL);
2015 return ERR_PTR(-ENOMEM);
2017 binding->mlxsw_sp_port = mlxsw_sp_port;
2018 binding->handle = handle;
2019 binding->tclass_num = tclass_num;
2020 binding->span_trigger = span_trigger;
2021 binding->action_mask = action_mask;
/* Free a qevent binding. */
2026 mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
/* Find an existing binding on the block matching port, qdisc handle and
 * SPAN trigger; NULL-like fallthrough when none matches (tail elided in
 * this listing).
 */
2031 static struct mlxsw_sp_qevent_binding *
2032 mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
2033 struct mlxsw_sp_port *mlxsw_sp_port,
2035 enum mlxsw_sp_span_trigger span_trigger)
2037 struct mlxsw_sp_qevent_binding *qevent_binding;
2039 list_for_each_entry(qevent_binding, &block->binding_list, list)
2040 if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port &&
2041 qevent_binding->handle == handle &&
2042 qevent_binding->span_trigger == span_trigger)
2043 return qevent_binding;
/* Bind a qevent flow block to a port qdisc: find or create the shared
 * block (refcounted via flow_block_cb), verify the target qdisc is
 * offloaded, create the binding for its traffic class, configure it, and
 * register the block callback the first time around. Errors unwind via
 * gotos in reverse order.
 */
2048 mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
2049 struct flow_block_offload *f,
2050 enum mlxsw_sp_span_trigger span_trigger,
2051 unsigned int action_mask)
2053 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2054 struct mlxsw_sp_qevent_binding *qevent_binding;
2055 struct mlxsw_sp_qevent_block *qevent_block;
2056 struct flow_block_cb *block_cb;
2057 struct mlxsw_sp_qdisc *qdisc;
2058 bool register_block = false;
/* Reuse an existing block callback if one is already registered for
 * this flow block; otherwise create block + callback.
 */
2062 block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
2064 qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
2067 block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
2068 mlxsw_sp_qevent_block_release);
2069 if (IS_ERR(block_cb)) {
2070 mlxsw_sp_qevent_block_destroy(qevent_block);
2071 return PTR_ERR(block_cb);
2073 register_block = true;
2075 qevent_block = flow_block_cb_priv(block_cb);
2077 flow_block_cb_incref(block_cb);
/* The qevent can only be offloaded if the qdisc it hangs off is. */
2079 qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
2081 NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
2083 goto err_find_qdisc;
2086 if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
2089 goto err_binding_exists;
2092 tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, qdisc);
2093 qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port,
2098 if (IS_ERR(qevent_binding)) {
2099 err = PTR_ERR(qevent_binding);
2100 goto err_binding_create;
2103 err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding,
2106 goto err_binding_configure;
2108 list_add(&qevent_binding->list, &qevent_block->binding_list);
2110 if (register_block) {
2111 flow_block_cb_add(block_cb, f);
2112 list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
/* Error unwind: destroy the binding, then drop the block ref. */
2117 err_binding_configure:
2118 mlxsw_sp_qevent_binding_destroy(qevent_binding);
2122 if (!flow_block_cb_decref(block_cb))
2123 flow_block_cb_free(block_cb);
/* Unbind a qevent flow block from a port qdisc: locate the binding,
 * deconfigure and free it, and remove the block callback when the last
 * reference is dropped.
 */
2127 static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
2128 struct flow_block_offload *f,
2129 enum mlxsw_sp_span_trigger span_trigger)
2131 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2132 struct mlxsw_sp_qevent_binding *qevent_binding;
2133 struct mlxsw_sp_qevent_block *qevent_block;
2134 struct flow_block_cb *block_cb;
2136 block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
2139 qevent_block = flow_block_cb_priv(block_cb);
2141 qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
2143 if (!qevent_binding)
2146 list_del(&qevent_binding->list);
2147 mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
2148 mlxsw_sp_qevent_binding_destroy(qevent_binding);
/* Last reference gone: unregister and remove the block callback. */
2150 if (!flow_block_cb_decref(block_cb)) {
2151 flow_block_cb_remove(block_cb, f);
2152 list_del(&block_cb->driver_list);
/* Common entry for qevent flow block commands: dispatch bind/unbind with
 * the caller-supplied trigger and permitted action mask.
 */
2157 mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
2158 struct flow_block_offload *f,
2159 enum mlxsw_sp_span_trigger span_trigger,
2160 unsigned int action_mask)
2162 f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
2164 switch (f->command) {
2165 case FLOW_BLOCK_BIND:
2166 return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f,
2169 case FLOW_BLOCK_UNBIND:
2170 mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
/* Public entry for the early_drop qevent: mirror and trap actions are
 * both permitted on this trigger.
 */
2177 int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
2178 struct flow_block_offload *f)
2180 unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR) |
2181 BIT(MLXSW_SP_MALL_ACTION_TYPE_TRAP);
2183 return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
2184 MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
/* Public entry for the mark (ECN) qevent: only mirror is permitted. */
2188 int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
2189 struct flow_block_offload *f)
2191 unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR);
2193 return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
2194 MLXSW_SP_SPAN_TRIGGER_ECN,
/* Allocate the per-port qdisc offload state and initialize its lock.
 * Paired with mlxsw_sp_tc_qdisc_fini().
 */
2198 int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
2200 struct mlxsw_sp_qdisc_state *qdisc_state;
2202 qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
2206 mutex_init(&qdisc_state->lock);
2207 mlxsw_sp_port->qdisc = qdisc_state;
2211 void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
2213 mutex_destroy(&mlxsw_sp_port->qdisc->lock);
2214 kfree(mlxsw_sp_port->qdisc);