/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Nogah Frankel <nogahf@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
35 #include <linux/kernel.h>
36 #include <linux/errno.h>
37 #include <linux/netdevice.h>
38 #include <net/pkt_cls.h>
/* Map a PRIO band to a hardware traffic class. The mapping is reversed
 * relative to band numbering (band 0 maps to the highest tclass number).
 * Arguments are parenthesized so expression arguments expand correctly.
 */
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)
/* Child qdisc minor numbers are 1-based band indices. */
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS((child) - 1)
/* Kinds of qdisc offloads tracked per port slot; NO_QDISC marks an
 * empty slot.
 */
48 enum mlxsw_sp_qdisc_type {
49 MLXSW_SP_QDISC_NO_QDISC,
/* Per-qdisc-type operations vector. All callbacks receive the port and the
 * qdisc slot being operated on.
 */
54 struct mlxsw_sp_qdisc_ops {
55 enum mlxsw_sp_qdisc_type type;
/* Validate offload parameters before they are applied. */
56 int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
57 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
/* Apply (create or update) the offload with the given parameters. */
59 int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
60 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
/* Tear down the hardware state backing this qdisc. */
61 int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
62 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
63 int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
64 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
65 struct tc_qopt_offload_stats *stats_ptr);
66 int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
67 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
/* Re-baseline cached statistics so future reads report deltas only. */
69 void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
70 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
71 /* unoffload - to be used for a qdisc that stops being offloaded without
74 void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
75 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
/* State of one offloaded qdisc slot (root or per-tclass child). */
78 struct mlxsw_sp_qdisc {
/* Baseline counters; stats are reported as deltas from these. */
85 struct mlxsw_sp_qdisc_stats {
/* NULL ops means the slot is not currently offloaded. */
93 struct mlxsw_sp_qdisc_ops *ops;
/* True iff the slot is offloaded with the given type and TC handle. */
97 mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
98 enum mlxsw_sp_qdisc_type type)
100 return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
101 mlxsw_sp_qdisc->ops->type == type &&
102 mlxsw_sp_qdisc->handle == handle;
/* Resolve the qdisc slot addressed by @parent: TC_H_ROOT selects the
 * port's root qdisc; otherwise the minor number selects a child band
 * under the offloaded root (rejected when @root_only is set).
 */
105 static struct mlxsw_sp_qdisc *
106 mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
109 int tclass, child_index;
111 if (parent == TC_H_ROOT)
112 return mlxsw_sp_port->root_qdisc;
/* Child lookup is only valid when a root is offloaded and @parent's
 * major number references its handle; minor must be a valid band.
 */
114 if (root_only || !mlxsw_sp_port->root_qdisc ||
115 !mlxsw_sp_port->root_qdisc->ops ||
116 TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
117 TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
120 child_index = TC_H_MIN(parent);
121 tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
122 return &mlxsw_sp_port->tclass_qdiscs[tclass];
/* Find an offloaded qdisc (root or any per-tclass child) by TC handle. */
125 static struct mlxsw_sp_qdisc *
126 mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
130 if (mlxsw_sp_port->root_qdisc->handle == handle)
131 return mlxsw_sp_port->root_qdisc;
/* No offloaded root implies no offloaded children either. */
133 if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
136 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
137 if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
138 return &mlxsw_sp_port->tclass_qdiscs[i];
/* Tear down a qdisc offload (if any) and mark its slot empty. */
144 mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
145 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
152 if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
153 err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
/* Reset the slot unconditionally, even if per-type destroy failed. */
156 mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
157 mlxsw_sp_qdisc->ops = NULL;
/* Install or update a qdisc offload in @mlxsw_sp_qdisc using @ops.
 * Validates parameters first, then applies them; on failure the offload
 * is unwound (unoffload + destroy).
 */
162 mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
163 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
164 struct mlxsw_sp_qdisc_ops *ops, void *params)
/* A slot holding a qdisc of a different type must be destroyed before
 * the new type is installed.
 */
168 if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
169 /* In case this location contained a different qdisc of the
170 * same type we can override the old qdisc configuration.
171 * Otherwise, we need to remove the old qdisc before setting the
174 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
175 err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
179 err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
/* A new handle means a new qdisc instance: bind the ops and start its
 * statistics from a fresh baseline.
 */
183 if (mlxsw_sp_qdisc->handle != handle) {
184 mlxsw_sp_qdisc->ops = ops;
185 if (ops->clean_stats)
186 ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
189 mlxsw_sp_qdisc->handle = handle;
/* Error path: if this handle was previously offloaded, let the type
 * hand its cached state back to the kernel before destroying.
 */
194 if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
195 ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
197 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
/* Dispatch a stats request to the slot's per-type get_stats callback,
 * if the slot is offloaded and the type provides one.
 */
202 mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
203 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
204 struct tc_qopt_offload_stats *stats_ptr)
206 if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
207 mlxsw_sp_qdisc->ops->get_stats)
208 return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
/* Dispatch an xstats request to the per-type get_xstats callback. */
216 mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
217 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
220 if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
221 mlxsw_sp_qdisc->ops->get_xstats)
222 return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
/* Accumulate per-priority tx packet/byte counters for every priority
 * set in @prio_bitmap.
 */
230 mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
231 u8 prio_bitmap, u64 *tx_packets,
238 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
239 if (prio_bitmap & BIT(i)) {
240 *tx_packets += xstats->tx_packets[i];
241 *tx_bytes += xstats->tx_bytes[i];
/* Program the congestion profile for @tclass_num: write the CWTP profile
 * (min/max thresholds rounded up to the register granularity, plus drop
 * probability) and then bind it to the tclass via CWTPM, selecting ECN
 * marking or dropping per @is_ecn.
 */
247 mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
248 int tclass_num, u32 min, u32 max,
249 u32 probability, bool is_ecn)
251 char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
252 char cwtp_cmd[MLXSW_REG_CWTP_LEN];
253 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
256 mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
257 mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
258 roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
259 roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
262 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
266 mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
267 MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
269 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
/* Rebind @tclass_num to the reset CWTPM profile with marking/dropping
 * disabled, undoing mlxsw_sp_tclass_congestion_enable().
 */
273 mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
276 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
277 char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
279 mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
280 MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
281 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
/* Snapshot the current hardware counters as the RED qdisc's statistics
 * baseline, so subsequent reads report deltas since offload.
 */
285 mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
286 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
288 u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
289 struct mlxsw_sp_qdisc_stats *stats_base;
290 struct mlxsw_sp_port_xstats *xstats;
291 struct red_stats *red_base;
293 xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
294 stats_base = &mlxsw_sp_qdisc->stats_base;
295 red_base = &mlxsw_sp_qdisc->xstats_base.red;
297 mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
298 mlxsw_sp_qdisc->prio_bitmap,
299 &stats_base->tx_packets,
300 &stats_base->tx_bytes);
/* NOTE(review): ecn is read without a tclass index, unlike wred_drop
 * and tail_drop below — confirm the counter is intended to be
 * port-wide rather than per traffic class.
 */
301 red_base->prob_mark = xstats->ecn;
302 red_base->prob_drop = xstats->wred_drop[tclass_num];
303 red_base->pdrop = xstats->tail_drop[tclass_num];
305 stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
306 stats_base->drops = red_base->prob_drop + red_base->pdrop;
308 stats_base->backlog = 0;
/* Destroy a RED offload: when it is a child qdisc, deduct its backlog
 * from the root's baseline, then disable congestion on its tclass.
 */
312 mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
313 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
315 struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
317 if (root_qdisc != mlxsw_sp_qdisc)
318 root_qdisc->stats_base.backlog -=
319 mlxsw_sp_qdisc->stats_base.backlog;
321 return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
322 mlxsw_sp_qdisc->tclass_num);
/* Validate RED offload parameters: min <= max, max within the device
 * buffer size, and neither threshold zero.
 */
326 mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
327 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
330 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
331 struct tc_red_qopt_offload_params *p = params;
333 if (p->min > p->max) {
334 dev_err(mlxsw_sp->bus_info->dev,
/* NOTE(review): "bigger then" should read "bigger than". */
335 "spectrum: RED: min %u is bigger then max %u\n", p->min,
339 if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
340 dev_err(mlxsw_sp->bus_info->dev,
341 "spectrum: RED: max value %u is too big\n", p->max);
344 if (p->min == 0 || p->max == 0) {
345 dev_err(mlxsw_sp->bus_info->dev,
346 "spectrum: RED: 0 value is illegal for min and max\n");
/* Apply RED parameters to hardware: scale the drop probability and
 * convert min/max from bytes to device cells, then program the tclass.
 */
353 mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
354 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
357 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
358 struct tc_red_qopt_offload_params *p = params;
359 u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
363 /* calculate probability in percentage */
364 prob = p->probability;
/* Two divisions by 2^16 divide by 2^32 in total without overflowing
 * the intermediate value.
 */
366 prob = DIV_ROUND_UP(prob, 1 << 16);
367 prob = DIV_ROUND_UP(prob, 1 << 16);
368 min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
369 max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
370 return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
371 max, prob, p->is_ecn);
/* Hand the cached backlog back to the kernel qdisc (converted from
 * cells to bytes) when RED stops being offloaded.
 */
375 mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
376 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
379 struct tc_red_qopt_offload_params *p = params;
382 backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
383 mlxsw_sp_qdisc->stats_base.backlog);
384 p->qstats->backlog -= backlog;
385 mlxsw_sp_qdisc->stats_base.backlog = 0;
/* Report RED xstats as deltas since the cached baseline, then advance
 * the baseline so the same counts are not reported twice.
 */
389 mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
390 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
393 struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
394 u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
395 struct mlxsw_sp_port_xstats *xstats;
396 struct red_stats *res = xstats_ptr;
397 int early_drops, marks, pdrops;
399 xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
/* NOTE(review): ecn is not tclass-indexed here either — see the RED
 * clean_stats baseline; confirm it is meant to be port-wide.
 */
401 early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
402 marks = xstats->ecn - xstats_base->prob_mark;
403 pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
405 res->pdrop += pdrops;
406 res->prob_drop += early_drops;
407 res->prob_mark += marks;
409 xstats_base->pdrop += pdrops;
410 xstats_base->prob_drop += early_drops;
411 xstats_base->prob_mark += marks;
/* Report RED qdisc stats (bytes/packets/overlimits/drops/backlog) as
 * deltas since the baseline, push them into @stats_ptr, and roll the
 * baseline forward.
 */
416 mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
417 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
418 struct tc_qopt_offload_stats *stats_ptr)
420 u64 tx_bytes, tx_packets, overlimits, drops, backlog;
421 u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
422 struct mlxsw_sp_qdisc_stats *stats_base;
423 struct mlxsw_sp_port_xstats *xstats;
425 xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
426 stats_base = &mlxsw_sp_qdisc->stats_base;
428 mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
429 mlxsw_sp_qdisc->prio_bitmap,
430 &tx_packets, &tx_bytes);
431 tx_bytes = tx_bytes - stats_base->tx_bytes;
432 tx_packets = tx_packets - stats_base->tx_packets;
/* Overlimits = WRED early drops + ECN marks; drops = WRED + tail. */
434 overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
435 stats_base->overlimits;
436 drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
438 backlog = xstats->backlog[tclass_num];
440 _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
441 stats_ptr->qstats->overlimits += overlimits;
442 stats_ptr->qstats->drops += drops;
/* Backlog is kept in cells; convert to bytes for the kernel. */
443 stats_ptr->qstats->backlog +=
444 mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
446 mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
447 stats_base->backlog);
449 stats_base->backlog = backlog;
450 stats_base->drops += drops;
451 stats_base->overlimits += overlimits;
452 stats_base->tx_bytes += tx_bytes;
453 stats_base->tx_packets += tx_packets;
/* Tclass that priorities are mapped back to when PRIO is removed
 * (see the prio destroy path).
 */
457 #define MLXSW_SP_PORT_DEFAULT_TCLASS 0
/* RED qdisc offload operations. */
459 static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
460 .type = MLXSW_SP_QDISC_RED,
461 .check_params = mlxsw_sp_qdisc_red_check_params,
462 .replace = mlxsw_sp_qdisc_red_replace,
463 .unoffload = mlxsw_sp_qdisc_red_unoffload,
464 .destroy = mlxsw_sp_qdisc_red_destroy,
465 .get_stats = mlxsw_sp_qdisc_get_red_stats,
466 .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
467 .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
/* Entry point for TC_SETUP RED offload commands on a port. Resolves the
 * addressed qdisc slot, handles REPLACE, and dispatches the remaining
 * commands only when the slot matches the handle and RED type.
 */
470 int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
471 struct tc_red_qopt_offload *p)
473 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
475 mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
479 if (p->command == TC_RED_REPLACE)
480 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
482 &mlxsw_sp_qdisc_ops_red,
485 if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
489 switch (p->command) {
491 return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
493 return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
496 return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
/* Destroy a PRIO offload: restore the default priority->tclass mapping
 * and destroy every child qdisc slot.
 */
504 mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
505 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
509 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
510 mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
511 MLXSW_SP_PORT_DEFAULT_TCLASS);
512 mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
513 &mlxsw_sp_port->tclass_qdiscs[i]);
514 mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
/* Validate PRIO parameters: the band count must fit the number of
 * hardware traffic classes.
 */
521 mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
522 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
525 struct tc_prio_qopt_offload_params *p = params;
527 if (p->bands > IEEE_8021QAZ_MAX_TCS)
/* Apply a PRIO configuration: rebuild each band's priority bitmap from
 * p->priomap, program the priority->tclass mapping for priorities that
 * moved, re-baseline child stats whose bitmap changed (preserving their
 * backlog), and destroy children of bands beyond p->bands.
 */
534 mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
535 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
538 struct tc_prio_qopt_offload_params *p = params;
539 struct mlxsw_sp_qdisc *child_qdisc;
540 int tclass, i, band, backlog;
544 for (band = 0; band < p->bands; band++) {
545 tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
546 child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
547 old_priomap = child_qdisc->prio_bitmap;
548 child_qdisc->prio_bitmap = 0;
549 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
550 if (p->priomap[i] == band) {
551 child_qdisc->prio_bitmap |= BIT(i);
/* Priority already on this band: no hardware update needed. */
552 if (BIT(i) & old_priomap)
554 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
/* The bitmap changed: counters from the old priority set are stale, so
 * re-baseline the child stats while keeping its accumulated backlog.
 */
560 if (old_priomap != child_qdisc->prio_bitmap &&
561 child_qdisc->ops && child_qdisc->ops->clean_stats) {
562 backlog = child_qdisc->stats_base.backlog;
563 child_qdisc->ops->clean_stats(mlxsw_sp_port,
565 child_qdisc->stats_base.backlog = backlog;
/* Bands beyond the configured count no longer exist: tear them down. */
568 for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
569 tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
570 child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
571 child_qdisc->prio_bitmap = 0;
572 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
/* Hand the cached backlog (converted from cells to bytes) back to the
 * kernel qdisc when PRIO stops being offloaded.
 */
578 mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
579 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
582 struct tc_prio_qopt_offload_params *p = params;
585 backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
586 mlxsw_sp_qdisc->stats_base.backlog);
587 p->qstats->backlog -= backlog;
/* Report PRIO stats: port-wide tx counters minus the baseline, with
 * drops and backlog summed over all traffic classes; then advance the
 * baseline.
 */
591 mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
592 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
593 struct tc_qopt_offload_stats *stats_ptr)
595 u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
596 struct mlxsw_sp_qdisc_stats *stats_base;
597 struct mlxsw_sp_port_xstats *xstats;
598 struct rtnl_link_stats64 *stats;
601 xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
602 stats = &mlxsw_sp_port->periodic_hw_stats.stats;
603 stats_base = &mlxsw_sp_qdisc->stats_base;
605 tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
606 tx_packets = stats->tx_packets - stats_base->tx_packets;
608 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
609 drops += xstats->tail_drop[i];
610 drops += xstats->wred_drop[i];
611 backlog += xstats->backlog[i];
613 drops = drops - stats_base->drops;
615 _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
616 stats_ptr->qstats->drops += drops;
/* Backlog is kept in cells; convert to bytes for the kernel. */
617 stats_ptr->qstats->backlog +=
618 mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
620 mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
621 stats_base->backlog);
622 stats_base->backlog = backlog;
623 stats_base->drops += drops;
624 stats_base->tx_bytes += tx_bytes;
625 stats_base->tx_packets += tx_packets;
/* Snapshot current hardware counters as the PRIO qdisc's statistics
 * baseline: port-wide tx counters plus drops summed over all tclasses.
 */
630 mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
631 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
633 struct mlxsw_sp_qdisc_stats *stats_base;
634 struct mlxsw_sp_port_xstats *xstats;
635 struct rtnl_link_stats64 *stats;
638 xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
639 stats = &mlxsw_sp_port->periodic_hw_stats.stats;
640 stats_base = &mlxsw_sp_qdisc->stats_base;
642 stats_base->tx_packets = stats->tx_packets;
643 stats_base->tx_bytes = stats->tx_bytes;
645 stats_base->drops = 0;
646 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
647 stats_base->drops += xstats->tail_drop[i];
648 stats_base->drops += xstats->wred_drop[i];
651 mlxsw_sp_qdisc->stats_base.backlog = 0;
/* PRIO qdisc offload operations (no per-type xstats). */
654 static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
655 .type = MLXSW_SP_QDISC_PRIO,
656 .check_params = mlxsw_sp_qdisc_prio_check_params,
657 .replace = mlxsw_sp_qdisc_prio_replace,
658 .unoffload = mlxsw_sp_qdisc_prio_unoffload,
659 .destroy = mlxsw_sp_qdisc_prio_destroy,
660 .get_stats = mlxsw_sp_qdisc_get_prio_stats,
661 .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
664 /* Grafting is not supported in mlxsw. It will result in un-offloading of the
665 * grafted qdisc as well as the qdisc in the qdisc new location.
666 * (However, if the graft is to the location where the qdisc is already at, it
667 * will be ignored completely and won't cause un-offloading).
670 mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
671 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
672 struct tc_prio_qopt_offload_graft_params *p)
674 int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
675 struct mlxsw_sp_qdisc *old_qdisc;
677 /* Check if the grafted qdisc is already in its "new" location. If so -
678 * nothing needs to be done.
680 if (p->band < IEEE_8021QAZ_MAX_TCS &&
681 mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
684 /* See if the grafted qdisc is already offloaded on any tclass. If so,
687 old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
690 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
/* Also un-offload whatever currently occupies the target band's slot. */
692 mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
693 &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
/* Entry point for TC_SETUP PRIO offload commands on a port. PRIO is
 * only accepted at the root (root_only lookup); REPLACE is handled
 * first, other commands require a matching handle and PRIO type.
 */
697 int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
698 struct tc_prio_qopt_offload *p)
700 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
702 mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
706 if (p->command == TC_PRIO_REPLACE)
707 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
709 &mlxsw_sp_qdisc_ops_prio,
712 if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
713 MLXSW_SP_QDISC_PRIO))
716 switch (p->command) {
717 case TC_PRIO_DESTROY:
718 return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
720 return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
723 return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
/* Allocate the port's root qdisc slot and the per-tclass child slot
 * array; on failure, unwind the root allocation.
 */
730 int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
732 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
735 mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
737 goto err_root_qdisc_init;
739 mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
/* The root covers all priorities and the default tclass. */
740 mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
741 mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
/* NOTE(review): kcalloc() would be the idiomatic overflow-safe form of
 * this array allocation.
 */
743 mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc) * IEEE_8021QAZ_MAX_TCS,
746 goto err_tclass_qdiscs_init;
748 mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
749 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
750 mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;
754 err_tclass_qdiscs_init:
755 kfree(mlxsw_sp_port->root_qdisc);
/* Free the per-tclass child array and the root qdisc slot. */
760 void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
762 kfree(mlxsw_sp_port->tclass_qdiscs);
763 kfree(mlxsw_sp_port->root_qdisc);