mlxsw: spectrum_qdisc: Clean stats recursively when priomap changes
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_qdisc.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/errno.h>
6 #include <linux/netdevice.h>
7 #include <net/pkt_cls.h>
8 #include <net/red.h>
9
10 #include "spectrum.h"
11 #include "spectrum_span.h"
12 #include "reg.h"
13
14 #define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
15 #define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
16         MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))
17
18 enum mlxsw_sp_qdisc_type {
19         MLXSW_SP_QDISC_NO_QDISC,
20         MLXSW_SP_QDISC_RED,
21         MLXSW_SP_QDISC_PRIO,
22         MLXSW_SP_QDISC_ETS,
23         MLXSW_SP_QDISC_TBF,
24         MLXSW_SP_QDISC_FIFO,
25 };
26
27 struct mlxsw_sp_qdisc;
28
29 struct mlxsw_sp_qdisc_ops {
30         enum mlxsw_sp_qdisc_type type;
31         int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
32                             void *params);
33         int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
34                        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
35         int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
36                        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
37         int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
38                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
39                          struct tc_qopt_offload_stats *stats_ptr);
40         int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
41                           struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
42                           void *xstats_ptr);
43         void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
44                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
45         /* unoffload - to be used for a qdisc that stops being offloaded without
46          * being destroyed.
47          */
48         void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
49                           struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
50         struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
51                                              u32 parent);
52         unsigned int num_classes;
53
54         u8 (*get_prio_bitmap)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
55                               struct mlxsw_sp_qdisc *child);
56         int (*get_tclass_num)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
57                               struct mlxsw_sp_qdisc *child);
58 };
59
60 struct mlxsw_sp_qdisc_ets_band {
61         u8 prio_bitmap;
62         int tclass_num;
63 };
64
65 struct mlxsw_sp_qdisc_ets_data {
66         struct mlxsw_sp_qdisc_ets_band bands[IEEE_8021QAZ_MAX_TCS];
67 };
68
69 struct mlxsw_sp_qdisc {
70         u32 handle;
71         union {
72                 struct red_stats red;
73         } xstats_base;
74         struct mlxsw_sp_qdisc_stats {
75                 u64 tx_bytes;
76                 u64 tx_packets;
77                 u64 drops;
78                 u64 overlimits;
79                 u64 backlog;
80         } stats_base;
81
82         union {
83                 struct mlxsw_sp_qdisc_ets_data *ets_data;
84         };
85
86         struct mlxsw_sp_qdisc_ops *ops;
87         struct mlxsw_sp_qdisc *parent;
88         struct mlxsw_sp_qdisc *qdiscs;
89         unsigned int num_classes;
90 };
91
92 struct mlxsw_sp_qdisc_state {
93         struct mlxsw_sp_qdisc root_qdisc;
94
95         /* When a PRIO or ETS are added, the invisible FIFOs in their bands are
96          * created first. When notifications for these FIFOs arrive, it is not
97          * known what qdisc their parent handle refers to. It could be a
98          * newly-created PRIO that will replace the currently-offloaded one, or
99          * it could be e.g. a RED that will be attached below it.
100          *
101          * As the notifications start to arrive, use them to note what the
102          * future parent handle is, and keep track of which child FIFOs were
103          * seen. Then when the parent is known, retroactively offload those
104          * FIFOs.
105          */
106         u32 future_handle;
107         bool future_fifos[IEEE_8021QAZ_MAX_TCS];
108         struct mutex lock; /* Protects qdisc state. */
109 };
110
111 static bool
112 mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle)
113 {
114         return mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->handle == handle;
115 }
116
117 static struct mlxsw_sp_qdisc *
118 mlxsw_sp_qdisc_walk(struct mlxsw_sp_qdisc *qdisc,
119                     struct mlxsw_sp_qdisc *(*pre)(struct mlxsw_sp_qdisc *,
120                                                   void *),
121                     void *data)
122 {
123         struct mlxsw_sp_qdisc *tmp;
124         unsigned int i;
125
126         if (pre) {
127                 tmp = pre(qdisc, data);
128                 if (tmp)
129                         return tmp;
130         }
131
132         if (qdisc->ops) {
133                 for (i = 0; i < qdisc->num_classes; i++) {
134                         tmp = &qdisc->qdiscs[i];
135                         if (qdisc->ops) {
136                                 tmp = mlxsw_sp_qdisc_walk(tmp, pre, data);
137                                 if (tmp)
138                                         return tmp;
139                         }
140                 }
141         }
142
143         return NULL;
144 }
145
146 static struct mlxsw_sp_qdisc *
147 mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
148 {
149         u32 parent = *(u32 *)data;
150
151         if (qdisc->ops && TC_H_MAJ(qdisc->handle) == TC_H_MAJ(parent)) {
152                 if (qdisc->ops->find_class)
153                         return qdisc->ops->find_class(qdisc, parent);
154         }
155
156         return NULL;
157 }
158
159 static struct mlxsw_sp_qdisc *
160 mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
161                     bool root_only)
162 {
163         struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
164
165         if (!qdisc_state)
166                 return NULL;
167         if (parent == TC_H_ROOT)
168                 return &qdisc_state->root_qdisc;
169         if (root_only)
170                 return NULL;
171         return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
172                                    mlxsw_sp_qdisc_walk_cb_find, &parent);
173 }
174
175 static struct mlxsw_sp_qdisc *
176 mlxsw_sp_qdisc_walk_cb_find_by_handle(struct mlxsw_sp_qdisc *qdisc, void *data)
177 {
178         u32 handle = *(u32 *)data;
179
180         if (qdisc->ops && qdisc->handle == handle)
181                 return qdisc;
182         return NULL;
183 }
184
185 static struct mlxsw_sp_qdisc *
186 mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
187 {
188         struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
189
190         if (!qdisc_state)
191                 return NULL;
192         return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
193                                    mlxsw_sp_qdisc_walk_cb_find_by_handle,
194                                    &handle);
195 }
196
197 static void
198 mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
199 {
200         struct mlxsw_sp_qdisc *tmp;
201
202         for (tmp = mlxsw_sp_qdisc->parent; tmp; tmp = tmp->parent)
203                 tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
204 }
205
206 static u8 mlxsw_sp_qdisc_get_prio_bitmap(struct mlxsw_sp_port *mlxsw_sp_port,
207                                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
208 {
209         struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
210
211         if (!parent)
212                 return 0xff;
213         return parent->ops->get_prio_bitmap(parent, mlxsw_sp_qdisc);
214 }
215
216 #define MLXSW_SP_PORT_DEFAULT_TCLASS 0
217
218 static int mlxsw_sp_qdisc_get_tclass_num(struct mlxsw_sp_port *mlxsw_sp_port,
219                                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
220 {
221         struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
222
223         if (!parent)
224                 return MLXSW_SP_PORT_DEFAULT_TCLASS;
225         return parent->ops->get_tclass_num(parent, mlxsw_sp_qdisc);
226 }
227
228 static int
229 mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
230                        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
231 {
232         struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
233         int err_hdroom = 0;
234         int err = 0;
235         int i;
236
237         if (!mlxsw_sp_qdisc)
238                 return 0;
239
240         if (root_qdisc == mlxsw_sp_qdisc) {
241                 struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;
242
243                 hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
244                 mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
245                 mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
246                 mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
247                 err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
248         }
249
250         if (!mlxsw_sp_qdisc->ops)
251                 return 0;
252
253         for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++)
254                 mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
255                                        &mlxsw_sp_qdisc->qdiscs[i]);
256         mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
257         if (mlxsw_sp_qdisc->ops->destroy)
258                 err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
259                                                    mlxsw_sp_qdisc);
260         if (mlxsw_sp_qdisc->ops->clean_stats)
261                 mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
262
263         mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
264         mlxsw_sp_qdisc->ops = NULL;
265         mlxsw_sp_qdisc->num_classes = 0;
266         kfree(mlxsw_sp_qdisc->qdiscs);
267         mlxsw_sp_qdisc->qdiscs = NULL;
268         return err_hdroom ?: err;
269 }
270
271 static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
272                                  u32 handle,
273                                  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
274                                  struct mlxsw_sp_qdisc_ops *ops, void *params)
275 {
276         struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
277         struct mlxsw_sp_hdroom orig_hdroom;
278         unsigned int i;
279         int err;
280
281         err = ops->check_params(mlxsw_sp_port, params);
282         if (err)
283                 return err;
284
285         if (ops->num_classes) {
286                 mlxsw_sp_qdisc->qdiscs = kcalloc(ops->num_classes,
287                                                  sizeof(*mlxsw_sp_qdisc->qdiscs),
288                                                  GFP_KERNEL);
289                 if (!mlxsw_sp_qdisc->qdiscs)
290                         return -ENOMEM;
291
292                 for (i = 0; i < ops->num_classes; i++)
293                         mlxsw_sp_qdisc->qdiscs[i].parent = mlxsw_sp_qdisc;
294         }
295
296         orig_hdroom = *mlxsw_sp_port->hdroom;
297         if (root_qdisc == mlxsw_sp_qdisc) {
298                 struct mlxsw_sp_hdroom hdroom = orig_hdroom;
299
300                 hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
301                 mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
302                 mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
303                 mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
304
305                 err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
306                 if (err)
307                         goto err_hdroom_configure;
308         }
309
310         mlxsw_sp_qdisc->num_classes = ops->num_classes;
311         mlxsw_sp_qdisc->ops = ops;
312         mlxsw_sp_qdisc->handle = handle;
313         err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
314         if (err)
315                 goto err_replace;
316
317         return 0;
318
319 err_replace:
320         mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
321         mlxsw_sp_qdisc->ops = NULL;
322         mlxsw_sp_qdisc->num_classes = 0;
323         mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
324 err_hdroom_configure:
325         kfree(mlxsw_sp_qdisc->qdiscs);
326         mlxsw_sp_qdisc->qdiscs = NULL;
327         return err;
328 }
329
330 static int
331 mlxsw_sp_qdisc_change(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
332                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params)
333 {
334         struct mlxsw_sp_qdisc_ops *ops = mlxsw_sp_qdisc->ops;
335         int err;
336
337         err = ops->check_params(mlxsw_sp_port, params);
338         if (err)
339                 goto unoffload;
340
341         err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
342         if (err)
343                 goto unoffload;
344
345         /* Check if the Qdisc changed. That includes a situation where an
346          * invisible Qdisc replaces another one, or is being added for the
347          * first time.
348          */
349         if (mlxsw_sp_qdisc->handle != handle) {
350                 if (ops->clean_stats)
351                         ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
352         }
353
354         mlxsw_sp_qdisc->handle = handle;
355         return 0;
356
357 unoffload:
358         if (ops->unoffload)
359                 ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
360
361         mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
362         return err;
363 }
364
365 static int
366 mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
367                        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
368                        struct mlxsw_sp_qdisc_ops *ops, void *params)
369 {
370         if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
371                 /* In case this location contained a different qdisc of the
372                  * same type we can override the old qdisc configuration.
373                  * Otherwise, we need to remove the old qdisc before setting the
374                  * new one.
375                  */
376                 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
377
378         if (!mlxsw_sp_qdisc->ops)
379                 return mlxsw_sp_qdisc_create(mlxsw_sp_port, handle,
380                                              mlxsw_sp_qdisc, ops, params);
381         else
382                 return mlxsw_sp_qdisc_change(mlxsw_sp_port, handle,
383                                              mlxsw_sp_qdisc, params);
384 }
385
386 static int
387 mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
388                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
389                          struct tc_qopt_offload_stats *stats_ptr)
390 {
391         if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
392             mlxsw_sp_qdisc->ops->get_stats)
393                 return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
394                                                       mlxsw_sp_qdisc,
395                                                       stats_ptr);
396
397         return -EOPNOTSUPP;
398 }
399
400 static int
401 mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
402                           struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
403                           void *xstats_ptr)
404 {
405         if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
406             mlxsw_sp_qdisc->ops->get_xstats)
407                 return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
408                                                       mlxsw_sp_qdisc,
409                                                       xstats_ptr);
410
411         return -EOPNOTSUPP;
412 }
413
414 static u64
415 mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
416 {
417         return xstats->backlog[tclass_num] +
418                xstats->backlog[tclass_num + 8];
419 }
420
421 static u64
422 mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
423 {
424         return xstats->tail_drop[tclass_num] +
425                xstats->tail_drop[tclass_num + 8];
426 }
427
428 static void
429 mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
430                                        u8 prio_bitmap, u64 *tx_packets,
431                                        u64 *tx_bytes)
432 {
433         int i;
434
435         *tx_packets = 0;
436         *tx_bytes = 0;
437         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
438                 if (prio_bitmap & BIT(i)) {
439                         *tx_packets += xstats->tx_packets[i];
440                         *tx_bytes += xstats->tx_bytes[i];
441                 }
442         }
443 }
444
445 static void
446 mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
447                                 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
448                                 u64 *p_tx_bytes, u64 *p_tx_packets,
449                                 u64 *p_drops, u64 *p_backlog)
450 {
451         struct mlxsw_sp_port_xstats *xstats;
452         u64 tx_bytes, tx_packets;
453         u8 prio_bitmap;
454         int tclass_num;
455
456         prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
457                                                      mlxsw_sp_qdisc);
458         tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
459                                                    mlxsw_sp_qdisc);
460         xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
461         mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
462                                                &tx_packets, &tx_bytes);
463
464         *p_tx_packets += tx_packets;
465         *p_tx_bytes += tx_bytes;
466         *p_drops += xstats->wred_drop[tclass_num] +
467                     mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
468         *p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
469 }
470
471 static void
472 mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
473                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
474                             u64 tx_bytes, u64 tx_packets,
475                             u64 drops, u64 backlog,
476                             struct tc_qopt_offload_stats *stats_ptr)
477 {
478         struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;
479
480         tx_bytes -= stats_base->tx_bytes;
481         tx_packets -= stats_base->tx_packets;
482         drops -= stats_base->drops;
483         backlog -= stats_base->backlog;
484
485         _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
486         stats_ptr->qstats->drops += drops;
487         stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);
488
489         stats_base->backlog += backlog;
490         stats_base->drops += drops;
491         stats_base->tx_bytes += tx_bytes;
492         stats_base->tx_packets += tx_packets;
493 }
494
495 static void
496 mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
497                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
498                             struct tc_qopt_offload_stats *stats_ptr)
499 {
500         u64 tx_packets = 0;
501         u64 tx_bytes = 0;
502         u64 backlog = 0;
503         u64 drops = 0;
504
505         mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
506                                         &tx_bytes, &tx_packets,
507                                         &drops, &backlog);
508         mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
509                                     tx_bytes, tx_packets, drops, backlog,
510                                     stats_ptr);
511 }
512
513 static int
514 mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
515                                   int tclass_num, u32 min, u32 max,
516                                   u32 probability, bool is_wred, bool is_ecn)
517 {
518         char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
519         char cwtp_cmd[MLXSW_REG_CWTP_LEN];
520         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
521         int err;
522
523         mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
524         mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
525                                     roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
526                                     roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
527                                     probability);
528
529         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
530         if (err)
531                 return err;
532
533         mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
534                              MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);
535
536         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
537 }
538
539 static int
540 mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
541                                    int tclass_num)
542 {
543         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
544         char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
545
546         mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
547                              MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
548         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
549 }
550
551 static void
552 mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
553                                         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
554 {
555         struct mlxsw_sp_qdisc_stats *stats_base;
556         struct mlxsw_sp_port_xstats *xstats;
557         struct red_stats *red_base;
558         u8 prio_bitmap;
559         int tclass_num;
560
561         prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
562                                                      mlxsw_sp_qdisc);
563         tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
564                                                    mlxsw_sp_qdisc);
565         xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
566         stats_base = &mlxsw_sp_qdisc->stats_base;
567         red_base = &mlxsw_sp_qdisc->xstats_base.red;
568
569         mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
570                                                &stats_base->tx_packets,
571                                                &stats_base->tx_bytes);
572         red_base->prob_mark = xstats->tc_ecn[tclass_num];
573         red_base->prob_drop = xstats->wred_drop[tclass_num];
574         red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
575
576         stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
577         stats_base->drops = red_base->prob_drop + red_base->pdrop;
578
579         stats_base->backlog = 0;
580 }
581
582 static int
583 mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
584                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
585 {
586         int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
587                                                        mlxsw_sp_qdisc);
588
589         return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
590 }
591
592 static int
593 mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
594                                 void *params)
595 {
596         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
597         struct tc_red_qopt_offload_params *p = params;
598
599         if (p->min > p->max) {
600                 dev_err(mlxsw_sp->bus_info->dev,
601                         "spectrum: RED: min %u is bigger then max %u\n", p->min,
602                         p->max);
603                 return -EINVAL;
604         }
605         if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
606                                         GUARANTEED_SHARED_BUFFER)) {
607                 dev_err(mlxsw_sp->bus_info->dev,
608                         "spectrum: RED: max value %u is too big\n", p->max);
609                 return -EINVAL;
610         }
611         if (p->min == 0 || p->max == 0) {
612                 dev_err(mlxsw_sp->bus_info->dev,
613                         "spectrum: RED: 0 value is illegal for min and max\n");
614                 return -EINVAL;
615         }
616         return 0;
617 }
618
619 static int
620 mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
621                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
622                            void *params)
623 {
624         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
625         struct tc_red_qopt_offload_params *p = params;
626         int tclass_num;
627         u32 min, max;
628         u64 prob;
629
630         tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
631                                                    mlxsw_sp_qdisc);
632
633         /* calculate probability in percentage */
634         prob = p->probability;
635         prob *= 100;
636         prob = DIV_ROUND_UP(prob, 1 << 16);
637         prob = DIV_ROUND_UP(prob, 1 << 16);
638         min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
639         max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
640         return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
641                                                  min, max, prob,
642                                                  !p->is_nodrop, p->is_ecn);
643 }
644
645 static void
646 mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
647                               struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
648                               struct gnet_stats_queue *qstats)
649 {
650         u64 backlog;
651
652         backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
653                                        mlxsw_sp_qdisc->stats_base.backlog);
654         qstats->backlog -= backlog;
655         mlxsw_sp_qdisc->stats_base.backlog = 0;
656 }
657
658 static void
659 mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
660                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
661                              void *params)
662 {
663         struct tc_red_qopt_offload_params *p = params;
664
665         mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
666 }
667
668 static int
669 mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
670                               struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
671                               void *xstats_ptr)
672 {
673         struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
674         struct mlxsw_sp_port_xstats *xstats;
675         struct red_stats *res = xstats_ptr;
676         int early_drops, marks, pdrops;
677         int tclass_num;
678
679         tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
680                                                    mlxsw_sp_qdisc);
681         xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
682
683         early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
684         marks = xstats->tc_ecn[tclass_num] - xstats_base->prob_mark;
685         pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
686                  xstats_base->pdrop;
687
688         res->pdrop += pdrops;
689         res->prob_drop += early_drops;
690         res->prob_mark += marks;
691
692         xstats_base->pdrop += pdrops;
693         xstats_base->prob_drop += early_drops;
694         xstats_base->prob_mark += marks;
695         return 0;
696 }
697
698 static int
699 mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
700                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
701                              struct tc_qopt_offload_stats *stats_ptr)
702 {
703         struct mlxsw_sp_qdisc_stats *stats_base;
704         struct mlxsw_sp_port_xstats *xstats;
705         u64 overlimits;
706         int tclass_num;
707
708         tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
709                                                    mlxsw_sp_qdisc);
710         xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
711         stats_base = &mlxsw_sp_qdisc->stats_base;
712
713         mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
714         overlimits = xstats->wred_drop[tclass_num] +
715                      xstats->tc_ecn[tclass_num] - stats_base->overlimits;
716
717         stats_ptr->qstats->overlimits += overlimits;
718         stats_base->overlimits += overlimits;
719
720         return 0;
721 }
722
723 static struct mlxsw_sp_qdisc *
724 mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
725                                u32 parent)
726 {
727         return NULL;
728 }
729
730 static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
731         .type = MLXSW_SP_QDISC_RED,
732         .check_params = mlxsw_sp_qdisc_red_check_params,
733         .replace = mlxsw_sp_qdisc_red_replace,
734         .unoffload = mlxsw_sp_qdisc_red_unoffload,
735         .destroy = mlxsw_sp_qdisc_red_destroy,
736         .get_stats = mlxsw_sp_qdisc_get_red_stats,
737         .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
738         .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
739         .find_class = mlxsw_sp_qdisc_leaf_find_class,
740 };
741
742 static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
743                                 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
744                                 u8 band, u32 child_handle);
745
746 static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
747                                    struct tc_red_qopt_offload *p)
748 {
749         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
750
751         mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
752         if (!mlxsw_sp_qdisc)
753                 return -EOPNOTSUPP;
754
755         if (p->command == TC_RED_REPLACE)
756                 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
757                                               mlxsw_sp_qdisc,
758                                               &mlxsw_sp_qdisc_ops_red,
759                                               &p->set);
760
761         if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
762                 return -EOPNOTSUPP;
763
764         switch (p->command) {
765         case TC_RED_DESTROY:
766                 return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
767         case TC_RED_XSTATS:
768                 return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
769                                                  p->xstats);
770         case TC_RED_STATS:
771                 return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
772                                                 &p->stats);
773         case TC_RED_GRAFT:
774                 return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
775                                             p->child_handle);
776         default:
777                 return -EOPNOTSUPP;
778         }
779 }
780
781 int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
782                           struct tc_red_qopt_offload *p)
783 {
784         int err;
785
786         mutex_lock(&mlxsw_sp_port->qdisc->lock);
787         err = __mlxsw_sp_setup_tc_red(mlxsw_sp_port, p);
788         mutex_unlock(&mlxsw_sp_port->qdisc->lock);
789
790         return err;
791 }
792
793 static void
794 mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
795                                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
796 {
797         u64 backlog_cells = 0;
798         u64 tx_packets = 0;
799         u64 tx_bytes = 0;
800         u64 drops = 0;
801
802         mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
803                                         &tx_bytes, &tx_packets,
804                                         &drops, &backlog_cells);
805
806         mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
807         mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
808         mlxsw_sp_qdisc->stats_base.drops = drops;
809         mlxsw_sp_qdisc->stats_base.backlog = 0;
810 }
811
812 static int
813 mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
814                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
815 {
816         int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
817                                                        mlxsw_sp_qdisc);
818
819         return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
820                                              MLXSW_REG_QEEC_HR_SUBGROUP,
821                                              tclass_num, 0,
822                                              MLXSW_REG_QEEC_MAS_DIS, 0);
823 }
824
825 static int
826 mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
827                       u32 max_size, u8 *p_burst_size)
828 {
829         /* TBF burst size is configured in bytes. The ASIC burst size value is
830          * ((2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
831          */
832         u32 bs512 = max_size / 64;
833         u8 bs = fls(bs512);
834
835         if (!bs)
836                 return -EINVAL;
837         --bs;
838
839         /* Demand a power of two. */
840         if ((1 << bs) != bs512)
841                 return -EINVAL;
842
843         if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
844             bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
845                 return -EINVAL;
846
847         *p_burst_size = bs;
848         return 0;
849 }
850
851 static u32
852 mlxsw_sp_qdisc_tbf_max_size(u8 bs)
853 {
854         return (1U << bs) * 64;
855 }
856
857 static u64
858 mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
859 {
860         /* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
861          * Kbits/s.
862          */
863         return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
864 }
865
866 static int
867 mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
868                                 void *params)
869 {
870         struct tc_tbf_qopt_offload_replace_params *p = params;
871         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
872         u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
873         u8 burst_size;
874         int err;
875
876         if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
877                 dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
878                         "spectrum: TBF: rate of %lluKbps must be below %u\n",
879                         rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
880                 return -EINVAL;
881         }
882
883         err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
884         if (err) {
885                 u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;
886
887                 dev_err(mlxsw_sp->bus_info->dev,
888                         "spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
889                         p->max_size,
890                         mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
891                         mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
892                 return -EINVAL;
893         }
894
895         return 0;
896 }
897
898 static int
899 mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
900                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
901                            void *params)
902 {
903         struct tc_tbf_qopt_offload_replace_params *p = params;
904         u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
905         int tclass_num;
906         u8 burst_size;
907         int err;
908
909         tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
910                                                    mlxsw_sp_qdisc);
911
912         err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
913         if (WARN_ON_ONCE(err))
914                 /* check_params above was supposed to reject this value. */
915                 return -EINVAL;
916
917         /* Configure subgroup shaper, so that both UC and MC traffic is subject
918          * to shaping. That is unlike RED, however UC queue lengths are going to
919          * be different than MC ones due to different pool and quota
920          * configurations, so the configuration is not applicable. For shaper on
921          * the other hand, subjecting the overall stream to the configured
922          * shaper makes sense. Also note that that is what we do for
923          * ieee_setmaxrate().
924          */
925         return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
926                                              MLXSW_REG_QEEC_HR_SUBGROUP,
927                                              tclass_num, 0,
928                                              rate_kbps, burst_size);
929 }
930
931 static void
932 mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
933                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
934                              void *params)
935 {
936         struct tc_tbf_qopt_offload_replace_params *p = params;
937
938         mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
939 }
940
941 static int
942 mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
943                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
944                              struct tc_qopt_offload_stats *stats_ptr)
945 {
946         mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
947                                     stats_ptr);
948         return 0;
949 }
950
951 static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
952         .type = MLXSW_SP_QDISC_TBF,
953         .check_params = mlxsw_sp_qdisc_tbf_check_params,
954         .replace = mlxsw_sp_qdisc_tbf_replace,
955         .unoffload = mlxsw_sp_qdisc_tbf_unoffload,
956         .destroy = mlxsw_sp_qdisc_tbf_destroy,
957         .get_stats = mlxsw_sp_qdisc_get_tbf_stats,
958         .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
959         .find_class = mlxsw_sp_qdisc_leaf_find_class,
960 };
961
962 static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
963                                    struct tc_tbf_qopt_offload *p)
964 {
965         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
966
967         mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
968         if (!mlxsw_sp_qdisc)
969                 return -EOPNOTSUPP;
970
971         if (p->command == TC_TBF_REPLACE)
972                 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
973                                               mlxsw_sp_qdisc,
974                                               &mlxsw_sp_qdisc_ops_tbf,
975                                               &p->replace_params);
976
977         if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
978                 return -EOPNOTSUPP;
979
980         switch (p->command) {
981         case TC_TBF_DESTROY:
982                 return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
983         case TC_TBF_STATS:
984                 return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
985                                                 &p->stats);
986         case TC_TBF_GRAFT:
987                 return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
988                                             p->child_handle);
989         default:
990                 return -EOPNOTSUPP;
991         }
992 }
993
994 int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
995                           struct tc_tbf_qopt_offload *p)
996 {
997         int err;
998
999         mutex_lock(&mlxsw_sp_port->qdisc->lock);
1000         err = __mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, p);
1001         mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1002
1003         return err;
1004 }
1005
1006 static int
1007 mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
1008                                  void *params)
1009 {
1010         return 0;
1011 }
1012
1013 static int
1014 mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
1015                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1016                             void *params)
1017 {
1018         return 0;
1019 }
1020
1021 static int
1022 mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1023                               struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1024                               struct tc_qopt_offload_stats *stats_ptr)
1025 {
1026         mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1027                                     stats_ptr);
1028         return 0;
1029 }
1030
1031 static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
1032         .type = MLXSW_SP_QDISC_FIFO,
1033         .check_params = mlxsw_sp_qdisc_fifo_check_params,
1034         .replace = mlxsw_sp_qdisc_fifo_replace,
1035         .get_stats = mlxsw_sp_qdisc_get_fifo_stats,
1036         .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
1037 };
1038
1039 static int
1040 mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
1041                                    u32 handle, unsigned int band,
1042                                    struct mlxsw_sp_qdisc *child_qdisc)
1043 {
1044         struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
1045
1046         if (handle == qdisc_state->future_handle &&
1047             qdisc_state->future_fifos[band])
1048                 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
1049                                               child_qdisc,
1050                                               &mlxsw_sp_qdisc_ops_fifo,
1051                                               NULL);
1052         return 0;
1053 }
1054
1055 static void
1056 mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
1057                                  u32 handle)
1058 {
1059         struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
1060
1061         qdisc_state->future_handle = handle;
1062         memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
1063 }
1064
1065 static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
1066                                     struct tc_fifo_qopt_offload *p)
1067 {
1068         struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
1069         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1070         unsigned int band;
1071         u32 parent_handle;
1072
1073         mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
1074         if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
1075                 parent_handle = TC_H_MAJ(p->parent);
1076                 if (parent_handle != qdisc_state->future_handle) {
1077                         /* This notifications is for a different Qdisc than
1078                          * previously. Wipe the future cache.
1079                          */
1080                         mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port,
1081                                                          parent_handle);
1082                 }
1083
1084                 band = TC_H_MIN(p->parent) - 1;
1085                 if (band < IEEE_8021QAZ_MAX_TCS) {
1086                         if (p->command == TC_FIFO_REPLACE)
1087                                 qdisc_state->future_fifos[band] = true;
1088                         else if (p->command == TC_FIFO_DESTROY)
1089                                 qdisc_state->future_fifos[band] = false;
1090                 }
1091         }
1092         if (!mlxsw_sp_qdisc)
1093                 return -EOPNOTSUPP;
1094
1095         if (p->command == TC_FIFO_REPLACE) {
1096                 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
1097                                               mlxsw_sp_qdisc,
1098                                               &mlxsw_sp_qdisc_ops_fifo, NULL);
1099         }
1100
1101         if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
1102                 return -EOPNOTSUPP;
1103
1104         switch (p->command) {
1105         case TC_FIFO_DESTROY:
1106                 return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1107         case TC_FIFO_STATS:
1108                 return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1109                                                 &p->stats);
1110         case TC_FIFO_REPLACE: /* Handled above. */
1111                 break;
1112         }
1113
1114         return -EOPNOTSUPP;
1115 }
1116
1117 int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
1118                            struct tc_fifo_qopt_offload *p)
1119 {
1120         int err;
1121
1122         mutex_lock(&mlxsw_sp_port->qdisc->lock);
1123         err = __mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, p);
1124         mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1125
1126         return err;
1127 }
1128
1129 static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
1130                                         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1131 {
1132         int i;
1133
1134         for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
1135                 mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
1136                                           MLXSW_SP_PORT_DEFAULT_TCLASS);
1137                 mlxsw_sp_port_ets_set(mlxsw_sp_port,
1138                                       MLXSW_REG_QEEC_HR_SUBGROUP,
1139                                       i, 0, false, 0);
1140         }
1141
1142         kfree(mlxsw_sp_qdisc->ets_data);
1143         mlxsw_sp_qdisc->ets_data = NULL;
1144         return 0;
1145 }
1146
1147 static int
1148 mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
1149                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1150 {
1151         return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1152 }
1153
1154 static int
1155 __mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
1156 {
1157         if (nbands > IEEE_8021QAZ_MAX_TCS)
1158                 return -EOPNOTSUPP;
1159
1160         return 0;
1161 }
1162
1163 static int
1164 mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
1165                                  void *params)
1166 {
1167         struct tc_prio_qopt_offload_params *p = params;
1168
1169         return __mlxsw_sp_qdisc_ets_check_params(p->bands);
1170 }
1171
1172 static struct mlxsw_sp_qdisc *
1173 mlxsw_sp_qdisc_walk_cb_clean_stats(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1174                                    void *mlxsw_sp_port)
1175 {
1176         u64 backlog;
1177
1178         if (mlxsw_sp_qdisc->ops) {
1179                 backlog = mlxsw_sp_qdisc->stats_base.backlog;
1180                 if (mlxsw_sp_qdisc->ops->clean_stats)
1181                         mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port,
1182                                                          mlxsw_sp_qdisc);
1183                 mlxsw_sp_qdisc->stats_base.backlog = backlog;
1184         }
1185
1186         return NULL;
1187 }
1188
1189 static void
1190 mlxsw_sp_qdisc_tree_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1191                                 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1192 {
1193         mlxsw_sp_qdisc_walk(mlxsw_sp_qdisc, mlxsw_sp_qdisc_walk_cb_clean_stats,
1194                             mlxsw_sp_port);
1195 }
1196
1197 static int
1198 __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
1199                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1200                              u32 handle, unsigned int nbands,
1201                              const unsigned int *quanta,
1202                              const unsigned int *weights,
1203                              const u8 *priomap)
1204 {
1205         struct mlxsw_sp_qdisc_ets_data *ets_data = mlxsw_sp_qdisc->ets_data;
1206         struct mlxsw_sp_qdisc_ets_band *ets_band;
1207         struct mlxsw_sp_qdisc *child_qdisc;
1208         u8 old_priomap, new_priomap;
1209         int i, band;
1210         int err;
1211
1212         if (!ets_data) {
1213                 ets_data = kzalloc(sizeof(*ets_data), GFP_KERNEL);
1214                 if (!ets_data)
1215                         return -ENOMEM;
1216                 mlxsw_sp_qdisc->ets_data = ets_data;
1217
1218                 for (band = 0; band < mlxsw_sp_qdisc->num_classes; band++) {
1219                         int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
1220
1221                         ets_band = &ets_data->bands[band];
1222                         ets_band->tclass_num = tclass_num;
1223                 }
1224         }
1225
1226         for (band = 0; band < nbands; band++) {
1227                 int tclass_num;
1228
1229                 child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
1230                 ets_band = &ets_data->bands[band];
1231
1232                 tclass_num = ets_band->tclass_num;
1233                 old_priomap = ets_band->prio_bitmap;
1234                 new_priomap = 0;
1235
1236                 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1237                                             MLXSW_REG_QEEC_HR_SUBGROUP,
1238                                             tclass_num, 0, !!quanta[band],
1239                                             weights[band]);
1240                 if (err)
1241                         return err;
1242
1243                 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1244                         if (priomap[i] == band) {
1245                                 new_priomap |= BIT(i);
1246                                 if (BIT(i) & old_priomap)
1247                                         continue;
1248                                 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
1249                                                                 i, tclass_num);
1250                                 if (err)
1251                                         return err;
1252                         }
1253                 }
1254
1255                 ets_band->prio_bitmap = new_priomap;
1256
1257                 if (old_priomap != new_priomap)
1258                         mlxsw_sp_qdisc_tree_clean_stats(mlxsw_sp_port,
1259                                                         child_qdisc);
1260
1261                 err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle,
1262                                                          band, child_qdisc);
1263                 if (err)
1264                         return err;
1265         }
1266         for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
1267                 ets_band = &ets_data->bands[band];
1268                 ets_band->prio_bitmap = 0;
1269
1270                 child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
1271                 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
1272
1273                 mlxsw_sp_port_ets_set(mlxsw_sp_port,
1274                                       MLXSW_REG_QEEC_HR_SUBGROUP,
1275                                       ets_band->tclass_num, 0, false, 0);
1276         }
1277
1278         mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
1279         return 0;
1280 }
1281
1282 static int
1283 mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
1284                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1285                             void *params)
1286 {
1287         struct tc_prio_qopt_offload_params *p = params;
1288         unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
1289
1290         return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
1291                                             handle, p->bands, zeroes,
1292                                             zeroes, p->priomap);
1293 }
1294
1295 static void
1296 __mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1297                                struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1298                                struct gnet_stats_queue *qstats)
1299 {
1300         u64 backlog;
1301
1302         backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
1303                                        mlxsw_sp_qdisc->stats_base.backlog);
1304         qstats->backlog -= backlog;
1305 }
1306
1307 static void
1308 mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1309                               struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1310                               void *params)
1311 {
1312         struct tc_prio_qopt_offload_params *p = params;
1313
1314         __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
1315                                        p->qstats);
1316 }
1317
1318 static int
1319 mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1320                               struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1321                               struct tc_qopt_offload_stats *stats_ptr)
1322 {
1323         struct mlxsw_sp_qdisc *tc_qdisc;
1324         u64 tx_packets = 0;
1325         u64 tx_bytes = 0;
1326         u64 backlog = 0;
1327         u64 drops = 0;
1328         int i;
1329
1330         for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
1331                 tc_qdisc = &mlxsw_sp_qdisc->qdiscs[i];
1332                 mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
1333                                                 &tx_bytes, &tx_packets,
1334                                                 &drops, &backlog);
1335         }
1336
1337         mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
1338                                     tx_bytes, tx_packets, drops, backlog,
1339                                     stats_ptr);
1340         return 0;
1341 }
1342
1343 static void
1344 mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1345                                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1346 {
1347         struct mlxsw_sp_qdisc_stats *stats_base;
1348         struct mlxsw_sp_port_xstats *xstats;
1349         struct rtnl_link_stats64 *stats;
1350         int i;
1351
1352         xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
1353         stats = &mlxsw_sp_port->periodic_hw_stats.stats;
1354         stats_base = &mlxsw_sp_qdisc->stats_base;
1355
1356         stats_base->tx_packets = stats->tx_packets;
1357         stats_base->tx_bytes = stats->tx_bytes;
1358
1359         stats_base->drops = 0;
1360         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1361                 stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
1362                 stats_base->drops += xstats->wred_drop[i];
1363         }
1364
1365         mlxsw_sp_qdisc->stats_base.backlog = 0;
1366 }
1367
1368 static struct mlxsw_sp_qdisc *
1369 mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1370                                u32 parent)
1371 {
1372         int child_index = TC_H_MIN(parent);
1373         int band = child_index - 1;
1374
1375         if (band < 0 || band >= mlxsw_sp_qdisc->num_classes)
1376                 return NULL;
1377         return &mlxsw_sp_qdisc->qdiscs[band];
1378 }
1379
1380 static struct mlxsw_sp_qdisc_ets_band *
1381 mlxsw_sp_qdisc_ets_get_band(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1382                             struct mlxsw_sp_qdisc *child)
1383 {
1384         unsigned int band = child - mlxsw_sp_qdisc->qdiscs;
1385
1386         if (WARN_ON(band >= IEEE_8021QAZ_MAX_TCS))
1387                 band = 0;
1388         return &mlxsw_sp_qdisc->ets_data->bands[band];
1389 }
1390
1391 static u8
1392 mlxsw_sp_qdisc_ets_get_prio_bitmap(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1393                                    struct mlxsw_sp_qdisc *child)
1394 {
1395         return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->prio_bitmap;
1396 }
1397
1398 static int
1399 mlxsw_sp_qdisc_ets_get_tclass_num(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1400                                   struct mlxsw_sp_qdisc *child)
1401 {
1402         return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->tclass_num;
1403 }
1404
1405 static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
1406         .type = MLXSW_SP_QDISC_PRIO,
1407         .check_params = mlxsw_sp_qdisc_prio_check_params,
1408         .replace = mlxsw_sp_qdisc_prio_replace,
1409         .unoffload = mlxsw_sp_qdisc_prio_unoffload,
1410         .destroy = mlxsw_sp_qdisc_prio_destroy,
1411         .get_stats = mlxsw_sp_qdisc_get_prio_stats,
1412         .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
1413         .find_class = mlxsw_sp_qdisc_prio_find_class,
1414         .num_classes = IEEE_8021QAZ_MAX_TCS,
1415         .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
1416         .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
1417 };
1418
1419 static int
1420 mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
1421                                 void *params)
1422 {
1423         struct tc_ets_qopt_offload_replace_params *p = params;
1424
1425         return __mlxsw_sp_qdisc_ets_check_params(p->bands);
1426 }
1427
1428 static int
1429 mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
1430                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1431                            void *params)
1432 {
1433         struct tc_ets_qopt_offload_replace_params *p = params;
1434
1435         return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, mlxsw_sp_qdisc,
1436                                             handle, p->bands, p->quanta,
1437                                             p->weights, p->priomap);
1438 }
1439
1440 static void
1441 mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1442                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1443                              void *params)
1444 {
1445         struct tc_ets_qopt_offload_replace_params *p = params;
1446
1447         __mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
1448                                        p->qstats);
1449 }
1450
1451 static int
1452 mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
1453                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1454 {
1455         return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1456 }
1457
1458 static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
1459         .type = MLXSW_SP_QDISC_ETS,
1460         .check_params = mlxsw_sp_qdisc_ets_check_params,
1461         .replace = mlxsw_sp_qdisc_ets_replace,
1462         .unoffload = mlxsw_sp_qdisc_ets_unoffload,
1463         .destroy = mlxsw_sp_qdisc_ets_destroy,
1464         .get_stats = mlxsw_sp_qdisc_get_prio_stats,
1465         .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
1466         .find_class = mlxsw_sp_qdisc_prio_find_class,
1467         .num_classes = IEEE_8021QAZ_MAX_TCS,
1468         .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
1469         .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
1470 };
1471
1472 /* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
1473  * graph is free of cycles). These operations do not change the parent handle
1474  * though, which means it can be incomplete (if there is more than one class
1475  * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was
1476  * linked to a different class and then removed from the original class).
1477  *
1478  * E.g. consider this sequence of operations:
1479  *
1480  *  # tc qdisc add dev swp1 root handle 1: prio
1481  *  # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
1482  *  RED: set bandwidth to 10Mbit
1483  *  # tc qdisc link dev swp1 handle 13: parent 1:2
1484  *
1485  * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
1486  * child. But RED will still only claim that 1:3 is its parent. If it's removed
1487  * from that band, its only parent will be 1:2, but it will continue to claim
1488  * that it is in fact 1:3.
1489  *
1490  * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
1491  * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
1492  * notification to offload the child Qdisc, based on its parent handle, and use
1493  * the graft operation to validate that the class where the child is actually
1494  * grafted corresponds to the parent handle. If the two don't match, we
1495  * unoffload the child.
1496  */
1497 static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
1498                                 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1499                                 u8 band, u32 child_handle)
1500 {
1501         struct mlxsw_sp_qdisc *old_qdisc;
1502         u32 parent;
1503
1504         if (band < mlxsw_sp_qdisc->num_classes &&
1505             mlxsw_sp_qdisc->qdiscs[band].handle == child_handle)
1506                 return 0;
1507
1508         if (!child_handle) {
1509                 /* This is an invisible FIFO replacing the original Qdisc.
1510                  * Ignore it--the original Qdisc's destroy will follow.
1511                  */
1512                 return 0;
1513         }
1514
1515         /* See if the grafted qdisc is already offloaded on any tclass. If so,
1516          * unoffload it.
1517          */
1518         old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
1519                                                   child_handle);
1520         if (old_qdisc)
1521                 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
1522
1523         parent = TC_H_MAKE(mlxsw_sp_qdisc->handle, band + 1);
1524         mlxsw_sp_qdisc = mlxsw_sp_qdisc->ops->find_class(mlxsw_sp_qdisc,
1525                                                          parent);
1526         if (!WARN_ON(!mlxsw_sp_qdisc))
1527                 mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1528
1529         return -EOPNOTSUPP;
1530 }
1531
1532 static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
1533                                     struct tc_prio_qopt_offload *p)
1534 {
1535         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1536
1537         mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
1538         if (!mlxsw_sp_qdisc)
1539                 return -EOPNOTSUPP;
1540
1541         if (p->command == TC_PRIO_REPLACE)
1542                 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
1543                                               mlxsw_sp_qdisc,
1544                                               &mlxsw_sp_qdisc_ops_prio,
1545                                               &p->replace_params);
1546
1547         if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
1548                 return -EOPNOTSUPP;
1549
1550         switch (p->command) {
1551         case TC_PRIO_DESTROY:
1552                 return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1553         case TC_PRIO_STATS:
1554                 return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1555                                                 &p->stats);
1556         case TC_PRIO_GRAFT:
1557                 return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
1558                                             p->graft_params.band,
1559                                             p->graft_params.child_handle);
1560         default:
1561                 return -EOPNOTSUPP;
1562         }
1563 }
1564
1565 int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
1566                            struct tc_prio_qopt_offload *p)
1567 {
1568         int err;
1569
1570         mutex_lock(&mlxsw_sp_port->qdisc->lock);
1571         err = __mlxsw_sp_setup_tc_prio(mlxsw_sp_port, p);
1572         mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1573
1574         return err;
1575 }
1576
1577 static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
1578                                    struct tc_ets_qopt_offload *p)
1579 {
1580         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1581
1582         mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
1583         if (!mlxsw_sp_qdisc)
1584                 return -EOPNOTSUPP;
1585
1586         if (p->command == TC_ETS_REPLACE)
1587                 return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
1588                                               mlxsw_sp_qdisc,
1589                                               &mlxsw_sp_qdisc_ops_ets,
1590                                               &p->replace_params);
1591
1592         if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle))
1593                 return -EOPNOTSUPP;
1594
1595         switch (p->command) {
1596         case TC_ETS_DESTROY:
1597                 return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1598         case TC_ETS_STATS:
1599                 return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1600                                                 &p->stats);
1601         case TC_ETS_GRAFT:
1602                 return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
1603                                             p->graft_params.band,
1604                                             p->graft_params.child_handle);
1605         default:
1606                 return -EOPNOTSUPP;
1607         }
1608 }
1609
1610 int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
1611                           struct tc_ets_qopt_offload *p)
1612 {
1613         int err;
1614
1615         mutex_lock(&mlxsw_sp_port->qdisc->lock);
1616         err = __mlxsw_sp_setup_tc_ets(mlxsw_sp_port, p);
1617         mutex_unlock(&mlxsw_sp_port->qdisc->lock);
1618
1619         return err;
1620 }
1621
1622 struct mlxsw_sp_qevent_block {
1623         struct list_head binding_list;
1624         struct list_head mall_entry_list;
1625         struct mlxsw_sp *mlxsw_sp;
1626 };
1627
1628 struct mlxsw_sp_qevent_binding {
1629         struct list_head list;
1630         struct mlxsw_sp_port *mlxsw_sp_port;
1631         u32 handle;
1632         int tclass_num;
1633         enum mlxsw_sp_span_trigger span_trigger;
1634         unsigned int action_mask;
1635 };
1636
1637 static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
1638
1639 static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
1640                                           struct mlxsw_sp_mall_entry *mall_entry,
1641                                           struct mlxsw_sp_qevent_binding *qevent_binding,
1642                                           const struct mlxsw_sp_span_agent_parms *agent_parms,
1643                                           int *p_span_id)
1644 {
1645         enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
1646         struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
1647         struct mlxsw_sp_span_trigger_parms trigger_parms = {};
1648         bool ingress;
1649         int span_id;
1650         int err;
1651
1652         err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
1653         if (err)
1654                 return err;
1655
1656         ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
1657         err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
1658         if (err)
1659                 goto err_analyzed_port_get;
1660
1661         trigger_parms.span_id = span_id;
1662         trigger_parms.probability_rate = 1;
1663         err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
1664                                        &trigger_parms);
1665         if (err)
1666                 goto err_agent_bind;
1667
1668         err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, span_trigger,
1669                                            qevent_binding->tclass_num);
1670         if (err)
1671                 goto err_trigger_enable;
1672
1673         *p_span_id = span_id;
1674         return 0;
1675
1676 err_trigger_enable:
1677         mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
1678                                    &trigger_parms);
1679 err_agent_bind:
1680         mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
1681 err_analyzed_port_get:
1682         mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
1683         return err;
1684 }
1685
1686 static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
1687                                              struct mlxsw_sp_qevent_binding *qevent_binding,
1688                                              int span_id)
1689 {
1690         enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
1691         struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
1692         struct mlxsw_sp_span_trigger_parms trigger_parms = {
1693                 .span_id = span_id,
1694         };
1695         bool ingress;
1696
1697         ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
1698
1699         mlxsw_sp_span_trigger_disable(mlxsw_sp_port, span_trigger,
1700                                       qevent_binding->tclass_num);
1701         mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
1702                                    &trigger_parms);
1703         mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
1704         mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
1705 }
1706
1707 static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
1708                                             struct mlxsw_sp_mall_entry *mall_entry,
1709                                             struct mlxsw_sp_qevent_binding *qevent_binding)
1710 {
1711         struct mlxsw_sp_span_agent_parms agent_parms = {
1712                 .to_dev = mall_entry->mirror.to_dev,
1713         };
1714
1715         return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
1716                                               &agent_parms, &mall_entry->mirror.span_id);
1717 }
1718
1719 static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
1720                                                struct mlxsw_sp_mall_entry *mall_entry,
1721                                                struct mlxsw_sp_qevent_binding *qevent_binding)
1722 {
1723         mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
1724 }
1725
1726 static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
1727                                           struct mlxsw_sp_mall_entry *mall_entry,
1728                                           struct mlxsw_sp_qevent_binding *qevent_binding)
1729 {
1730         struct mlxsw_sp_span_agent_parms agent_parms = {
1731                 .session_id = MLXSW_SP_SPAN_SESSION_ID_BUFFER,
1732         };
1733         int err;
1734
1735         err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
1736                                                     DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
1737                                                     &agent_parms.policer_enable,
1738                                                     &agent_parms.policer_id);
1739         if (err)
1740                 return err;
1741
1742         return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
1743                                               &agent_parms, &mall_entry->trap.span_id);
1744 }
1745
1746 static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
1747                                              struct mlxsw_sp_mall_entry *mall_entry,
1748                                              struct mlxsw_sp_qevent_binding *qevent_binding)
1749 {
1750         mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
1751 }
1752
1753 static int
1754 mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
1755                                 struct mlxsw_sp_mall_entry *mall_entry,
1756                                 struct mlxsw_sp_qevent_binding *qevent_binding,
1757                                 struct netlink_ext_ack *extack)
1758 {
1759         if (!(BIT(mall_entry->type) & qevent_binding->action_mask)) {
1760                 NL_SET_ERR_MSG(extack, "Action not supported at this qevent");
1761                 return -EOPNOTSUPP;
1762         }
1763
1764         switch (mall_entry->type) {
1765         case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
1766                 return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
1767         case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
1768                 return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
1769         default:
1770                 /* This should have been validated away. */
1771                 WARN_ON(1);
1772                 return -EOPNOTSUPP;
1773         }
1774 }
1775
1776 static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
1777                                               struct mlxsw_sp_mall_entry *mall_entry,
1778                                               struct mlxsw_sp_qevent_binding *qevent_binding)
1779 {
1780         switch (mall_entry->type) {
1781         case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
1782                 return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
1783         case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
1784                 return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
1785         default:
1786                 WARN_ON(1);
1787                 return;
1788         }
1789 }
1790
1791 static int
1792 mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
1793                                   struct mlxsw_sp_qevent_binding *qevent_binding,
1794                                   struct netlink_ext_ack *extack)
1795 {
1796         struct mlxsw_sp_mall_entry *mall_entry;
1797         int err;
1798
1799         list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
1800                 err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
1801                                                       qevent_binding, extack);
1802                 if (err)
1803                         goto err_entry_configure;
1804         }
1805
1806         return 0;
1807
1808 err_entry_configure:
1809         list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
1810                 mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
1811                                                   qevent_binding);
1812         return err;
1813 }
1814
1815 static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
1816                                                 struct mlxsw_sp_qevent_binding *qevent_binding)
1817 {
1818         struct mlxsw_sp_mall_entry *mall_entry;
1819
1820         list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
1821                 mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
1822                                                   qevent_binding);
1823 }
1824
1825 static int
1826 mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block,
1827                                 struct netlink_ext_ack *extack)
1828 {
1829         struct mlxsw_sp_qevent_binding *qevent_binding;
1830         int err;
1831
1832         list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
1833                 err = mlxsw_sp_qevent_binding_configure(qevent_block,
1834                                                         qevent_binding,
1835                                                         extack);
1836                 if (err)
1837                         goto err_binding_configure;
1838         }
1839
1840         return 0;
1841
1842 err_binding_configure:
1843         list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
1844                 mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
1845         return err;
1846 }
1847
1848 static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
1849 {
1850         struct mlxsw_sp_qevent_binding *qevent_binding;
1851
1852         list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
1853                 mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
1854 }
1855
1856 static struct mlxsw_sp_mall_entry *
1857 mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
1858 {
1859         struct mlxsw_sp_mall_entry *mall_entry;
1860
1861         list_for_each_entry(mall_entry, &block->mall_entry_list, list)
1862                 if (mall_entry->cookie == cookie)
1863                         return mall_entry;
1864
1865         return NULL;
1866 }
1867
1868 static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
1869                                         struct mlxsw_sp_qevent_block *qevent_block,
1870                                         struct tc_cls_matchall_offload *f)
1871 {
1872         struct mlxsw_sp_mall_entry *mall_entry;
1873         struct flow_action_entry *act;
1874         int err;
1875
1876         /* It should not currently be possible to replace a matchall rule. So
1877          * this must be a new rule.
1878          */
1879         if (!list_empty(&qevent_block->mall_entry_list)) {
1880                 NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
1881                 return -EOPNOTSUPP;
1882         }
1883         if (f->rule->action.num_entries != 1) {
1884                 NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
1885                 return -EOPNOTSUPP;
1886         }
1887         if (f->common.chain_index) {
1888                 NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
1889                 return -EOPNOTSUPP;
1890         }
1891         if (f->common.protocol != htons(ETH_P_ALL)) {
1892                 NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
1893                 return -EOPNOTSUPP;
1894         }
1895
1896         act = &f->rule->action.entries[0];
1897         if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
1898                 NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
1899                 return -EOPNOTSUPP;
1900         }
1901
1902         mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
1903         if (!mall_entry)
1904                 return -ENOMEM;
1905         mall_entry->cookie = f->cookie;
1906
1907         if (act->id == FLOW_ACTION_MIRRED) {
1908                 mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
1909                 mall_entry->mirror.to_dev = act->dev;
1910         } else if (act->id == FLOW_ACTION_TRAP) {
1911                 mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
1912         } else {
1913                 NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
1914                 err = -EOPNOTSUPP;
1915                 goto err_unsupported_action;
1916         }
1917
1918         list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);
1919
1920         err = mlxsw_sp_qevent_block_configure(qevent_block, f->common.extack);
1921         if (err)
1922                 goto err_block_configure;
1923
1924         return 0;
1925
1926 err_block_configure:
1927         list_del(&mall_entry->list);
1928 err_unsupported_action:
1929         kfree(mall_entry);
1930         return err;
1931 }
1932
1933 static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
1934                                          struct tc_cls_matchall_offload *f)
1935 {
1936         struct mlxsw_sp_mall_entry *mall_entry;
1937
1938         mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
1939         if (!mall_entry)
1940                 return;
1941
1942         mlxsw_sp_qevent_block_deconfigure(qevent_block);
1943
1944         list_del(&mall_entry->list);
1945         kfree(mall_entry);
1946 }
1947
1948 static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
1949                                          struct tc_cls_matchall_offload *f)
1950 {
1951         struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;
1952
1953         switch (f->command) {
1954         case TC_CLSMATCHALL_REPLACE:
1955                 return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
1956         case TC_CLSMATCHALL_DESTROY:
1957                 mlxsw_sp_qevent_mall_destroy(qevent_block, f);
1958                 return 0;
1959         default:
1960                 return -EOPNOTSUPP;
1961         }
1962 }
1963
1964 static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
1965 {
1966         struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
1967
1968         switch (type) {
1969         case TC_SETUP_CLSMATCHALL:
1970                 return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
1971         default:
1972                 return -EOPNOTSUPP;
1973         }
1974 }
1975
1976 static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
1977                                                                   struct net *net)
1978 {
1979         struct mlxsw_sp_qevent_block *qevent_block;
1980
1981         qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL);
1982         if (!qevent_block)
1983                 return NULL;
1984
1985         INIT_LIST_HEAD(&qevent_block->binding_list);
1986         INIT_LIST_HEAD(&qevent_block->mall_entry_list);
1987         qevent_block->mlxsw_sp = mlxsw_sp;
1988         return qevent_block;
1989 }
1990
1991 static void
1992 mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
1993 {
1994         WARN_ON(!list_empty(&qevent_block->binding_list));
1995         WARN_ON(!list_empty(&qevent_block->mall_entry_list));
1996         kfree(qevent_block);
1997 }
1998
1999 static void mlxsw_sp_qevent_block_release(void *cb_priv)
2000 {
2001         struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
2002
2003         mlxsw_sp_qevent_block_destroy(qevent_block);
2004 }
2005
2006 static struct mlxsw_sp_qevent_binding *
2007 mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
2008                                enum mlxsw_sp_span_trigger span_trigger,
2009                                unsigned int action_mask)
2010 {
2011         struct mlxsw_sp_qevent_binding *binding;
2012
2013         binding = kzalloc(sizeof(*binding), GFP_KERNEL);
2014         if (!binding)
2015                 return ERR_PTR(-ENOMEM);
2016
2017         binding->mlxsw_sp_port = mlxsw_sp_port;
2018         binding->handle = handle;
2019         binding->tclass_num = tclass_num;
2020         binding->span_trigger = span_trigger;
2021         binding->action_mask = action_mask;
2022         return binding;
2023 }
2024
2025 static void
2026 mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
2027 {
2028         kfree(binding);
2029 }
2030
2031 static struct mlxsw_sp_qevent_binding *
2032 mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
2033                                struct mlxsw_sp_port *mlxsw_sp_port,
2034                                u32 handle,
2035                                enum mlxsw_sp_span_trigger span_trigger)
2036 {
2037         struct mlxsw_sp_qevent_binding *qevent_binding;
2038
2039         list_for_each_entry(qevent_binding, &block->binding_list, list)
2040                 if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port &&
2041                     qevent_binding->handle == handle &&
2042                     qevent_binding->span_trigger == span_trigger)
2043                         return qevent_binding;
2044         return NULL;
2045 }
2046
2047 static int
2048 mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
2049                                     struct flow_block_offload *f,
2050                                     enum mlxsw_sp_span_trigger span_trigger,
2051                                     unsigned int action_mask)
2052 {
2053         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2054         struct mlxsw_sp_qevent_binding *qevent_binding;
2055         struct mlxsw_sp_qevent_block *qevent_block;
2056         struct flow_block_cb *block_cb;
2057         struct mlxsw_sp_qdisc *qdisc;
2058         bool register_block = false;
2059         int tclass_num;
2060         int err;
2061
2062         block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
2063         if (!block_cb) {
2064                 qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
2065                 if (!qevent_block)
2066                         return -ENOMEM;
2067                 block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
2068                                                mlxsw_sp_qevent_block_release);
2069                 if (IS_ERR(block_cb)) {
2070                         mlxsw_sp_qevent_block_destroy(qevent_block);
2071                         return PTR_ERR(block_cb);
2072                 }
2073                 register_block = true;
2074         } else {
2075                 qevent_block = flow_block_cb_priv(block_cb);
2076         }
2077         flow_block_cb_incref(block_cb);
2078
2079         qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
2080         if (!qdisc) {
2081                 NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
2082                 err = -ENOENT;
2083                 goto err_find_qdisc;
2084         }
2085
2086         if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
2087                                                    span_trigger))) {
2088                 err = -EEXIST;
2089                 goto err_binding_exists;
2090         }
2091
2092         tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, qdisc);
2093         qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port,
2094                                                         f->sch->handle,
2095                                                         tclass_num,
2096                                                         span_trigger,
2097                                                         action_mask);
2098         if (IS_ERR(qevent_binding)) {
2099                 err = PTR_ERR(qevent_binding);
2100                 goto err_binding_create;
2101         }
2102
2103         err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding,
2104                                                 f->extack);
2105         if (err)
2106                 goto err_binding_configure;
2107
2108         list_add(&qevent_binding->list, &qevent_block->binding_list);
2109
2110         if (register_block) {
2111                 flow_block_cb_add(block_cb, f);
2112                 list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
2113         }
2114
2115         return 0;
2116
2117 err_binding_configure:
2118         mlxsw_sp_qevent_binding_destroy(qevent_binding);
2119 err_binding_create:
2120 err_binding_exists:
2121 err_find_qdisc:
2122         if (!flow_block_cb_decref(block_cb))
2123                 flow_block_cb_free(block_cb);
2124         return err;
2125 }
2126
2127 static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
2128                                                   struct flow_block_offload *f,
2129                                                   enum mlxsw_sp_span_trigger span_trigger)
2130 {
2131         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2132         struct mlxsw_sp_qevent_binding *qevent_binding;
2133         struct mlxsw_sp_qevent_block *qevent_block;
2134         struct flow_block_cb *block_cb;
2135
2136         block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
2137         if (!block_cb)
2138                 return;
2139         qevent_block = flow_block_cb_priv(block_cb);
2140
2141         qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
2142                                                         span_trigger);
2143         if (!qevent_binding)
2144                 return;
2145
2146         list_del(&qevent_binding->list);
2147         mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
2148         mlxsw_sp_qevent_binding_destroy(qevent_binding);
2149
2150         if (!flow_block_cb_decref(block_cb)) {
2151                 flow_block_cb_remove(block_cb, f);
2152                 list_del(&block_cb->driver_list);
2153         }
2154 }
2155
2156 static int
2157 mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
2158                                struct flow_block_offload *f,
2159                                enum mlxsw_sp_span_trigger span_trigger,
2160                                unsigned int action_mask)
2161 {
2162         f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
2163
2164         switch (f->command) {
2165         case FLOW_BLOCK_BIND:
2166                 return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f,
2167                                                            span_trigger,
2168                                                            action_mask);
2169         case FLOW_BLOCK_UNBIND:
2170                 mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
2171                 return 0;
2172         default:
2173                 return -EOPNOTSUPP;
2174         }
2175 }
2176
2177 int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
2178                                               struct flow_block_offload *f)
2179 {
2180         unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR) |
2181                                    BIT(MLXSW_SP_MALL_ACTION_TYPE_TRAP);
2182
2183         return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
2184                                               MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
2185                                               action_mask);
2186 }
2187
2188 int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
2189                                         struct flow_block_offload *f)
2190 {
2191         unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR);
2192
2193         return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
2194                                               MLXSW_SP_SPAN_TRIGGER_ECN,
2195                                               action_mask);
2196 }
2197
2198 int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
2199 {
2200         struct mlxsw_sp_qdisc_state *qdisc_state;
2201
2202         qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
2203         if (!qdisc_state)
2204                 return -ENOMEM;
2205
2206         mutex_init(&qdisc_state->lock);
2207         mlxsw_sp_port->qdisc = qdisc_state;
2208         return 0;
2209 }
2210
2211 void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
2212 {
2213         mutex_destroy(&mlxsw_sp_port->qdisc->lock);
2214         kfree(mlxsw_sp_port->qdisc);
2215 }