/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Nogah Frankel <nogahf@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/pkt_cls.h>
#include <net/red.h>

#include "spectrum.h"
#include "reg.h"

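/* Prio bands are mapped to hardware traffic classes in reverse order: band 0
 * uses the highest traffic class (IEEE_8021QAZ_MAX_TCS - 1) and the last band
 * uses traffic class 0. Child qdisc minor numbers, as encoded in the parent
 * handle, are 1-based, so child N corresponds to band N - 1.
 */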
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
        MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))

enum mlxsw_sp_qdisc_type {
        MLXSW_SP_QDISC_NO_QDISC,
        MLXSW_SP_QDISC_RED,
        MLXSW_SP_QDISC_PRIO,
};

struct mlxsw_sp_qdisc_ops {
        enum mlxsw_sp_qdisc_type type;
        int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                            void *params);
        int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
        int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
        int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
                         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                         struct tc_qopt_offload_stats *stats_ptr);
        int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                          void *xstats_ptr);
        void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
        /* unoffload - to be used for a qdisc that stops being offloaded without
         * being destroyed.
         */
        void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
};

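/* Per-qdisc bookkeeping. stats_base and xstats_base hold snapshots of the
 * relevant hardware counters, taken when the qdisc is offloaded (or when its
 * priority mapping changes), so that get_stats/get_xstats can report deltas
 * relative to the moment the qdisc was installed.
 */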
struct mlxsw_sp_qdisc {
        u32 handle;
        u8 tclass_num;
        u8 prio_bitmap;
        union {
                struct red_stats red;
        } xstats_base;
        struct mlxsw_sp_qdisc_stats {
                u64 tx_bytes;
                u64 tx_packets;
                u64 drops;
                u64 overlimits;
                u64 backlog;
        } stats_base;

        struct mlxsw_sp_qdisc_ops *ops;
};

static bool
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
                       enum mlxsw_sp_qdisc_type type)
{
        return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
               mlxsw_sp_qdisc->ops->type == type &&
               mlxsw_sp_qdisc->handle == handle;
}

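/* Resolve a TC parent handle to the qdisc descriptor it refers to: TC_H_ROOT
 * maps to the port's root qdisc; any other parent must be a class of the root
 * qdisc (major part equal to the root handle, minor part identifying the band,
 * 1-based and at most IEEE_8021QAZ_MAX_TCS) and maps to the corresponding
 * per-tclass child qdisc.
 */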
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
                    bool root_only)
{
        int tclass, child_index;

        if (parent == TC_H_ROOT)
                return mlxsw_sp_port->root_qdisc;

        if (root_only || !mlxsw_sp_port->root_qdisc ||
            !mlxsw_sp_port->root_qdisc->ops ||
            TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
            TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
                return NULL;

        child_index = TC_H_MIN(parent);
        tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
        return &mlxsw_sp_port->tclass_qdiscs[tclass];
}

static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
        int i;

        if (mlxsw_sp_port->root_qdisc->handle == handle)
                return mlxsw_sp_port->root_qdisc;

        if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
                return NULL;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
                        return &mlxsw_sp_port->tclass_qdiscs[i];

        return NULL;
}

static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        int err = 0;

        if (!mlxsw_sp_qdisc)
                return 0;

        if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
                err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
                                                   mlxsw_sp_qdisc);

        mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
        mlxsw_sp_qdisc->ops = NULL;
        return err;
}

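/* Offload (or re-configure) a qdisc at a given location: if the location
 * already holds a qdisc of a different type it is destroyed first, the new
 * parameters are validated and applied, and the stats baseline is reset
 * whenever the handle changes (i.e. a genuinely new qdisc was installed).
 * On failure the location is unoffloaded and torn down.
 */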
static int
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                       struct mlxsw_sp_qdisc_ops *ops, void *params)
{
        int err;

        if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
                /* If this location already holds a qdisc of the same type, its
                 * configuration can simply be overridden. Otherwise, the old
                 * qdisc must be removed before the new one is set up.
                 */
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
        err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
        if (err)
                goto err_bad_param;

        err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
        if (err)
                goto err_config;

        if (mlxsw_sp_qdisc->handle != handle) {
                mlxsw_sp_qdisc->ops = ops;
                if (ops->clean_stats)
                        ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
        }

        mlxsw_sp_qdisc->handle = handle;
        return 0;

err_bad_param:
err_config:
        if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
                ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

        mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
        return err;
}

static int
mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                         struct tc_qopt_offload_stats *stats_ptr)
{
        if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
            mlxsw_sp_qdisc->ops->get_stats)
                return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
                                                      mlxsw_sp_qdisc,
                                                      stats_ptr);

        return -EOPNOTSUPP;
}

static int
mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                          void *xstats_ptr)
{
        if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
            mlxsw_sp_qdisc->ops->get_xstats)
                return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
                                                       mlxsw_sp_qdisc,
                                                       xstats_ptr);

        return -EOPNOTSUPP;
}

static void
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
                                       u8 prio_bitmap, u64 *tx_packets,
                                       u64 *tx_bytes)
{
        int i;

        *tx_packets = 0;
        *tx_bytes = 0;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                if (prio_bitmap & BIT(i)) {
                        *tx_packets += xstats->tx_packets[i];
                        *tx_bytes += xstats->tx_bytes[i];
                }
        }
}

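/* Program WRED/ECN for one traffic class of a port: CWTP sets up the profile
 * (min/max thresholds, given in buffer cells, and the drop/mark probability),
 * and CWTPM binds that profile to the port and traffic class, with is_ecn
 * selecting ECN marking rather than dropping.
 */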
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
                                  int tclass_num, u32 min, u32 max,
                                  u32 probability, bool is_ecn)
{
        char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
        char cwtp_cmd[MLXSW_REG_CWTP_LEN];
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        int err;

        mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
        mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
                                    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
                                    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
                                    probability);

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
        if (err)
                return err;

        mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
                             MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static int
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
                                   int tclass_num)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

        mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
                             MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                                        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;
        struct red_stats *red_base;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
        stats_base = &mlxsw_sp_qdisc->stats_base;
        red_base = &mlxsw_sp_qdisc->xstats_base.red;

        mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
                                               mlxsw_sp_qdisc->prio_bitmap,
                                               &stats_base->tx_packets,
                                               &stats_base->tx_bytes);
        red_base->prob_mark = xstats->ecn;
        red_base->prob_drop = xstats->wred_drop[tclass_num];
        red_base->pdrop = xstats->tail_drop[tclass_num];

        stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
        stats_base->drops = red_base->prob_drop + red_base->pdrop;

        stats_base->backlog = 0;
}

static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;

        if (root_qdisc != mlxsw_sp_qdisc)
                root_qdisc->stats_base.backlog -=
                                        mlxsw_sp_qdisc->stats_base.backlog;

        return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
                                                  mlxsw_sp_qdisc->tclass_num);
}

static int
mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
                                struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                                void *params)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct tc_red_qopt_offload_params *p = params;

        if (p->min > p->max) {
                dev_err(mlxsw_sp->bus_info->dev,
                        "spectrum: RED: min %u is bigger than max %u\n", p->min,
                        p->max);
                return -EINVAL;
        }
        if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
                dev_err(mlxsw_sp->bus_info->dev,
                        "spectrum: RED: max value %u is too big\n", p->max);
                return -EINVAL;
        }
        if (p->min == 0 || p->max == 0) {
                dev_err(mlxsw_sp->bus_info->dev,
                        "spectrum: RED: 0 value is illegal for min and max\n");
                return -EINVAL;
        }
        return 0;
}

static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                           void *params)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct tc_red_qopt_offload_params *p = params;
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        u32 min, max;
        u64 prob;

        /* calculate probability in percentage */
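        /* p->probability is a fixed-point fraction of 2^32 (RED's max_P), so
         * multiplying by 100 and dividing by 2^32 (done as two divisions by
         * 2^16 below) yields a whole-number percentage for the device.
         */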
        prob = p->probability;
        prob *= 100;
        prob = DIV_ROUND_UP(prob, 1 << 16);
        prob = DIV_ROUND_UP(prob, 1 << 16);
        min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
        max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
        return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
                                                 max, prob, p->is_ecn);
}

static void
mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                             void *params)
{
        struct tc_red_qopt_offload_params *p = params;
        u64 backlog;

        backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                       mlxsw_sp_qdisc->stats_base.backlog);
        p->qstats->backlog -= backlog;
        mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                              void *xstats_ptr)
{
        struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        struct mlxsw_sp_port_xstats *xstats;
        struct red_stats *res = xstats_ptr;
        int early_drops, marks, pdrops;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

        early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
        marks = xstats->ecn - xstats_base->prob_mark;
        pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;

        res->pdrop += pdrops;
        res->prob_drop += early_drops;
        res->prob_mark += marks;

        xstats_base->pdrop += pdrops;
        xstats_base->prob_drop += early_drops;
        xstats_base->prob_mark += marks;
        return 0;
}

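/* Report RED qdisc statistics as deltas against the baseline recorded in
 * stats_base: overlimits are WRED drops plus ECN marks, drops are WRED drops
 * plus tail drops for this traffic class, and the backlog is converted from
 * buffer cells to bytes before being handed back to the qdisc layer.
 */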
static int
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                             struct tc_qopt_offload_stats *stats_ptr)
{
        u64 tx_bytes, tx_packets, overlimits, drops, backlog;
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
        stats_base = &mlxsw_sp_qdisc->stats_base;

        mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
                                               mlxsw_sp_qdisc->prio_bitmap,
                                               &tx_packets, &tx_bytes);
        tx_bytes = tx_bytes - stats_base->tx_bytes;
        tx_packets = tx_packets - stats_base->tx_packets;

        overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
                     stats_base->overlimits;
        drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
                stats_base->drops;
        backlog = xstats->backlog[tclass_num];

        _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
        stats_ptr->qstats->overlimits += overlimits;
        stats_ptr->qstats->drops += drops;
        stats_ptr->qstats->backlog +=
                                mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                                     backlog) -
                                mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                                     stats_base->backlog);

        stats_base->backlog = backlog;
        stats_base->drops += drops;
        stats_base->overlimits += overlimits;
        stats_base->tx_bytes += tx_bytes;
        stats_base->tx_packets += tx_packets;
        return 0;
}

#define MLXSW_SP_PORT_DEFAULT_TCLASS 0

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
        .type = MLXSW_SP_QDISC_RED,
        .check_params = mlxsw_sp_qdisc_red_check_params,
        .replace = mlxsw_sp_qdisc_red_replace,
        .unoffload = mlxsw_sp_qdisc_red_unoffload,
        .destroy = mlxsw_sp_qdisc_red_destroy,
        .get_stats = mlxsw_sp_qdisc_get_red_stats,
        .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
        .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
};

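/* Entry point for RED offload requests coming from the kernel qdisc layer.
 * As a purely illustrative example (interface name and values are made up),
 * a command along the lines of
 *
 *   tc qdisc replace dev swp1 parent 1:8 handle 18: red limit 1000000 \
 *      min 200000 max 400000 avpkt 1000 burst 201 probability 0.1 ecn
 *
 * would reach this function as a TC_RED_REPLACE command, provided sch_red
 * requests offload on this port.
 */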
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct tc_red_qopt_offload *p)
{
        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

        mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
        if (!mlxsw_sp_qdisc)
                return -EOPNOTSUPP;

        if (p->command == TC_RED_REPLACE)
                return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
                                              mlxsw_sp_qdisc,
                                              &mlxsw_sp_qdisc_ops_red,
                                              &p->set);

        if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
                                    MLXSW_SP_QDISC_RED))
                return -EOPNOTSUPP;

        switch (p->command) {
        case TC_RED_DESTROY:
                return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
        case TC_RED_XSTATS:
                return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
                                                 p->xstats);
        case TC_RED_STATS:
                return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
                                                &p->stats);
        default:
                return -EOPNOTSUPP;
        }
}

static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        int i;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
                                          MLXSW_SP_PORT_DEFAULT_TCLASS);
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
                                       &mlxsw_sp_port->tclass_qdiscs[i]);
                mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
        }

        return 0;
}

static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
                                 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                                 void *params)
{
        struct tc_prio_qopt_offload_params *p = params;

        if (p->bands > IEEE_8021QAZ_MAX_TCS)
                return -EOPNOTSUPP;

        return 0;
}

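/* Apply a PRIO configuration: each band is backed by one hardware traffic
 * class (in reverse order, see MLXSW_SP_PRIO_BAND_TO_TCLASS), and every
 * switch priority whose priomap entry points at a band is mapped to that
 * band's traffic class. When a child's priority set changes, its stats
 * baseline is refreshed (keeping the backlog), and child qdiscs of bands
 * beyond p->bands are destroyed.
 */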
static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                            void *params)
{
        struct tc_prio_qopt_offload_params *p = params;
        struct mlxsw_sp_qdisc *child_qdisc;
        int tclass, i, band, backlog;
        u8 old_priomap;
        int err;

        for (band = 0; band < p->bands; band++) {
                tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
                child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
                old_priomap = child_qdisc->prio_bitmap;
                child_qdisc->prio_bitmap = 0;
                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                        if (p->priomap[i] == band) {
                                child_qdisc->prio_bitmap |= BIT(i);
                                if (BIT(i) & old_priomap)
                                        continue;
                                err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
                                                                i, tclass);
                                if (err)
                                        return err;
                        }
                }
                if (old_priomap != child_qdisc->prio_bitmap &&
                    child_qdisc->ops && child_qdisc->ops->clean_stats) {
                        backlog = child_qdisc->stats_base.backlog;
                        child_qdisc->ops->clean_stats(mlxsw_sp_port,
                                                      child_qdisc);
                        child_qdisc->stats_base.backlog = backlog;
                }
        }
        for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
                tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
                child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
                child_qdisc->prio_bitmap = 0;
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
        }
        return 0;
}

static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                              void *params)
{
        struct tc_prio_qopt_offload_params *p = params;
        u64 backlog;

        backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                       mlxsw_sp_qdisc->stats_base.backlog);
        p->qstats->backlog -= backlog;
}

static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                              struct tc_qopt_offload_stats *stats_ptr)
{
        u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;
        struct rtnl_link_stats64 *stats;
        int i;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
        stats = &mlxsw_sp_port->periodic_hw_stats.stats;
        stats_base = &mlxsw_sp_qdisc->stats_base;

        tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
        tx_packets = stats->tx_packets - stats_base->tx_packets;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                drops += xstats->tail_drop[i];
                drops += xstats->wred_drop[i];
                backlog += xstats->backlog[i];
        }
        drops = drops - stats_base->drops;

        _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
        stats_ptr->qstats->drops += drops;
        stats_ptr->qstats->backlog +=
                                mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                                     backlog) -
                                mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                                     stats_base->backlog);
        stats_base->backlog = backlog;
        stats_base->drops += drops;
        stats_base->tx_bytes += tx_bytes;
        stats_base->tx_packets += tx_packets;
        return 0;
}

static void
mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                                         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;
        struct rtnl_link_stats64 *stats;
        int i;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
        stats = &mlxsw_sp_port->periodic_hw_stats.stats;
        stats_base = &mlxsw_sp_qdisc->stats_base;

        stats_base->tx_packets = stats->tx_packets;
        stats_base->tx_bytes = stats->tx_bytes;

        stats_base->drops = 0;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                stats_base->drops += xstats->tail_drop[i];
                stats_base->drops += xstats->wred_drop[i];
        }

        mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
        .type = MLXSW_SP_QDISC_PRIO,
        .check_params = mlxsw_sp_qdisc_prio_check_params,
        .replace = mlxsw_sp_qdisc_prio_replace,
        .unoffload = mlxsw_sp_qdisc_prio_unoffload,
        .destroy = mlxsw_sp_qdisc_prio_destroy,
        .get_stats = mlxsw_sp_qdisc_get_prio_stats,
        .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};

/* Grafting is not supported in mlxsw. It results in un-offloading of the
 * grafted qdisc as well as of the qdisc at the graft's target location.
 * (However, if the graft targets the location where the qdisc already
 * resides, it is ignored completely and causes no un-offloading.)
 */
static int
mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                          struct tc_prio_qopt_offload_graft_params *p)
{
        int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
        struct mlxsw_sp_qdisc *old_qdisc;

        /* Check if the grafted qdisc is already in its "new" location. If so -
         * nothing needs to be done.
         */
        if (p->band < IEEE_8021QAZ_MAX_TCS &&
            mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
                return 0;

        /* See if the grafted qdisc is already offloaded on any tclass. If so,
         * unoffload it.
         */
        old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
                                                  p->child_handle);
        if (old_qdisc)
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);

        mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
                               &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
        return -EOPNOTSUPP;
}

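/* Entry point for PRIO offload requests from the kernel qdisc layer. Only the
 * root qdisc of a port can be a PRIO qdisc here. As a purely illustrative
 * example (interface name and priomap are made up), a command such as
 *
 *   tc qdisc replace dev swp1 root handle 1: prio bands 3 \
 *      priomap 2 2 2 1 1 1 0 0 0 0 0 0 0 0 0 0
 *
 * would arrive as a TC_PRIO_REPLACE command, provided sch_prio requests
 * offload on this port.
 */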
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct tc_prio_qopt_offload *p)
{
        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

        mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
        if (!mlxsw_sp_qdisc)
                return -EOPNOTSUPP;

        if (p->command == TC_PRIO_REPLACE)
                return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
                                              mlxsw_sp_qdisc,
                                              &mlxsw_sp_qdisc_ops_prio,
                                              &p->replace_params);

        if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
                                    MLXSW_SP_QDISC_PRIO))
                return -EOPNOTSUPP;

        switch (p->command) {
        case TC_PRIO_DESTROY:
                return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
        case TC_PRIO_STATS:
                return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
                                                &p->stats);
        case TC_PRIO_GRAFT:
                return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
                                                 &p->graft_params);
        default:
                return -EOPNOTSUPP;
        }
}

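/* Called on port creation/removal: allocate the root qdisc descriptor plus
 * one descriptor per traffic class. The root descriptor initially covers all
 * eight switch priorities (prio_bitmap 0xff) and uses the port's default
 * traffic class.
 */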
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
        int i;

        mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
        if (!mlxsw_sp_qdisc)
                goto err_root_qdisc_init;

        mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
        mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
        mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;

        mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc) * IEEE_8021QAZ_MAX_TCS,
                                 GFP_KERNEL);
        if (!mlxsw_sp_qdisc)
                goto err_tclass_qdiscs_init;

        mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;

        return 0;

err_tclass_qdiscs_init:
        kfree(mlxsw_sp_port->root_qdisc);
err_root_qdisc_init:
        return -ENOMEM;
}

void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
        kfree(mlxsw_sp_port->tclass_qdiscs);
        kfree(mlxsw_sp_port->root_qdisc);
}