// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

#include "sch_mqprio_lib.h"

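/* Per-qdisc private state. @qdiscs holds the pre-allocated per-queue child
 * qdiscs between init and attach; the remaining fields mirror the optional
 * netlink attributes (mode, shaper, per-TC min/max rates) and the offload
 * level reported back by the driver.
 */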
struct mqprio_sched {
	struct Qdisc **qdiscs;
	u16 mode;
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

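/* Build a struct tc_mqprio_qopt_offload from the supplied qopt and the parsed
 * private state, and hand it to the driver via
 * ndo_setup_tc(TC_SETUP_QDISC_MQPRIO). The driver may adjust qopt.hw;
 * whatever it reports is recorded in priv->hw_offload for later dumps and
 * teardown.
 */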
static int mqprio_enable_offload(struct Qdisc *sch,
				 const struct tc_mqprio_qopt *qopt,
				 struct netlink_ext_ack *extack)
{
	struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err, i;

	switch (priv->mode) {
	case TC_MQPRIO_MODE_DCB:
		if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
			return -EINVAL;
		break;
	case TC_MQPRIO_MODE_CHANNEL:
		mqprio.flags = priv->flags;
		if (priv->flags & TC_MQPRIO_F_MODE)
			mqprio.mode = priv->mode;
		if (priv->flags & TC_MQPRIO_F_SHAPER)
			mqprio.shaper = priv->shaper;
		if (priv->flags & TC_MQPRIO_F_MIN_RATE)
			for (i = 0; i < mqprio.qopt.num_tc; i++)
				mqprio.min_rate[i] = priv->min_rate[i];
		if (priv->flags & TC_MQPRIO_F_MAX_RATE)
			for (i = 0; i < mqprio.qopt.num_tc; i++)
				mqprio.max_rate[i] = priv->max_rate[i];
		break;
	default:
		return -EINVAL;
	}

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
					    &mqprio);
	if (err)
		return err;

	priv->hw_offload = mqprio.qopt.hw;

	return 0;
}

static void mqprio_disable_offload(struct Qdisc *sch)
{
	struct tc_mqprio_qopt_offload mqprio = { { 0 } };
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	switch (priv->mode) {
	case TC_MQPRIO_MODE_DCB:
	case TC_MQPRIO_MODE_CHANNEL:
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
					      &mqprio);
		break;
	}
}

static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_put(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc)
		mqprio_disable_offload(sch);
	else
		netdev_set_num_tc(dev, 0);
}

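/* Validate the user-supplied tc_mqprio_qopt. Queue counts are checked here
 * when offload is not requested, or when the device asks for stack-side
 * validation via tc_mqprio_caps::validate_queue_counts; otherwise the queue
 * counts are left to the driver.
 */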
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
			    const struct tc_mqprio_caps *caps,
			    struct netlink_ext_ack *extack)
{
	int err;

	/* Limit qopt->hw to maximum supported offload value. Drivers have
	 * the option of overriding this later if they don't support a
	 * given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested, we will leave 3 options to the
	 * device driver:
	 * - populate the queue counts itself (and ignore what was requested)
	 * - validate the provided queue counts by itself (and apply them)
	 * - request queue count validation here (and apply them)
	 */
	err = mqprio_validate_qopt(dev, qopt,
				   !qopt->hw || caps->validate_queue_counts,
				   false, extack);
	if (err)
		return err;

	/* If ndo_setup_tc is not present then hardware doesn't support offload
	 * and we should return an error.
	 */
	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
		return -EINVAL;

	return 0;
}

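/* TCA_OPTIONS for mqprio carries a struct tc_mqprio_qopt directly, optionally
 * followed by the netlink attributes below. parse_attr() therefore skips
 * NLA_ALIGN(sizeof(struct tc_mqprio_qopt)) bytes of payload before parsing
 * the attribute stream.
 */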
static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE] = { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER] = { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64] = { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64] = { .type = NLA_NESTED },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
			       struct nlattr *opt)
{
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
	struct nlattr *attr;
	int i, rem, err;

	err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
			 sizeof(*qopt));
	if (err < 0)
		return err;

	if (!qopt->hw)
		return -EINVAL;

	if (tb[TCA_MQPRIO_MODE]) {
		priv->flags |= TC_MQPRIO_F_MODE;
		priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
	}

	if (tb[TCA_MQPRIO_SHAPER]) {
		priv->flags |= TC_MQPRIO_F_SHAPER;
		priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
	}

	if (tb[TCA_MQPRIO_MIN_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
			return -EINVAL;
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
				return -EINVAL;
			if (i >= qopt->num_tc)
				break;
			priv->min_rate[i] = *(u64 *)nla_data(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MIN_RATE;
	}

	if (tb[TCA_MQPRIO_MAX_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
			return -EINVAL;
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
				return -EINVAL;
			if (i >= qopt->num_tc)
				break;
			priv->max_rate[i] = *(u64 *)nla_data(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MAX_RATE;
	}

	return 0;
}

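/* Qdisc initialization: parse and validate the options, pre-allocate one
 * child qdisc per TX queue, and either hand the configuration to the driver
 * for offload or apply the num_tc/queue mappings in software; the
 * priority-to-TC map is always applied by the stack.
 *
 * Illustrative iproute2 invocation (syntax per tc-mqprio(8); the values are
 * an example only):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 \
 *      map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *      queues 1@0 1@1 2@2 hw 0
 */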
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	struct tc_mqprio_caps caps;
	int len;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* make certain we can allocate enough classids to handle queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_MQPRIO,
				 &caps, sizeof(caps));

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt, &caps, extack))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = mqprio_parse_nlattr(sch, qopt, opt);
		if (err)
			return err;
	}

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)), extack);
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own
	 * the queue mapping then run ndo_setup_tc, otherwise use the
	 * supplied and verified mapping.
	 */
	if (qopt->hw) {
		err = mqprio_enable_offload(sch, qopt, extack);
		if (err)
			return err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

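/* Graft the qdiscs pre-allocated in mqprio_init() onto their TX queues once
 * the device is ready, then drop the temporary priv->qdiscs array; from this
 * point the children are reachable only through the netdev queues.
 */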
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

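/* Dump mirrors the format accepted on configuration: a struct tc_mqprio_qopt
 * in TCA_OPTIONS followed by the optional mode/shaper/rate attributes.
 * Statistics from all child qdiscs are folded into this qdisc's counters
 * first.
 */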
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	gnet_stats_basic_sync_init(&sch->bstats);
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
				     &qdisc->bstats, false);
		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
				     &qdisc->qstats);
		sch->q.qlen += qdisc_qlen(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	mqprio_qopt_reconstruct(dev, &opt);
	opt.hw = priv->hw_offload;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);
nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

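/* Class statistics: classids at or above TC_H_MIN_PRIORITY name traffic
 * classes, whose stats are aggregated from every TX queue in the class;
 * lower classids map 1:1 to per-queue child qdiscs.
 */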
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_sync bstats;
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		gnet_stats_basic_sync_init(&bstats);
		/* Drop the lock here; it will be reclaimed before touching
		 * statistics. This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);

			spin_lock_bh(qdisc_lock(qdisc));

			gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
					     &qdisc->bstats, false);
			gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
					     &qdisc->qstats);
			sch->q.qlen += qdisc_qlen(qdisc);

			spin_unlock_bh(qdisc_lock(qdisc));
		}
		qlen = qdisc_qlen(sch) + qstats.qlen;

		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(d, sch->cpu_bstats,
					  &sch->bstats, true) < 0 ||
		    qdisc_qstats_copy(d, sch) < 0)
			return -1;
	}
	return 0;
}

static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (!tc_qdisc_stats_dump(sch, ntx + TC_H_MIN_PRIORITY, arg))
			return;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.change_real_num_tx = mq_change_real_num_tx,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");