/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
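
/* Overview (explanatory note, not part of the original source): mqprio is a
 * classful root qdisc that maps skb priorities to hardware traffic classes,
 * each traffic class owning a contiguous range of TX queues. A typical
 * userspace configuration, shown purely for illustration, would be:
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 \
 *      map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *      queues 1@0 1@1 2@2 hw 0
 *
 * which creates three traffic classes backed by queues 0, 1, and 2-3.
 */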
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
struct mqprio_sched {
	struct Qdisc **qdiscs;	/* one pre-allocated child qdisc per TX queue */
	u16 mode;		/* TC_MQPRIO_MODE_DCB or TC_MQPRIO_MODE_CHANNEL */
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_destroy(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
		struct tc_mqprio_qopt_offload mqprio = { { 0 } };

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
		case TC_MQPRIO_MODE_CHANNEL:
			dev->netdev_ops->ndo_setup_tc(dev,
						      TC_SETUP_QDISC_MQPRIO,
						      &mqprio);
			break;
		default:
			return;
		}
	} else {
		netdev_set_num_tc(dev, 0);
	}
}
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* Limit qopt->hw to the maximum supported offload value. Drivers
	 * have the option of overriding this later if they don't support a
	 * given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested we will leave it to the device
	 * to either populate the queue counts itself or to validate the
	 * provided queue counts. If ndo_setup_tc is not present then
	 * hardware doesn't support offload and we should return an error.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is within the tx range; "last"
		 * equal to real_num_tx_queues means the final queue is in
		 * use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offsets and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}
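
/* Worked example (added for illustration, not in the original source): on a
 * device with real_num_tx_queues = 4, num_tc = 2 with count = {2, 2} and
 * offset = {0, 2} passes the checks above (tc0 owns queues 0-1, tc1 owns
 * queues 2-3), while offset = {0, 1} would fail the overlap check because
 * tc0's last queue (offset 0 + count 2 = 2) extends past tc1's offset of 1.
 */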
static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
};
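
/* Note (added explanation): the mqprio TCA_OPTIONS payload is a fixed
 * struct tc_mqprio_qopt optionally followed by the nested attributes
 * declared in mqprio_policy above. parse_attr() below therefore skips
 * NLA_ALIGN(len) bytes of the fixed struct before handing the remainder
 * to nla_parse().
 */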
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
	struct nlattr *attr;
	int rem;
	int len;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* make certain we can allocate enough classids to handle queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
				 sizeof(*qopt));
		if (err < 0)
			return err;

		/* mode/shaper extensions are only meaningful with offload */
		if (!qopt->hw)
			return -EINVAL;

		if (tb[TCA_MQPRIO_MODE]) {
			priv->flags |= TC_MQPRIO_F_MODE;
			priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
		}

		if (tb[TCA_MQPRIO_SHAPER]) {
			priv->flags |= TC_MQPRIO_F_SHAPER;
			priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
		}

		if (tb[TCA_MQPRIO_MIN_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->min_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MIN_RATE;
		}

		if (tb[TCA_MQPRIO_MAX_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->max_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MAX_RATE;
		}
	}

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own
	 * the queue mapping then run ndo_setup_tc, otherwise use the
	 * supplied and verified mapping.
	 */
	if (qopt->hw) {
		struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
			if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
				return -EINVAL;
			break;
		case TC_MQPRIO_MODE_CHANNEL:
			mqprio.flags = priv->flags;
			if (priv->flags & TC_MQPRIO_F_MODE)
				mqprio.mode = priv->mode;
			if (priv->flags & TC_MQPRIO_F_SHAPER)
				mqprio.shaper = priv->shaper;
			if (priv->flags & TC_MQPRIO_F_MIN_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.min_rate[i] = priv->min_rate[i];
			if (priv->flags & TC_MQPRIO_F_MAX_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.max_rate[i] = priv->max_rate[i];
			break;
		default:
			return -EINVAL;
		}
		err = dev->netdev_ops->ndo_setup_tc(dev,
						    TC_SETUP_QDISC_MQPRIO,
						    &mqprio);
		if (err)
			return err;

		priv->hw_offload = mqprio.qopt.hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;

	return 0;
}
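
/* Note (added explanation): mqprio_init() above only pre-allocates the
 * per-queue child qdiscs; mqprio_attach() below is what actually grafts
 * them onto the device's TX queues, after which the priv->qdiscs array is
 * released since the queues themselves now hold the references.
 */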
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdiscs */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;
	if (dev->flags & IFF_UP)
		dev_deactivate(dev);
	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);
	return 0;
}
static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int i;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (i = 0; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen += qdisc->q.qlen;
		sch->bstats.bytes += qdisc->bstats.bytes;
		sch->bstats.packets += qdisc->bstats.packets;
		sch->qstats.backlog += qdisc->qstats.backlog;
		sch->qstats.drops += qdisc->qstats.drops;
		sch->qstats.requeues += qdisc->qstats.requeues;
		sch->qstats.overlimits += qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;
	return dev_queue->qdisc_sleeping;
}
static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1.
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}
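
/* Worked example (added for illustration): with dev->num_tx_queues = 8 and
 * netdev_get_num_tc() = 4, classids with minor numbers 1-8 resolve to the
 * per-queue child qdiscs, minors TC_H_MIN_PRIORITY through
 * TC_H_MIN_PRIORITY + 3 resolve to the four virtual traffic-class nodes,
 * and everything else returns 0 (not found).
 */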
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen = 0;
		struct Qdisc *qdisc;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		/* Drop the lock here; it is reclaimed before copying out the
		 * statistics. This is required because the d->lock held here
		 * is the lock on dev_queue->qdisc_sleeping, which is also
		 * acquired below.
		 */
		spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);

			qdisc = rtnl_dereference(q->qdisc);
			spin_lock_bh(qdisc_lock(qdisc));
			qlen += qdisc->q.qlen;
			bstats.bytes += qdisc->bstats.bytes;
			bstats.packets += qdisc->bstats.packets;
			qstats.backlog += qdisc->qstats.backlog;
			qstats.drops += qdisc->qstats.drops;
			qstats.requeues += qdisc->qstats.requeues;
			qstats.overlimits += qdisc->qstats.overlimits;
			spin_unlock_bh(qdisc_lock(qdisc));
		}

		/* Reclaim root sleeping lock before completing stats */
		spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
					  d, NULL, &sch->bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL,
					  &sch->qstats, sch->q.qlen) < 0)
			return -1;
	}
	return 0;
}
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}
static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};
static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};
static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");