// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

#include <trace/events/qdisc.h>
/*

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in an order and at times
   determined by the algorithm hidden in it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (look at cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to make some sanity
   checks and do the part of the work which is common to all qdiscs,
   and to provide rtnetlink notifications.

   All real intelligent work is done inside qdisc modules.

   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but it does not mean that the queue is empty, it just means that
   the discipline does not want to send anything this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not the
   real packet queue, but q->q.qlen must still be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If the packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore

   ---peek

   like dequeue but without removing a packet from the queue

   ---reset

   returns the qdisc to its initial state: purge all buffers, clear all
   timers, counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
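/* Illustrative sketch only (not part of the original file): how a qdisc
 * module typically wires the routines described above into a Qdisc_ops
 * and hands it to register_qdisc() below. The callback names follow
 * include/net/sch_generic.h; the "example_fifo" qdisc itself is
 * hypothetical, and .init/.reset etc. are omitted (a real qdisc would at
 * least set sch->limit in .init).
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 *
 *	static struct Qdisc_ops example_fifo_qdisc_ops __read_mostly = {
 *		.id		= "example_fifo",
 *		.priv_size	= 0,
 *		.enqueue	= example_enqueue,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * The module would then call register_qdisc(&example_fifo_qdisc_ops) from
 * its module_init() and unregister_qdisc() from its module_exit().
 */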
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);

/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/

/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
	struct Qdisc_ops *q, **qp;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->find && cops->walk && cops->leaf))

		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))

	write_unlock(&qdisc_mod_lock);

EXPORT_SYMBOL(register_qdisc);

void unregister_qdisc(struct Qdisc_ops *qops)
	struct Qdisc_ops *q, **qp;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
	write_unlock(&qdisc_mod_lock);

	WARN(err, "unregister qdisc(%s) failed\n", qops->id);

EXPORT_SYMBOL(unregister_qdisc);

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
	read_lock(&qdisc_mod_lock);
	strscpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))

/* Set new default qdisc to use */
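/* Note (not from the original file): besides the boot-time default set up
 * under CONFIG_NET_SCH_DEFAULT below, this is normally reached at runtime
 * through the net.core.default_qdisc sysctl (handled outside this file),
 * e.g. "sysctl -w net.core.default_qdisc=fq_codel".
 */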
int qdisc_set_default(const char *name)
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);

		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;

	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
late_initcall(sch_default_qdisc);
#endif

/* We know handle. Find qdisc among all qdisc's attached to device
 * (root qdisc, all its children, children of children etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */
static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
				   lockdep_rtnl_is_held()) {
		if (q->handle == handle)

void qdisc_hash_add(struct Qdisc *q, bool invisible)
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
			q->flags |= TCQ_F_INVISIBLE;
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		hash_del_rcu(&q->hash);
EXPORT_SYMBOL(qdisc_hash_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
	q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,

struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
	struct netdev_queue *nq;

	q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);

	nq = dev_ingress_queue_rcu(dev);
		q = qdisc_match_from_root(nq->qdisc_sleeping, handle);

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	cl = cops->find(p, classid);

	return cops->leaf(p, cl);

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
	struct Qdisc_ops *q = NULL;

	read_lock(&qdisc_mod_lock);
	for (q = qdisc_base; q; q = q->next) {
		if (nla_strcmp(kind, q->id) == 0) {
			if (!try_module_get(q->owner))
	read_unlock(&qdisc_mod_lock);
/* The linklayer setting was not transferred from iproute2, in older
 * versions, and the rate table lookup systems have been dropped in
 * the kernel. To keep backward compatibility with older iproute2 tc
 * utils, we detect the linklayer setting by detecting if the rate
 * table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value. The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find the low and high table entries for
 * mapping this cell. If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding mpu to the nearest 48 bytes cell/entry,
 * and then rounding up to the next cell, calculating the table entry
 * one below, and comparing.
 */
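/* Illustrative worked example (values assumed for illustration): with
 * mpu = 0 and cell_log = 3, low = roundup(0, 48) = 0 and
 * high = roundup(1, 48) = 48, so cell_low = 0 and cell_high = 5;
 * if rtab[0] == rtab[5], the table was built for ATM framing.
 */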
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
	int low = roundup(r->mpu, 48);
	int high = roundup(low+1, 48);
	int cell_low = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	return TC_LINKLAYER_ETHERNET;

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct netlink_ext_ack *extack)
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 ||
	    r->cell_log == 0 || r->cell_log >= 32 ||
	    nla_len(tab) != TC_RTAB_SIZE) {
		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;

		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");

EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {

EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
					       struct netlink_ext_ack *extack)
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;

	err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,

	if (!tb[TCA_STAB_BASE]) {
		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

		if (!tb[TCA_STAB_DATA]) {
			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);

	if (tsize != s->tsize || (!tab && tsize > 0)) {
		NL_SET_ERR_MSG(extack, "Invalid size of size table");
		return ERR_PTR(-EINVAL);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
		    memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))

	if (s->size_log > STAB_SIZE_LOG_MAX ||
	    s->cell_log > STAB_SIZE_LOG_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
		return ERR_PTR(-EINVAL);

	stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	memcpy(stab->data, tab, flex_array_size(stab, data, tsize));

	list_add_tail(&stab->list, &qdisc_stab_list);

void qdisc_put_stab(struct qdisc_size_table *tab)
	if (--tab->refcnt == 0) {
		list_del(&tab->list);

EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
	nest = nla_nest_start_noflag(skb, TCA_STAB);
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
		pkt_len = stab->data[stab->szopts.tsize - 1] *
			  (slot / stab->szopts.tsize) +
			  stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
	if (unlikely(pkt_len < 1))
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,

	__netif_schedule(qdisc_root(wd->qdisc));

	return HRTIMER_NORESTART;

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
	hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
EXPORT_SYMBOL(qdisc_watchdog_init_clockid);

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
	qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))

	if (hrtimer_is_queued(&wd->timer)) {
		/* If timer is already set in [expires, expires + delta_ns],
		 * do not reprogram it.
		 */
		if (wd->last_expires - expires <= delta_ns)

	wd->last_expires = expires;
	hrtimer_start_range_ns(&wd->timer,
			       ns_to_ktime(expires),
			       HRTIMER_MODE_ABS_PINNED);
EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
	hrtimer_cancel(&wd->timer);
EXPORT_SYMBOL(qdisc_watchdog_cancel);

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
	struct hlist_head *h;

	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
	for (i = 0; i < n; i++)
		INIT_HLIST_HEAD(&h[i]);

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
	nsize = clhash->hashsize * 2;

	nhash = qdisc_class_hash_alloc(nsize);

	ohash = clhash->hash;
	osize = clhash->hashsize;

	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	clhash->hashsize = size;
	clhash->hashmask = size - 1;
	clhash->hashelems = 0;
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
	kvfree(clhash->hash);
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
	hlist_del(&cl->hnode);
EXPORT_SYMBOL(qdisc_class_hash_remove);
/* Allocate a unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
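/* Illustrative note (not from the original file): given the increment of
 * TC_H_MAKE(0x10000U, 0) below and assuming no collisions, successive
 * automatic allocations typically hand out handles 8001:0, 8002:0, ...,
 * wrapping around within the 8000:0 - ffff:0 range.
 */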
static u32 qdisc_alloc_handle(struct net_device *dev)
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))

void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
	const struct Qdisc_class_ops *cops;

	if (n == 0 && len == 0)

	drops = max_t(int, n, 0);

	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))

		if (sch->flags & TCQ_F_NOPARENT)
		/* Notify parent qdisc only if child qdisc becomes empty.
		 *
		 * If child was empty even before update then backlog
		 * counter is screwed and we skip notification because
		 * parent class is already passive.
		 *
		 * If the original child was offloaded then it is allowed
		 * to be seen as empty, so the parent is notified anyway.
		 */
		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
						       !qdisc_is_offloaded);
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
			WARN_ON_ONCE(parentid != TC_H_ROOT);

		cops = sch->ops->cl_ops;
		if (notify && cops->qlen_notify) {
			cl = cops->find(sch, parentid);
			cops->qlen_notify(sch, cl);
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);

int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
	struct net_device *dev = qdisc_dev(sch);

	sch->flags &= ~TCQ_F_OFFLOADED;
	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
	if (err == -EOPNOTSUPP)

		sch->flags |= TCQ_F_OFFLOADED;
EXPORT_SYMBOL(qdisc_offload_dump_helper);

void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack)
	bool any_qdisc_is_offloaded;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);

	/* Don't report error if the graft is part of destroy operation. */
	if (!err || !new || new == &noop_qdisc)

	/* Don't report error if the parent, the old child and the new
	 * one are not offloaded.
	 */
	any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;

	if (any_qdisc_is_offloaded)
		NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
EXPORT_SYMBOL(qdisc_offload_graft_helper);

void qdisc_offload_query_caps(struct net_device *dev,
			      enum tc_setup_type type,
			      void *caps, size_t caps_len)
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_query_caps_base base = {

	memset(caps, 0, caps_len);

	if (ops->ndo_setup_tc)
		ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
EXPORT_SYMBOL(qdisc_offload_query_caps);

static void qdisc_offload_graft_root(struct net_device *dev,
				     struct Qdisc *new, struct Qdisc *old,
				     struct netlink_ext_ack *extack)
	struct tc_root_qopt_offload graft_offload = {
		.command	= TC_ROOT_GRAFT,
		.handle		= new ? new->handle : 0,
		.ingress	= (new && new->flags & TCQ_F_INGRESS) ||
				  (old && old->flags & TCQ_F_INGRESS),

	qdisc_offload_graft_helper(dev, NULL, new, old,
				   TC_SETUP_ROOT_QDISC, &graft_offload, extack);

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event,
			 struct netlink_ext_ack *extack)
	struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct qdisc_size_table *stab;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->ingress_block_get) {
		block_index = q->ops->ingress_block_get(q);
		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
			goto nla_put_failure;
	if (q->ops->egress_block_get) {
		block_index = q->ops->egress_block_get(q);
		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
			goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
		goto nla_put_failure;
	qlen = qdisc_qlen_sum(q);

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;

	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
	if (q->flags & TCQ_F_BUILTIN)
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new,
			struct netlink_ext_ack *extack)
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC, extack) < 0)
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0)

		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new,
			       struct netlink_ext_ack *extack)
		qdisc_notify(net, skb, n, clid, old, new, extack);

static void qdisc_clear_nolock(struct Qdisc *sch)
	sch->flags &= ~TCQ_F_NOLOCK;
	if (!(sch->flags & TCQ_F_CPUSTATS))

	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	sch->cpu_bstats = NULL;
	sch->cpu_qstats = NULL;
	sch->flags &= ~TCQ_F_CPUSTATS;
/* Graft qdisc "new" to class "classid" of qdisc "parent" or,
 * if no parent is given, to the device root.
 *
 * When appropriate send a netlink notification using 'skb' and 'n'.
 *
 * On success, destroy old qdisc.
 */
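/* Illustrative example (hypothetical command, not from the original file):
 * "tc qdisc replace dev eth0 parent 1:1 handle 10: sfq" ends up here with
 * 'parent' being qdisc 1:, 'classid' 1:1 and 'new' the freshly created sfq;
 * the class's ->graft() hands back the previous child in 'old', which is
 * then destroyed after the netlink notification (notify_and_destroy()).
 */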
static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old,
		       struct netlink_ext_ack *extack)
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			if (!dev_ingress_queue(dev)) {
				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		qdisc_offload_graft_root(dev, new, old, extack);

		if (new && new->ops->attach && !ingress)

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
				qdisc_refcount_inc(new);

			old = rtnl_dereference(dev->qdisc);
			if (new && !new->ops->attach)
				qdisc_refcount_inc(new);
			rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);

			notify_and_destroy(net, skb, n, classid, old, new, extack);

			if (new && new->ops->attach)
				new->ops->attach(new);

			notify_and_destroy(net, skb, n, classid, old, new, extack);

		if (dev->flags & IFF_UP)

		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		/* Only support running class lockless if parent is lockless */
		if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
			qdisc_clear_nolock(new);

		if (!cops || !cops->graft)

		cl = cops->find(parent, classid);
			NL_SET_ERR_MSG(extack, "Specified class not found");

		if (new && new->ops == &noqueue_qdisc_ops) {
			NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");

		err = cops->graft(parent, cl, new, &old, extack);
			notify_and_destroy(net, skb, n, classid, old, new, extack);

static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
				   struct netlink_ext_ack *extack)
	if (tca[TCA_INGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");

		if (!sch->ops->ingress_block_set) {
			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");

		sch->ops->ingress_block_set(sch, block_index);

	if (tca[TCA_EGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");

		if (!sch->ops->egress_block_set) {
			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");

		sch->ops->egress_block_set(sch, block_index);

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */
static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  u32 parent, u32 handle,
				  struct nlattr **tca, int *errp,
				  struct netlink_ext_ack *extack)
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];

		if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			request_module("sch_%s", name);

			ops = qdisc_lookup_ops(kind);
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
#endif

		NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");

	sch = qdisc_alloc(dev_queue, ops, extack);

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
			handle = qdisc_alloc_handle(dev);
				NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");

		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;

	sch->handle = handle;
	/* This exists to keep backward compatibility with a userspace
	 * loophole that allowed userspace to get the IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init), and then forgetting to reinit tx_queue_len
	 * before again attaching a qdisc.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");

	err = qdisc_block_indexes_set(sch, tca, extack);

		err = ops->init(sch, tca[TCA_OPTIONS], extack);

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
			err = PTR_ERR(stab);

		rcu_assign_pointer(sch->stab, stab);

	if (tca[TCA_RATE]) {
		if (sch->flags & TCQ_F_MQROOT) {
			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");

		err = gen_new_estimator(&sch->bstats,
			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");

	qdisc_hash_add(sch, false);
	trace_qdisc_create(ops, dev, parent);

	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */

	netdev_put(dev, &sch->dev_tracker);

	module_put(ops->owner);

	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
			struct netlink_ext_ack *extack)
	struct qdisc_size_table *ostab, *stab = NULL;

	if (tca[TCA_OPTIONS]) {
		if (!sch->ops->change) {
			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");

		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");

		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
			return PTR_ERR(stab);

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)

		gen_replace_estimator(&sch->bstats,

struct check_loop_arg {
	struct qdisc_walker	w;

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;

	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;

check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
		if (leaf == arg->p || arg->depth > 7)
		return check_loop(leaf, arg->p, arg->depth + 1);

const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
	[TCA_KIND]		= { .type = NLA_STRING },
	[TCA_RATE]		= { .type = NLA_BINARY,
				    .len = sizeof(struct tc_estimator) },
	[TCA_STAB]		= { .type = NLA_NESTED },
	[TCA_DUMP_INVISIBLE]	= { .type = NLA_FLAG },
	[TCA_CHAIN]		= { .type = NLA_U32 },
	[TCA_INGRESS_BLOCK]	= { .type = NLA_U32 },
	[TCA_EGRESS_BLOCK]	= { .type = NLA_U32 },

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;

	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);

	clid = tcm->tcm_parent;

	if (clid != TC_H_ROOT) {
		if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
			p = qdisc_lookup(dev, TC_H_MAJ(clid));
				NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
			q = qdisc_leaf(p, clid);
		} else if (dev_ingress_queue(dev)) {
			q = dev_ingress_queue(dev)->qdisc_sleeping;
		q = rtnl_dereference(dev->qdisc);

		NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");

	if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
		NL_SET_ERR_MSG(extack, "Invalid handle");

		q = qdisc_lookup(dev, tcm->tcm_handle);
			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");

	if (n->nlmsg_type == RTM_DELQDISC) {
			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
		if (q->handle == 0) {
			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
		qdisc_notify(net, skb, n, clid, NULL, q, NULL);

/*
 * Create/change qdisc.
 */
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q, *p;

	/* Reinit, just in case something touches this. */
	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);

	if (clid != TC_H_ROOT) {
		if (clid != TC_H_INGRESS) {
			p = qdisc_lookup(dev, TC_H_MAJ(clid));
				NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
			q = qdisc_leaf(p, clid);
		} else if (dev_ingress_queue_create(dev)) {
			q = dev_ingress_queue(dev)->qdisc_sleeping;
		q = rtnl_dereference(dev->qdisc);

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
				if (TC_H_MIN(tcm->tcm_handle)) {
					NL_SET_ERR_MSG(extack, "Invalid minor handle");
				q = qdisc_lookup(dev, tcm->tcm_handle);
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL) {
					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
				if (tca[TCA_KIND] &&
				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
				    (p && check_loop(q, p, 0))) {
					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
				qdisc_refcount_inc(q);
				goto create_n_graft;
		/* This magic test requires explanation.
		 *
		 *   We know that some child q is already
		 *   attached to this parent and we have a choice:
		 *   either to change it or to create/graft a new one.
		 *
		 *   1. We are allowed to create/graft only
		 *   if both CREATE and REPLACE flags are set.
		 *
		 *   2. If EXCL is set, the requestor wanted to say
		 *   that the qdisc tcm_handle is not expected
		 *   to exist, so we choose create/graft too.
		 *
		 *   3. The last case is when no flags are set.
		 *   Alas, it is sort of a hole in the API; we
		 *   cannot decide what to do unambiguously.
		 *   For now we select create/graft if the
		 *   user gave a KIND which does not match the existing one.
		 */
		if ((n->nlmsg_flags & NLM_F_CREATE) &&
		    (n->nlmsg_flags & NLM_F_REPLACE) &&
		    ((n->nlmsg_flags & NLM_F_EXCL) ||
		      nla_strcmp(tca[TCA_KIND], q->ops->id))))
			goto create_n_graft;

	if (!tcm->tcm_handle) {
		NL_SET_ERR_MSG(extack, "Handle cannot be zero");
	q = qdisc_lookup(dev, tcm->tcm_handle);

	/* Change qdisc parameters */
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");

	if (n->nlmsg_flags & NLM_F_EXCL) {
		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
	err = qdisc_change(q, tca, extack);
		qdisc_notify(net, skb, n, clid, NULL, q, extack);

	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev)) {
			q = qdisc_create(dev, dev_ingress_queue(dev),
					 tcm->tcm_parent, tcm->tcm_parent,
			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");

		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
			dev_queue = p->dev_queue;
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue,
				 tcm->tcm_parent, tcm->tcm_handle,

	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
	int ret = 0, q_idx = *q_idx_p;

	if (q_idx < s_q_idx) {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC, NULL) <= 0)

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the global
	 * qdisc hashtable, we don't want to hit it again
	 */
	if (!qdisc_dev(root) || !recur)

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {

		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC, NULL) <= 0)

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct nlattr *tca[TCA_MAX + 1];

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
				       skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0)

		dev_queue = dev_ingress_queue(dev);
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0)

	cb->args[1] = q_idx;

/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl, u32 portid, u32 seq, u16 flags,
			  int event, struct netlink_ext_ack *extack)
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;

	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
		goto out_nlmsg_trim;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event, struct netlink_ext_ack *extack)
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) {

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);

static int tclass_del_notify(struct net *net,
			     const struct Qdisc_class_ops *cops,
			     struct sk_buff *oskb, struct nlmsghdr *n,
			     struct Qdisc *q, unsigned long cl,
			     struct netlink_ext_ack *extack)
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
			   RTM_DELTCLASS, extack) < 0) {

	err = cops->delete(q, cl, extack);

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);

#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
	struct tcf_walker w;

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
	struct tcf_bind_args *a = (void *)arg;

	if (n && tp->ops->bind_class) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		tp->ops->bind_class(n, a->classid, a->cl, q, a->base);

struct tc_bind_class_args {
	struct qdisc_walker w;
	unsigned long new_cl;

static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
				struct qdisc_walker *w)
	struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tcf_block *block;
	struct tcf_chain *chain;

	block = cops->tcf_block(q, cl, NULL);

	for (chain = tcf_get_next_chain(block, NULL);
	     chain = tcf_get_next_chain(block, chain)) {
		struct tcf_proto *tp;

		for (tp = tcf_get_next_proto(chain, NULL);
		     tp; tp = tcf_get_next_proto(chain, tp)) {
			struct tcf_bind_args arg = {};

			arg.w.fn = tcf_node_bind;
			arg.classid = a->clid;
			tp->ops->walk(tp, &arg.w, true);

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tc_bind_class_args args = {};

	if (!cops->tcf_block)

	args.portid = portid;
	args.new_cl = new_cl;
	args.w.fn = tc_bind_class_walker;
	q->ops->cl_ops->walk(q, &args.w);

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)

#endif

static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;

	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0 - parent is root class.
	   parent == X:Y - parent is a node in hierarchy.
	   parent == 0:Y - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0 - generate handle from kernel pool.
	   handle == 0:Y - class is X:Y, where X:0 is qdisc.
	   handle == X:Y - clear.
	   handle == X:0 - root class.
	 */
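	/* Illustrative mapping (not from the original file): for the
	 * hypothetical command "tc class add dev eth0 parent 1: classid 1:10 ...",
	 * tcm_parent is 0x00010000 (1:0) and tcm_handle is 0x0001000a (1:10),
	 * i.e. the upper 16 bits carry the qdisc major X and the lower 16 bits
	 * the class minor Y, as produced by TC_H_MAKE().
	 */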
	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		/* If both majors are known, they must be identical. */

	} else if (qid == 0)
		qid = rtnl_dereference(dev->qdisc)->handle;

	/* Now qid is genuine qdisc handle consistent
	 * both with parent and child.
	 *
	 * TC_H_MAJ(portid) still may be unspecified, complete it now.
	 */
		portid = TC_H_MAKE(qid, portid);
		qid = rtnl_dereference(dev->qdisc)->handle;

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	/* And check that it supports classes */
	cops = q->ops->cl_ops;

	/* Now try to get class */
		if (portid == TC_H_ROOT)
		clid = TC_H_MAKE(qid, clid);

		cl = cops->find(q, clid);

		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))

		switch (n->nlmsg_type) {
			if (n->nlmsg_flags & NLM_F_EXCL)
			err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
			/* Unbind the class from its filters (rebind them to class 0) */
			tc_bind_tclass(q, portid, clid, 0);

			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS, extack);

	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");

	err = cops->change(q, clid, portid, tca, &new_cl, extack);
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack);
		/* We just created a new class; we need to do the reverse binding. */
			tc_bind_tclass(q, portid, clid, new_cl);
struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS, NULL);

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {

		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.w.skip = cb->args[1];
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t, bool recur)
	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)

	if (!qdisc_dev(root) || !recur)

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && q != root &&
		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
	dev = dev_get_by_index(net, tcm->tcm_ifindex);

	if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
				skb, tcm, cb, &t, s_t, true) < 0)

	dev_queue = dev_ingress_queue(dev);
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t, false) < 0)

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

static int __net_init psched_net_init(struct net *net)
	struct proc_dir_entry *e;

	e = proc_create_single("psched", 0, net->proc_net, psched_show);

static void __net_exit psched_net_exit(struct net *net)
	remove_proc_entry("psched", net->proc_net);

#else

static int __net_init psched_net_init(struct net *net)

static void __net_exit psched_net_exit(struct net *net)

#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,

DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);

static int __init pktsched_init(void)
	err = register_pernet_subsys(&psched_net_ops);
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,

subsys_initcall(pktsched_init);