2 * net/sched/sch_api.c Packet scheduler API.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/skbuff.h>
24 #include <linux/init.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/kmod.h>
28 #include <linux/list.h>
29 #include <linux/hrtimer.h>
30 #include <linux/lockdep.h>
31 #include <linux/slab.h>
32 #include <linux/hashtable.h>
34 #include <net/net_namespace.h>
36 #include <net/netlink.h>
37 #include <net/pkt_sched.h>
38 #include <net/pkt_cls.h>
45 This file consists of two interrelated parts:
47 1. queueing disciplines manager frontend.
48 2. traffic classes manager frontend.
50 Generally, queueing discipline ("qdisc") is a black box,
51 which is able to enqueue packets and to dequeue them (when
52 device is ready to send something) in order and at times
53 determined by algorithm hidden in it.
55 qdisc's are divided into two categories:
56 - "queues", which have no internal structure visible from outside.
57 - "schedulers", which split all the packets to "traffic classes",
58 using "packet classifiers" (look at cls_api.c)
60 In turn, classes may have child qdiscs (as a rule, queues)
61 attached to them etc. etc. etc.
63 The goal of the routines in this file is to translate
64 information supplied by the user in the form of handles
65 to a form more intelligible to the kernel, to make some sanity
66 checks and do the part of the work that is common to all qdiscs,
67 and to provide rtnetlink notifications.
69 All real intelligent work is done inside qdisc modules.
73 Every discipline has two major routines: enqueue and dequeue.
77 dequeue usually returns a skb to send. It is allowed to return NULL,
78 but it does not mean that queue is empty, it just means that
79 discipline does not want to send anything this time.
80 Queue is really empty if q->q.qlen == 0.
81 For complicated disciplines with multiple queues q->q is not a
82 real packet queue; however, q->q.qlen must still be valid.
86 enqueue returns 0, if packet was enqueued successfully.
87 If the packet (this one or another one) was dropped, it returns a non-zero error code:
89 NET_XMIT_DROP - this packet dropped
90 Expected action: do not backoff, but wait until queue will clear.
91 NET_XMIT_CN - probably this packet enqueued, but another one dropped.
92 Expected action: backoff or ignore
98 ---peek: like dequeue but without removing a packet from the queue.
102 ---reset: returns qdisc to initial state: purge all buffers, clear all
103 timers, counters (except for statistics) etc.
107 ---init: initializes newly created qdisc.
111 ---destroy: destroys resources allocated by init and during lifetime of qdisc.
115 ---change: changes qdisc parameters. */
118 /* Protects list of registered TC modules. It is pure SMP lock. */
119 static DEFINE_RWLOCK(qdisc_mod_lock);
122 /************************************************
123 * Queueing disciplines manipulation. *
124 ************************************************/
127 /* The list of all installed queueing disciplines. */
129 static struct Qdisc_ops *qdisc_base;
131 /* Register/unregister queueing discipline */
133 int register_qdisc(struct Qdisc_ops *qops)
135 struct Qdisc_ops *q, **qp;
138 write_lock(&qdisc_mod_lock);
139 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
140 if (!strcmp(qops->id, q->id))
143 if (qops->enqueue == NULL)
144 qops->enqueue = noop_qdisc_ops.enqueue;
145 if (qops->peek == NULL) {
146 if (qops->dequeue == NULL)
147 qops->peek = noop_qdisc_ops.peek;
151 if (qops->dequeue == NULL)
152 qops->dequeue = noop_qdisc_ops.dequeue;
155 const struct Qdisc_class_ops *cops = qops->cl_ops;
157 if (!(cops->find && cops->walk && cops->leaf))
160 if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
168 write_unlock(&qdisc_mod_lock);
175 EXPORT_SYMBOL(register_qdisc);
177 int unregister_qdisc(struct Qdisc_ops *qops)
179 struct Qdisc_ops *q, **qp;
182 write_lock(&qdisc_mod_lock);
183 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
191 write_unlock(&qdisc_mod_lock);
194 EXPORT_SYMBOL(unregister_qdisc);
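/* Illustrative usage (a sketch with hypothetical names, not code from this
 * file): a qdisc module typically registers its ops on load and unregisters
 * them on unload:
 *
 *	static struct Qdisc_ops my_qdisc_ops __read_mostly = {
 *		.id		= "myq",
 *		.priv_size	= sizeof(struct my_sched_data),
 *		.enqueue	= my_enqueue,
 *		.dequeue	= my_dequeue,
 *		.peek		= qdisc_peek_dequeued,
 *		.init		= my_init,
 *		.reset		= my_reset,
 *		.destroy	= my_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init my_module_init(void)
 *	{
 *		return register_qdisc(&my_qdisc_ops);
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		unregister_qdisc(&my_qdisc_ops);
 *	}
 *	module_init(my_module_init);
 *	module_exit(my_module_exit);
 */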
196 /* Get default qdisc if not otherwise specified */
197 void qdisc_get_default(char *name, size_t len)
199 read_lock(&qdisc_mod_lock);
200 strlcpy(name, default_qdisc_ops->id, len);
201 read_unlock(&qdisc_mod_lock);
204 static struct Qdisc_ops *qdisc_lookup_default(const char *name)
206 struct Qdisc_ops *q = NULL;
208 for (q = qdisc_base; q; q = q->next) {
209 if (!strcmp(name, q->id)) {
210 if (!try_module_get(q->owner))
219 /* Set new default qdisc to use */
220 int qdisc_set_default(const char *name)
222 const struct Qdisc_ops *ops;
224 if (!capable(CAP_NET_ADMIN))
227 write_lock(&qdisc_mod_lock);
228 ops = qdisc_lookup_default(name);
230 /* Not found, drop lock and try to load module */
231 write_unlock(&qdisc_mod_lock);
232 request_module("sch_%s", name);
233 write_lock(&qdisc_mod_lock);
235 ops = qdisc_lookup_default(name);
239 /* Set new default */
240 module_put(default_qdisc_ops->owner);
241 default_qdisc_ops = ops;
243 write_unlock(&qdisc_mod_lock);
245 return ops ? 0 : -ENOENT;
248 #ifdef CONFIG_NET_SCH_DEFAULT
249 /* Set default value from kernel config */
250 static int __init sch_default_qdisc(void)
252 return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
254 late_initcall(sch_default_qdisc);
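/* As a side note (from memory, not from this file): besides the
 * CONFIG_DEFAULT_NET_SCH initcall above, the net.core.default_qdisc sysctl
 * handler also calls qdisc_get_default()/qdisc_set_default(), so e.g.
 *
 *	sysctl -w net.core.default_qdisc=fq_codel
 *
 * changes which qdisc newly attached device roots receive.
 */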
257 /* We know handle. Find qdisc among all qdisc's attached to device
258 * (root qdisc, all its children, children of children etc.)
259 * Note: caller either uses rtnl or rcu_read_lock()
262 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
266 if (!qdisc_dev(root))
267 return (root->handle == handle ? root : NULL);
269 if (!(root->flags & TCQ_F_BUILTIN) &&
270 root->handle == handle)
273 hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
274 if (q->handle == handle)
280 void qdisc_hash_add(struct Qdisc *q, bool invisible)
282 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
284 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
286 q->flags |= TCQ_F_INVISIBLE;
289 EXPORT_SYMBOL(qdisc_hash_add);
291 void qdisc_hash_del(struct Qdisc *q)
293 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
295 hash_del_rcu(&q->hash);
298 EXPORT_SYMBOL(qdisc_hash_del);
300 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
306 q = qdisc_match_from_root(dev->qdisc, handle);
310 if (dev_ingress_queue(dev))
311 q = qdisc_match_from_root(
312 dev_ingress_queue(dev)->qdisc_sleeping,
318 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
322 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
326 cl = cops->find(p, classid);
330 leaf = cops->leaf(p, cl);
334 /* Find queueing discipline by name */
336 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
338 struct Qdisc_ops *q = NULL;
341 read_lock(&qdisc_mod_lock);
342 for (q = qdisc_base; q; q = q->next) {
343 if (nla_strcmp(kind, q->id) == 0) {
344 if (!try_module_get(q->owner))
349 read_unlock(&qdisc_mod_lock);
354 /* The linklayer setting was not transferred from iproute2, in older
355 * versions, and the rate table lookup system has been dropped from
356 * the kernel. To stay backward compatible with older iproute2 tc
357 * utils, we detect the linklayer setting by checking whether the rate
358 * table was modified.
360 * For linklayer ATM table entries, the rate table will be aligned to
361 * 48 bytes, thus some table entries will contain the same value. The
362 * mpu (min packet unit) is also encoded into the old rate table, thus
363 * starting from the mpu, we find low and high table entries for
364 * mapping this cell. If these entries contain the same value, then
365 * the rate table has been modified for linklayer ATM.
367 * This is done by rounding mpu up to the nearest 48-byte cell/entry,
368 * then rounding up to the next cell, and comparing the table entry just below that cell with the entry at the mpu boundary. */
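/* Worked example (numbers are mine, purely for illustration): with
 * r->mpu = 0 and r->cell_log = 3, __detect_linklayer() computes
 * low = roundup(0, 48) = 0, high = roundup(1, 48) = 48,
 * cell_low = 0 >> 3 = 0 and cell_high = (48 >> 3) - 1 = 5. Slots 0..5
 * cover packet sizes below 48 bytes, i.e. a single ATM cell, so a table
 * built by iproute2 for linklayer ATM has rtab[0] == rtab[5] and
 * TC_LINKLAYER_ATM is returned; for a plain Ethernet table those entries
 * differ and we fall back to TC_LINKLAYER_ETHERNET.
 */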
371 static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
373 int low = roundup(r->mpu, 48);
374 int high = roundup(low+1, 48);
375 int cell_low = low >> r->cell_log;
376 int cell_high = (high >> r->cell_log) - 1;
378 /* rtab is too inaccurate at rates > 100Mbit/s */
379 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
380 pr_debug("TC linklayer: Giving up ATM detection\n");
381 return TC_LINKLAYER_ETHERNET;
384 if ((cell_high > cell_low) && (cell_high < 256)
385 && (rtab[cell_low] == rtab[cell_high])) {
386 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
387 cell_low, cell_high, rtab[cell_high]);
388 return TC_LINKLAYER_ATM;
390 return TC_LINKLAYER_ETHERNET;
393 static struct qdisc_rate_table *qdisc_rtab_list;
395 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
397 struct netlink_ext_ack *extack)
399 struct qdisc_rate_table *rtab;
401 if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
402 nla_len(tab) != TC_RTAB_SIZE) {
403 NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
407 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
408 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
409 !memcmp(&rtab->data, nla_data(tab), 1024)) {
415 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
419 memcpy(rtab->data, nla_data(tab), 1024);
420 if (r->linklayer == TC_LINKLAYER_UNAWARE)
421 r->linklayer = __detect_linklayer(r, rtab->data);
422 rtab->next = qdisc_rtab_list;
423 qdisc_rtab_list = rtab;
425 NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
429 EXPORT_SYMBOL(qdisc_get_rtab);
431 void qdisc_put_rtab(struct qdisc_rate_table *tab)
433 struct qdisc_rate_table *rtab, **rtabp;
435 if (!tab || --tab->refcnt)
438 for (rtabp = &qdisc_rtab_list;
439 (rtab = *rtabp) != NULL;
440 rtabp = &rtab->next) {
448 EXPORT_SYMBOL(qdisc_put_rtab);
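/* A note on rate table sharing (summarising the behaviour around the code
 * above): qdisc_get_rtab() scans qdisc_rtab_list for a table with an
 * identical tc_ratespec and identical 1024 bytes of data and reuses it,
 * bumping its refcount, while qdisc_put_rtab() only unlinks and frees a
 * table once its last user is gone. Callers should therefore treat the
 * returned table as shared and read-only.
 */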
450 static LIST_HEAD(qdisc_stab_list);
452 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
453 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
454 [TCA_STAB_DATA] = { .type = NLA_BINARY },
457 static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
458 struct netlink_ext_ack *extack)
460 struct nlattr *tb[TCA_STAB_MAX + 1];
461 struct qdisc_size_table *stab;
462 struct tc_sizespec *s;
463 unsigned int tsize = 0;
467 err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, extack);
470 if (!tb[TCA_STAB_BASE]) {
471 NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
472 return ERR_PTR(-EINVAL);
475 s = nla_data(tb[TCA_STAB_BASE]);
478 if (!tb[TCA_STAB_DATA]) {
479 NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
480 return ERR_PTR(-EINVAL);
482 tab = nla_data(tb[TCA_STAB_DATA]);
483 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
486 if (tsize != s->tsize || (!tab && tsize > 0)) {
487 NL_SET_ERR_MSG(extack, "Invalid size of size table");
488 return ERR_PTR(-EINVAL);
491 list_for_each_entry(stab, &qdisc_stab_list, list) {
492 if (memcmp(&stab->szopts, s, sizeof(*s)))
494 if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
500 stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
502 return ERR_PTR(-ENOMEM);
507 memcpy(stab->data, tab, tsize * sizeof(u16));
509 list_add_tail(&stab->list, &qdisc_stab_list);
514 static void stab_kfree_rcu(struct rcu_head *head)
516 kfree(container_of(head, struct qdisc_size_table, rcu));
519 void qdisc_put_stab(struct qdisc_size_table *tab)
524 if (--tab->refcnt == 0) {
525 list_del(&tab->list);
526 call_rcu_bh(&tab->rcu, stab_kfree_rcu);
529 EXPORT_SYMBOL(qdisc_put_stab);
531 static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
535 nest = nla_nest_start(skb, TCA_STAB);
537 goto nla_put_failure;
538 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
539 goto nla_put_failure;
540 nla_nest_end(skb, nest);
548 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
549 const struct qdisc_size_table *stab)
553 pkt_len = skb->len + stab->szopts.overhead;
554 if (unlikely(!stab->szopts.tsize))
557 slot = pkt_len + stab->szopts.cell_align;
558 if (unlikely(slot < 0))
561 slot >>= stab->szopts.cell_log;
562 if (likely(slot < stab->szopts.tsize))
563 pkt_len = stab->data[slot];
565 pkt_len = stab->data[stab->szopts.tsize - 1] *
566 (slot / stab->szopts.tsize) +
567 stab->data[slot % stab->szopts.tsize];
569 pkt_len <<= stab->szopts.size_log;
571 if (unlikely(pkt_len < 1))
573 qdisc_skb_cb(skb)->pkt_len = pkt_len;
575 EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
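/* Worked example (values invented for illustration): with a size table
 * whose szopts are { .overhead = 14, .cell_align = 0, .cell_log = 6,
 * .size_log = 0, .tsize = 512 }, a 1000-byte skb gives
 * pkt_len = 1000 + 14 = 1014 and slot = 1014 >> 6 = 15, so the qdisc
 * accounts stab->data[15] (e.g. the link-layer padded wire size) instead
 * of the raw length. Slots past tsize are extrapolated from the last
 * entry, and the result is finally shifted left by size_log.
 */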
577 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
579 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
580 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
581 txt, qdisc->ops->id, qdisc->handle >> 16);
582 qdisc->flags |= TCQ_F_WARN_NONWC;
585 EXPORT_SYMBOL(qdisc_warn_nonwc);
587 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
589 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
593 __netif_schedule(qdisc_root(wd->qdisc));
596 return HRTIMER_NORESTART;
599 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
601 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
602 wd->timer.function = qdisc_watchdog;
605 EXPORT_SYMBOL(qdisc_watchdog_init);
607 void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
609 if (test_bit(__QDISC_STATE_DEACTIVATED,
610 &qdisc_root_sleeping(wd->qdisc)->state))
613 if (wd->last_expires == expires)
616 wd->last_expires = expires;
617 hrtimer_start(&wd->timer,
618 ns_to_ktime(expires),
619 HRTIMER_MODE_ABS_PINNED);
621 EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
623 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
625 hrtimer_cancel(&wd->timer);
627 EXPORT_SYMBOL(qdisc_watchdog_cancel);
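/* Typical watchdog usage in a shaping qdisc (a sketch with hypothetical
 * field names): ->init() calls qdisc_watchdog_init(&q->watchdog, sch);
 * a ->dequeue() that has nothing eligible to send yet does roughly
 *
 *	qdisc_watchdog_schedule_ns(&q->watchdog, next_send_time_ns);
 *	return NULL;
 *
 * and ->reset()/->destroy() call qdisc_watchdog_cancel(&q->watchdog).
 * When the timer fires, qdisc_watchdog() above reschedules the root
 * qdisc so dequeue runs again.
 */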
629 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
631 struct hlist_head *h;
634 h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
637 for (i = 0; i < n; i++)
638 INIT_HLIST_HEAD(&h[i]);
643 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
645 struct Qdisc_class_common *cl;
646 struct hlist_node *next;
647 struct hlist_head *nhash, *ohash;
648 unsigned int nsize, nmask, osize;
651 /* Rehash when load factor exceeds 0.75 */
652 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
654 nsize = clhash->hashsize * 2;
656 nhash = qdisc_class_hash_alloc(nsize);
660 ohash = clhash->hash;
661 osize = clhash->hashsize;
664 for (i = 0; i < osize; i++) {
665 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
666 h = qdisc_class_hash(cl->classid, nmask);
667 hlist_add_head(&cl->hnode, &nhash[h]);
670 clhash->hash = nhash;
671 clhash->hashsize = nsize;
672 clhash->hashmask = nmask;
673 sch_tree_unlock(sch);
677 EXPORT_SYMBOL(qdisc_class_hash_grow);
679 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
681 unsigned int size = 4;
683 clhash->hash = qdisc_class_hash_alloc(size);
686 clhash->hashsize = size;
687 clhash->hashmask = size - 1;
688 clhash->hashelems = 0;
691 EXPORT_SYMBOL(qdisc_class_hash_init);
693 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
695 kvfree(clhash->hash);
697 EXPORT_SYMBOL(qdisc_class_hash_destroy);
699 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
700 struct Qdisc_class_common *cl)
704 INIT_HLIST_NODE(&cl->hnode);
705 h = qdisc_class_hash(cl->classid, clhash->hashmask);
706 hlist_add_head(&cl->hnode, &clhash->hash[h]);
709 EXPORT_SYMBOL(qdisc_class_hash_insert);
711 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
712 struct Qdisc_class_common *cl)
714 hlist_del(&cl->hnode);
717 EXPORT_SYMBOL(qdisc_class_hash_remove);
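/* Sketch of the expected class hash lifecycle in a classful qdisc (names
 * are illustrative): ->init() calls qdisc_class_hash_init(&q->clhash);
 * when a class is created the qdisc sets cl->common.classid, inserts it
 * with qdisc_class_hash_insert() under sch_tree_lock(), and afterwards
 * calls qdisc_class_hash_grow(sch, &q->clhash); deletion uses
 * qdisc_class_hash_remove(), and ->destroy() finishes with
 * qdisc_class_hash_destroy(&q->clhash).
 */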
719 /* Allocate a unique handle from space managed by kernel
720 * Possible range is [8000-FFFF]:0000 (0x8000 values) */
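/* For reference: a qdisc handle is a 32-bit value with the major number in
 * the upper 16 bits and the minor in the lower 16, so "1:" from tc is
 * 0x00010000 and class "1:10" is 0x00010010 (tc parses both halves as hex).
 * TC_H_MAKE()/TC_H_MAJ()/TC_H_MIN() combine and split these. The
 * auto-allocated handles below start in the 8000:0000 range and advance the
 * major number by one (TC_H_MAKE(0x10000U, 0)) per attempt.
 */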
722 static u32 qdisc_alloc_handle(struct net_device *dev)
725 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
728 autohandle += TC_H_MAKE(0x10000U, 0);
729 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
730 autohandle = TC_H_MAKE(0x80000000U, 0);
731 if (!qdisc_lookup(dev, autohandle))
739 void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
742 bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
743 const struct Qdisc_class_ops *cops;
749 if (n == 0 && len == 0)
751 drops = max_t(int, n, 0);
753 while ((parentid = sch->parent)) {
754 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
757 if (sch->flags & TCQ_F_NOPARENT)
759 /* Notify parent qdisc only if child qdisc becomes empty.
761 * If child was empty even before update then backlog
762 * counter is screwed and we skip notification because
763 * parent class is already passive.
765 * If the original child was offloaded then it is allowed
766 * to be seen as empty, so the parent is notified anyway.
768 notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
769 !qdisc_is_offloaded);
770 /* TODO: perform the search on a per txq basis */
771 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
773 WARN_ON_ONCE(parentid != TC_H_ROOT);
776 cops = sch->ops->cl_ops;
777 if (notify && cops->qlen_notify) {
778 cl = cops->find(sch, parentid);
779 cops->qlen_notify(sch, cl);
782 sch->qstats.backlog -= len;
783 __qdisc_qstats_drop(sch, drops);
787 EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
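/* Typical caller pattern (a sketch of how leaf qdiscs use this, not code
 * from this file): when a qdisc drops packets outside the normal
 * enqueue/dequeue path, e.g. while applying a smaller limit in ->change(),
 * it snapshots its counters and reports the difference upwards:
 *
 *	unsigned int qlen = sch->q.qlen;
 *	unsigned int backlog = sch->qstats.backlog;
 *
 *	... drop the excess packets ...
 *
 *	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
 *				  backlog - sch->qstats.backlog);
 *
 * so that qlen/backlog of all ancestors stay consistent and parent classes
 * that became empty get a qlen_notify().
 */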
789 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
790 u32 portid, u32 seq, u16 flags, int event)
792 struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
793 struct gnet_stats_queue __percpu *cpu_qstats = NULL;
795 struct nlmsghdr *nlh;
796 unsigned char *b = skb_tail_pointer(skb);
798 struct qdisc_size_table *stab;
803 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
806 tcm = nlmsg_data(nlh);
807 tcm->tcm_family = AF_UNSPEC;
810 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
811 tcm->tcm_parent = clid;
812 tcm->tcm_handle = q->handle;
813 tcm->tcm_info = refcount_read(&q->refcnt);
814 if (nla_put_string(skb, TCA_KIND, q->ops->id))
815 goto nla_put_failure;
816 if (q->ops->ingress_block_get) {
817 block_index = q->ops->ingress_block_get(q);
819 nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
820 goto nla_put_failure;
822 if (q->ops->egress_block_get) {
823 block_index = q->ops->egress_block_get(q);
825 nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
826 goto nla_put_failure;
828 if (q->ops->dump && q->ops->dump(q, skb) < 0)
829 goto nla_put_failure;
830 if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
831 goto nla_put_failure;
832 qlen = qdisc_qlen_sum(q);
834 stab = rtnl_dereference(q->stab);
835 if (stab && qdisc_dump_stab(skb, stab) < 0)
836 goto nla_put_failure;
838 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
839 NULL, &d, TCA_PAD) < 0)
840 goto nla_put_failure;
842 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
843 goto nla_put_failure;
845 if (qdisc_is_percpu_stats(q)) {
846 cpu_bstats = q->cpu_bstats;
847 cpu_qstats = q->cpu_qstats;
850 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
851 &d, cpu_bstats, &q->bstats) < 0 ||
852 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
853 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
854 goto nla_put_failure;
856 if (gnet_stats_finish_copy(&d) < 0)
857 goto nla_put_failure;
859 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
868 static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
870 if (q->flags & TCQ_F_BUILTIN)
872 if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
878 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
879 struct nlmsghdr *n, u32 clid,
880 struct Qdisc *old, struct Qdisc *new)
883 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
885 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
889 if (old && !tc_qdisc_dump_ignore(old, false)) {
890 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
891 0, RTM_DELQDISC) < 0)
894 if (new && !tc_qdisc_dump_ignore(new, false)) {
895 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
896 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
901 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
902 n->nlmsg_flags & NLM_F_ECHO);
909 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
910 struct nlmsghdr *n, u32 clid,
911 struct Qdisc *old, struct Qdisc *new)
914 qdisc_notify(net, skb, n, clid, old, new);
920 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
923 * When appropriate send a netlink notification using 'skb'
926 * On success, destroy old qdisc.
929 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
930 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
931 struct Qdisc *new, struct Qdisc *old,
932 struct netlink_ext_ack *extack)
934 struct Qdisc *q = old;
935 struct net *net = dev_net(dev);
938 if (parent == NULL) {
939 unsigned int i, num_q, ingress;
942 num_q = dev->num_tx_queues;
943 if ((q && q->flags & TCQ_F_INGRESS) ||
944 (new && new->flags & TCQ_F_INGRESS)) {
947 if (!dev_ingress_queue(dev)) {
948 NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
953 if (dev->flags & IFF_UP)
956 if (new && new->ops->attach)
959 for (i = 0; i < num_q; i++) {
960 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
963 dev_queue = netdev_get_tx_queue(dev, i);
965 old = dev_graft_qdisc(dev_queue, new);
967 qdisc_refcount_inc(new);
975 notify_and_destroy(net, skb, n, classid,
977 if (new && !new->ops->attach)
978 qdisc_refcount_inc(new);
979 dev->qdisc = new ? : &noop_qdisc;
981 if (new && new->ops->attach)
982 new->ops->attach(new);
984 notify_and_destroy(net, skb, n, classid, old, new);
987 if (dev->flags & IFF_UP)
990 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
992 /* Only support running class lockless if parent is lockless */
993 if (new && (new->flags & TCQ_F_NOLOCK) &&
994 parent && !(parent->flags & TCQ_F_NOLOCK))
995 new->flags &= ~TCQ_F_NOLOCK;
998 if (cops && cops->graft) {
999 unsigned long cl = cops->find(parent, classid);
1002 err = cops->graft(parent, cl, new, &old,
1005 NL_SET_ERR_MSG(extack, "Specified class not found");
1010 notify_and_destroy(net, skb, n, classid, old, new);
1015 static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
1016 struct netlink_ext_ack *extack)
1020 if (tca[TCA_INGRESS_BLOCK]) {
1021 block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
1024 NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
1027 if (!sch->ops->ingress_block_set) {
1028 NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
1031 sch->ops->ingress_block_set(sch, block_index);
1033 if (tca[TCA_EGRESS_BLOCK]) {
1034 block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
1037 NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
1040 if (!sch->ops->egress_block_set) {
1041 NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
1044 sch->ops->egress_block_set(sch, block_index);
1049 /* lockdep annotation is needed for ingress; egress gets it only for name */
1050 static struct lock_class_key qdisc_tx_lock;
1051 static struct lock_class_key qdisc_rx_lock;
1054 Allocate and initialize new qdisc.
1056 Parameters are passed via opt.
1059 static struct Qdisc *qdisc_create(struct net_device *dev,
1060 struct netdev_queue *dev_queue,
1061 struct Qdisc *p, u32 parent, u32 handle,
1062 struct nlattr **tca, int *errp,
1063 struct netlink_ext_ack *extack)
1066 struct nlattr *kind = tca[TCA_KIND];
1068 struct Qdisc_ops *ops;
1069 struct qdisc_size_table *stab;
1071 ops = qdisc_lookup_ops(kind);
1072 #ifdef CONFIG_MODULES
1073 if (ops == NULL && kind != NULL) {
1074 char name[IFNAMSIZ];
1075 if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
1076 /* We dropped the RTNL semaphore in order to
1077 * perform the module load. So, even if we
1078 * succeeded in loading the module we have to
1079 * tell the caller to replay the request. We
1080 * indicate this using -EAGAIN.
1081 * We replay the request because the device may
1082 * go away in the mean time.
1085 request_module("sch_%s", name);
1087 ops = qdisc_lookup_ops(kind);
1089 /* We will try again qdisc_lookup_ops,
1090 * so don't keep a reference.
1092 module_put(ops->owner);
1102 NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1106 sch = qdisc_alloc(dev_queue, ops, extack);
1112 sch->parent = parent;
1114 if (handle == TC_H_INGRESS) {
1115 sch->flags |= TCQ_F_INGRESS;
1116 handle = TC_H_MAKE(TC_H_INGRESS, 0);
1117 lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
1120 handle = qdisc_alloc_handle(dev);
1125 lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
1126 if (!netif_is_multiqueue(dev))
1127 sch->flags |= TCQ_F_ONETXQUEUE;
1130 sch->handle = handle;
1132 /* This exists to keep backward compatibility with a userspace
1133 * loophole that allowed userspace to get the IFF_NO_QUEUE
1134 * facility on older kernels by setting tx_queue_len=0 (prior
1135 * to qdisc init) and then forgetting to reinit tx_queue_len
1136 * before attaching a qdisc again.
1138 if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1139 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
1140 netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1143 err = qdisc_block_indexes_set(sch, tca, extack);
1148 err = ops->init(sch, tca[TCA_OPTIONS], extack);
1153 if (tca[TCA_STAB]) {
1154 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1156 err = PTR_ERR(stab);
1159 rcu_assign_pointer(sch->stab, stab);
1161 if (tca[TCA_RATE]) {
1162 seqcount_t *running;
1165 if (sch->flags & TCQ_F_MQROOT) {
1166 NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
1170 if (sch->parent != TC_H_ROOT &&
1171 !(sch->flags & TCQ_F_INGRESS) &&
1172 (!p || !(p->flags & TCQ_F_MQROOT)))
1173 running = qdisc_root_sleeping_running(sch);
1175 running = &sch->running;
1177 err = gen_new_estimator(&sch->bstats,
1184 NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
1189 qdisc_hash_add(sch, false);
1194 /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
1201 module_put(ops->owner);
1208 * Any broken qdiscs that would require a ops->reset() here?
1209 * The qdisc was never in action so it shouldn't be necessary.
1211 qdisc_put_stab(rtnl_dereference(sch->stab));
1217 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
1218 struct netlink_ext_ack *extack)
1220 struct qdisc_size_table *ostab, *stab = NULL;
1223 if (tca[TCA_OPTIONS]) {
1224 if (!sch->ops->change) {
1225 NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
1228 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1229 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
1232 err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
1237 if (tca[TCA_STAB]) {
1238 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1240 return PTR_ERR(stab);
1243 ostab = rtnl_dereference(sch->stab);
1244 rcu_assign_pointer(sch->stab, stab);
1245 qdisc_put_stab(ostab);
1247 if (tca[TCA_RATE]) {
1248 /* NB: ignores errors from replace_estimator
1249 because change can't be undone. */
1250 if (sch->flags & TCQ_F_MQROOT)
1252 gen_replace_estimator(&sch->bstats,
1256 qdisc_root_sleeping_running(sch),
1263 struct check_loop_arg {
1264 struct qdisc_walker w;
1269 static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1270 struct qdisc_walker *w);
1272 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1274 struct check_loop_arg arg;
1276 if (q->ops->cl_ops == NULL)
1279 arg.w.stop = arg.w.skip = arg.w.count = 0;
1280 arg.w.fn = check_loop_fn;
1283 q->ops->cl_ops->walk(q, &arg.w);
1284 return arg.w.stop ? -ELOOP : 0;
1288 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1291 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1292 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1294 leaf = cops->leaf(q, cl);
1296 if (leaf == arg->p || arg->depth > 7)
1298 return check_loop(leaf, arg->p, arg->depth + 1);
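/* In short: before grafting q under p, check_loop(q, p, 0) walks the classes
 * of q and recurses into their leaf qdiscs; if p turns up in that subtree
 * (or the hierarchy is deeper than 7 levels) the walk is stopped and the
 * caller gets -ELOOP, preventing a qdisc from being attached below itself.
 */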
1307 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1308 struct netlink_ext_ack *extack)
1310 struct net *net = sock_net(skb->sk);
1311 struct tcmsg *tcm = nlmsg_data(n);
1312 struct nlattr *tca[TCA_MAX + 1];
1313 struct net_device *dev;
1315 struct Qdisc *q = NULL;
1316 struct Qdisc *p = NULL;
1319 if ((n->nlmsg_type != RTM_GETQDISC) &&
1320 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1323 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
1327 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1331 clid = tcm->tcm_parent;
1333 if (clid != TC_H_ROOT) {
1334 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1335 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1337 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
1340 q = qdisc_leaf(p, clid);
1341 } else if (dev_ingress_queue(dev)) {
1342 q = dev_ingress_queue(dev)->qdisc_sleeping;
1348 NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
1352 if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
1353 NL_SET_ERR_MSG(extack, "Invalid handle");
1357 q = qdisc_lookup(dev, tcm->tcm_handle);
1359 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
1364 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1365 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1369 if (n->nlmsg_type == RTM_DELQDISC) {
1371 NL_SET_ERR_MSG(extack, "Classid cannot be zero");
1374 if (q->handle == 0) {
1375 NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
1378 err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
1382 qdisc_notify(net, skb, n, clid, NULL, q);
1388 * Create/change qdisc.
1391 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1392 struct netlink_ext_ack *extack)
1394 struct net *net = sock_net(skb->sk);
1396 struct nlattr *tca[TCA_MAX + 1];
1397 struct net_device *dev;
1399 struct Qdisc *q, *p;
1402 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1406 /* Reinit, just in case something touches this. */
1407 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
1411 tcm = nlmsg_data(n);
1412 clid = tcm->tcm_parent;
1415 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1421 if (clid != TC_H_ROOT) {
1422 if (clid != TC_H_INGRESS) {
1423 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1425 NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1428 q = qdisc_leaf(p, clid);
1429 } else if (dev_ingress_queue_create(dev)) {
1430 q = dev_ingress_queue(dev)->qdisc_sleeping;
1436 /* It may be default qdisc, ignore it */
1437 if (q && q->handle == 0)
1440 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1441 if (tcm->tcm_handle) {
1442 if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1443 NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1446 if (TC_H_MIN(tcm->tcm_handle)) {
1447 NL_SET_ERR_MSG(extack, "Invalid minor handle");
1450 q = qdisc_lookup(dev, tcm->tcm_handle);
1452 goto create_n_graft;
1453 if (n->nlmsg_flags & NLM_F_EXCL) {
1454 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1457 if (tca[TCA_KIND] &&
1458 nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1459 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1463 (p && check_loop(q, p, 0))) {
1464 NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1467 qdisc_refcount_inc(q);
1471 goto create_n_graft;
1473 /* This magic test requires explanation.
1475 * We know, that some child q is already
1476 * attached to this parent and have choice:
1477 * either to change it or to create/graft new one.
1479 * 1. We are allowed to create/graft only
1480 * if CREATE and REPLACE flags are set.
1482 * 2. If EXCL is set, requestor wanted to say,
1483 * that qdisc tcm_handle is not expected
1484 * to exist, so that we choose create/graft too.
1486 * 3. The last case is when no flags are set.
1487 * Alas, it is sort of a hole in the API; we
1488 * cannot decide what to do unambiguously.
1489 * For now we select create/graft if the
1490 * user gave a KIND which does not match the existing one.
1492 if ((n->nlmsg_flags & NLM_F_CREATE) &&
1493 (n->nlmsg_flags & NLM_F_REPLACE) &&
1494 ((n->nlmsg_flags & NLM_F_EXCL) ||
1496 nla_strcmp(tca[TCA_KIND], q->ops->id))))
1497 goto create_n_graft;
1501 if (!tcm->tcm_handle) {
1502 NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1505 q = qdisc_lookup(dev, tcm->tcm_handle);
1508 /* Change qdisc parameters */
1510 NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1513 if (n->nlmsg_flags & NLM_F_EXCL) {
1514 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1517 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1518 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1521 err = qdisc_change(q, tca, extack);
1523 qdisc_notify(net, skb, n, clid, NULL, q);
1527 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1528 NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1531 if (clid == TC_H_INGRESS) {
1532 if (dev_ingress_queue(dev)) {
1533 q = qdisc_create(dev, dev_ingress_queue(dev), p,
1534 tcm->tcm_parent, tcm->tcm_parent,
1537 NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1541 struct netdev_queue *dev_queue;
1543 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1544 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1546 dev_queue = p->dev_queue;
1548 dev_queue = netdev_get_tx_queue(dev, 0);
1550 q = qdisc_create(dev, dev_queue, p,
1551 tcm->tcm_parent, tcm->tcm_handle,
1561 err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
1571 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1572 struct netlink_callback *cb,
1573 int *q_idx_p, int s_q_idx, bool recur,
1574 bool dump_invisible)
1576 int ret = 0, q_idx = *q_idx_p;
1584 if (q_idx < s_q_idx) {
1587 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1588 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1589 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1595 /* If dumping singletons, there is no qdisc_dev(root) and the singleton
1596 * itself has already been dumped.
1598 * If we've already dumped the top-level (ingress) qdisc above and the global
1599 * qdisc hashtable, we don't want to hit it again
1601 if (!qdisc_dev(root) || !recur)
1604 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1605 if (q_idx < s_q_idx) {
1609 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1610 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1611 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1625 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1627 struct net *net = sock_net(skb->sk);
1630 struct net_device *dev;
1631 const struct nlmsghdr *nlh = cb->nlh;
1632 struct nlattr *tca[TCA_MAX + 1];
1635 s_idx = cb->args[0];
1636 s_q_idx = q_idx = cb->args[1];
1641 err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL);
1645 for_each_netdev(net, dev) {
1646 struct netdev_queue *dev_queue;
1654 if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
1655 true, tca[TCA_DUMP_INVISIBLE]) < 0)
1658 dev_queue = dev_ingress_queue(dev);
1660 tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1661 &q_idx, s_q_idx, false,
1662 tca[TCA_DUMP_INVISIBLE]) < 0)
1671 cb->args[1] = q_idx;
1678 /************************************************
1679 * Traffic classes manipulation. *
1680 ************************************************/
1682 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1684 u32 portid, u32 seq, u16 flags, int event)
1687 struct nlmsghdr *nlh;
1688 unsigned char *b = skb_tail_pointer(skb);
1690 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1693 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1695 goto out_nlmsg_trim;
1696 tcm = nlmsg_data(nlh);
1697 tcm->tcm_family = AF_UNSPEC;
1700 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1701 tcm->tcm_parent = q->handle;
1702 tcm->tcm_handle = q->handle;
1704 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1705 goto nla_put_failure;
1706 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1707 goto nla_put_failure;
1709 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1710 NULL, &d, TCA_PAD) < 0)
1711 goto nla_put_failure;
1713 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1714 goto nla_put_failure;
1716 if (gnet_stats_finish_copy(&d) < 0)
1717 goto nla_put_failure;
1719 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1728 static int tclass_notify(struct net *net, struct sk_buff *oskb,
1729 struct nlmsghdr *n, struct Qdisc *q,
1730 unsigned long cl, int event)
1732 struct sk_buff *skb;
1733 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1735 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1739 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1744 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1745 n->nlmsg_flags & NLM_F_ECHO);
1748 static int tclass_del_notify(struct net *net,
1749 const struct Qdisc_class_ops *cops,
1750 struct sk_buff *oskb, struct nlmsghdr *n,
1751 struct Qdisc *q, unsigned long cl)
1753 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1754 struct sk_buff *skb;
1760 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1764 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
1765 RTM_DELTCLASS) < 0) {
1770 err = cops->delete(q, cl);
1776 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1777 n->nlmsg_flags & NLM_F_ECHO);
1780 #ifdef CONFIG_NET_CLS
1782 struct tcf_bind_args {
1783 struct tcf_walker w;
1788 static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1790 struct tcf_bind_args *a = (void *)arg;
1792 if (tp->ops->bind_class) {
1793 struct Qdisc *q = tcf_block_q(tp->chain->block);
1796 tp->ops->bind_class(n, a->classid, a->cl);
1802 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1803 unsigned long new_cl)
1805 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1806 struct tcf_block *block;
1807 struct tcf_chain *chain;
1810 cl = cops->find(q, portid);
1813 block = cops->tcf_block(q, cl, NULL);
1816 list_for_each_entry(chain, &block->chain_list, list) {
1817 struct tcf_proto *tp;
1819 for (tp = rtnl_dereference(chain->filter_chain);
1820 tp; tp = rtnl_dereference(tp->next)) {
1821 struct tcf_bind_args arg = {};
1823 arg.w.fn = tcf_node_bind;
1826 tp->ops->walk(tp, &arg.w);
1833 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1834 unsigned long new_cl)
1840 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
1841 struct netlink_ext_ack *extack)
1843 struct net *net = sock_net(skb->sk);
1844 struct tcmsg *tcm = nlmsg_data(n);
1845 struct nlattr *tca[TCA_MAX + 1];
1846 struct net_device *dev;
1847 struct Qdisc *q = NULL;
1848 const struct Qdisc_class_ops *cops;
1849 unsigned long cl = 0;
1850 unsigned long new_cl;
1856 if ((n->nlmsg_type != RTM_GETTCLASS) &&
1857 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1860 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
1864 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1869 parent == TC_H_UNSPEC - unspecified parent.
1870 parent == TC_H_ROOT - class is root, which has no parent.
1871 parent == X:0 - parent is root class.
1872 parent == X:Y - parent is a node in hierarchy.
1873 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
1875 handle == 0:0 - generate handle from kernel pool.
1876 handle == 0:Y - class is X:Y, where X:0 is qdisc.
1877 handle == X:Y - clear.
1878 handle == X:0 - root class.
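A worked example (the tc command line is only illustrative):
"tc class add dev eth0 parent 1: classid 1:10 ..." arrives with
tcm_parent = 0x00010000 (1:0) and tcm_handle = 0x00010010 (1:10, both
halves parsed as hex by tc). Below, qid resolves to 0x00010000, qdisc
1:0 is looked up, and cops->find()/cops->change() run with classid
0x00010010.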
1881 /* Step 1. Determine qdisc handle X:0 */
1883 portid = tcm->tcm_parent;
1884 clid = tcm->tcm_handle;
1885 qid = TC_H_MAJ(clid);
1887 if (portid != TC_H_ROOT) {
1888 u32 qid1 = TC_H_MAJ(portid);
1891 /* If both majors are known, they must be identical. */
1896 } else if (qid == 0)
1897 qid = dev->qdisc->handle;
1899 /* Now qid is genuine qdisc handle consistent
1900 * both with parent and child.
1902 * TC_H_MAJ(portid) still may be unspecified, complete it now.
1905 portid = TC_H_MAKE(qid, portid);
1908 qid = dev->qdisc->handle;
1911 /* OK. Locate qdisc */
1912 q = qdisc_lookup(dev, qid);
1916 /* And check that it supports classes */
1917 cops = q->ops->cl_ops;
1921 /* Now try to get class */
1923 if (portid == TC_H_ROOT)
1926 clid = TC_H_MAKE(qid, clid);
1929 cl = cops->find(q, clid);
1933 if (n->nlmsg_type != RTM_NEWTCLASS ||
1934 !(n->nlmsg_flags & NLM_F_CREATE))
1937 switch (n->nlmsg_type) {
1940 if (n->nlmsg_flags & NLM_F_EXCL)
1944 err = tclass_del_notify(net, cops, skb, n, q, cl);
1945 /* Unbind the class from its filters (rebind to classid 0) */
1946 tc_bind_tclass(q, portid, clid, 0);
1949 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1957 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1958 NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
1965 err = cops->change(q, clid, portid, tca, &new_cl, extack);
1967 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1968 /* We just created a new class, need to do reverse binding. */
1970 tc_bind_tclass(q, portid, clid, new_cl);
1976 struct qdisc_dump_args {
1977 struct qdisc_walker w;
1978 struct sk_buff *skb;
1979 struct netlink_callback *cb;
1982 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
1983 struct qdisc_walker *arg)
1985 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1987 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
1988 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
1992 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1993 struct tcmsg *tcm, struct netlink_callback *cb,
1996 struct qdisc_dump_args arg;
1998 if (tc_qdisc_dump_ignore(q, false) ||
1999 *t_p < s_t || !q->ops->cl_ops ||
2001 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
2006 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
2007 arg.w.fn = qdisc_class_dump;
2011 arg.w.skip = cb->args[1];
2013 q->ops->cl_ops->walk(q, &arg.w);
2014 cb->args[1] = arg.w.count;
2021 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
2022 struct tcmsg *tcm, struct netlink_callback *cb,
2031 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
2034 if (!qdisc_dev(root))
2037 if (tcm->tcm_parent) {
2038 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
2039 if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2043 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
2044 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2051 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
2053 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2054 struct net *net = sock_net(skb->sk);
2055 struct netdev_queue *dev_queue;
2056 struct net_device *dev;
2059 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2061 dev = dev_get_by_index(net, tcm->tcm_ifindex);
2068 if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
2071 dev_queue = dev_ingress_queue(dev);
2073 tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
2084 #ifdef CONFIG_PROC_FS
2085 static int psched_show(struct seq_file *seq, void *v)
2087 seq_printf(seq, "%08x %08x %08x %08x\n",
2088 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
2090 (u32)NSEC_PER_SEC / hrtimer_resolution);
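/* Note (general background, not specific to these lines): the values printed
 * to /proc/net/psched are read once by userspace tc at startup to derive its
 * time-unit and psched-tick conversion factors and the timer resolution used
 * when computing rate tables and latencies.
 */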
2095 static int psched_open(struct inode *inode, struct file *file)
2097 return single_open(file, psched_show, NULL);
2100 static const struct file_operations psched_fops = {
2101 .open = psched_open,
2103 .llseek = seq_lseek,
2104 .release = single_release,
2107 static int __net_init psched_net_init(struct net *net)
2109 struct proc_dir_entry *e;
2111 e = proc_create("psched", 0, net->proc_net, &psched_fops);
2118 static void __net_exit psched_net_exit(struct net *net)
2120 remove_proc_entry("psched", net->proc_net);
2123 static int __net_init psched_net_init(struct net *net)
2128 static void __net_exit psched_net_exit(struct net *net)
2133 static struct pernet_operations psched_net_ops = {
2134 .init = psched_net_init,
2135 .exit = psched_net_exit,
2138 static int __init pktsched_init(void)
2142 err = register_pernet_subsys(&psched_net_ops);
2144 pr_err("pktsched_init: "
2145 "cannot initialize per netns operations\n");
2149 register_qdisc(&pfifo_fast_ops);
2150 register_qdisc(&pfifo_qdisc_ops);
2151 register_qdisc(&bfifo_qdisc_ops);
2152 register_qdisc(&pfifo_head_drop_qdisc_ops);
2153 register_qdisc(&mq_qdisc_ops);
2154 register_qdisc(&noqueue_qdisc_ops);
2156 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2157 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2158 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2160 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2161 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2162 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2168 subsys_initcall(pktsched_init);