/*
 * net/sched/cls_api.c  Packet classifier API.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);
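/* Note: cls_mod_lock only guards tcf_proto_base above. The per-qdisc
 * filter chains themselves are modified under RTNL and read under RCU
 * on the classification fast path.
 */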
/* Find classifier type by string name */

static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
        const struct tcf_proto_ops *t, *res = NULL;

        if (kind) {
                read_lock(&cls_mod_lock);
                list_for_each_entry(t, &tcf_proto_base, head) {
                        if (strcmp(kind, t->kind) == 0) {
                                if (try_module_get(t->owner))
                                        res = t;
                                break;
                        }
                }
                read_unlock(&cls_mod_lock);
        }
        return res;
}
/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -EEXIST;

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head)
                if (!strcmp(ops->kind, t->kind))
                        goto out;

        list_add_tail(&ops->head, &tcf_proto_base);
        rc = 0;
out:
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
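/* Typical registration from a classifier module (a sketch; "foo" and
 * the callback names are illustrative, not part of this file):
 *
 *      static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *              .kind           = "foo",
 *              .classify       = foo_classify,
 *              .init           = foo_init,
 *              .destroy        = foo_destroy,
 *              .owner          = THIS_MODULE,
 *      };
 *
 *      static int __init cls_foo_init(void)
 *      {
 *              return register_tcf_proto_ops(&cls_foo_ops);
 *      }
 */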
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -ENOENT;

        /* Wait for outstanding call_rcu()s, if any, from a
         * tcf_proto_ops's destroy() handler.
         */
        rcu_barrier();

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head) {
                if (t == ops) {
                        list_del(&t->head);
                        rc = 0;
                        break;
                }
        }
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
        u32 first = TC_H_MAKE(0xC0000000U, 0U);

        if (tp)
                first = tp->prio - 1;

        return TC_H_MAJ(first);
}
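/* tcf_auto_prio() example: on an empty chain the new filter gets prio
 * 0xC0000000; if the lowest kernel-allocated prio so far is 0xC0000000,
 * the next one gets 0xBFFF0000, i.e. it is linked in front of it and
 * matched first.
 */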
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
                                          u32 prio, u32 parent, struct Qdisc *q,
                                          struct tcf_chain *chain)
{
        struct tcf_proto *tp;
        int err;

        tp = kzalloc(sizeof(*tp), GFP_KERNEL);
        if (!tp)
                return ERR_PTR(-ENOBUFS);

        err = -ENOENT;
        tp->ops = tcf_proto_lookup_ops(kind);
        if (!tp->ops) {
#ifdef CONFIG_MODULES
                rtnl_unlock();
                request_module("cls_%s", kind);
                rtnl_lock();
                tp->ops = tcf_proto_lookup_ops(kind);
                /* We dropped the RTNL semaphore in order to perform
                 * the module load. So, even if we succeeded in loading
                 * the module we have to replay the request. We indicate
                 * this using -EAGAIN.
                 */
                if (tp->ops) {
                        module_put(tp->ops->owner);
                        err = -EAGAIN;
                }
#endif
                goto errout;
        }
        tp->classify = tp->ops->classify;
        tp->protocol = protocol;
        tp->prio = prio;
        tp->classid = parent;
        tp->q = q;
        tp->chain = chain;

        err = tp->ops->init(tp);
        if (err) {
                module_put(tp->ops->owner);
                goto errout;
        }
        return tp;

errout:
        kfree(tp);
        return ERR_PTR(err);
}
static void tcf_proto_destroy(struct tcf_proto *tp)
{
        tp->ops->destroy(tp);
        module_put(tp->ops->owner);
        kfree_rcu(tp, rcu);
}
static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        chain = kzalloc(sizeof(*chain), GFP_KERNEL);
        if (!chain)
                return NULL;
        list_add_tail(&chain->list, &block->chain_list);
        chain->block = block;
        chain->index = chain_index;
        chain->refcnt = 1;
        return chain;
}
static void tcf_chain_flush(struct tcf_chain *chain)
{
        struct tcf_proto *tp;

        if (chain->p_filter_chain)
                RCU_INIT_POINTER(*chain->p_filter_chain, NULL);
        while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
                RCU_INIT_POINTER(chain->filter_chain, tp->next);
                tcf_chain_put(chain);
                tcf_proto_destroy(tp);
        }
}
static void tcf_chain_destroy(struct tcf_chain *chain)
{
        list_del(&chain->list);
        kfree(chain);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
        ++chain->refcnt;
}

struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                bool create)
{
        struct tcf_chain *chain;

        list_for_each_entry(chain, &block->chain_list, list) {
                if (chain->index == chain_index) {
                        tcf_chain_hold(chain);
                        return chain;
                }
        }

        return create ? tcf_chain_create(block, chain_index) : NULL;
}
EXPORT_SYMBOL(tcf_chain_get);

void tcf_chain_put(struct tcf_chain *chain)
{
        if (--chain->refcnt == 0)
                tcf_chain_destroy(chain);
}
EXPORT_SYMBOL(tcf_chain_put);
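/* Chain reference counting: chain 0 is created with refcnt 1 by
 * tcf_block_get() and released from tcf_block_put(). In addition, every
 * tcf_proto linked into a chain holds one reference, taken in
 * tcf_chain_tp_insert() and dropped in tcf_chain_tp_remove() or
 * tcf_chain_flush().
 */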
static void
tcf_chain_filter_chain_ptr_set(struct tcf_chain *chain,
                               struct tcf_proto __rcu **p_filter_chain)
{
        chain->p_filter_chain = p_filter_chain;
}
int tcf_block_get(struct tcf_block **p_block,
                  struct tcf_proto __rcu **p_filter_chain)
{
        struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
        struct tcf_chain *chain;
        int err;

        if (!block)
                return -ENOMEM;
        INIT_LIST_HEAD(&block->chain_list);
        /* Create chain 0 by default, it has to be always present. */
        chain = tcf_chain_create(block, 0);
        if (!chain) {
                err = -ENOMEM;
                goto err_chain_create;
        }
        tcf_chain_filter_chain_ptr_set(chain, p_filter_chain);
        *p_block = block;
        return 0;

err_chain_create:
        kfree(block);
        return err;
}
EXPORT_SYMBOL(tcf_block_get);
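/* A classful qdisc typically calls tcf_block_get() from its ->init()
 * and tcf_block_put() from its ->destroy(), e.g. (sketch, field names
 * are illustrative):
 *
 *      err = tcf_block_get(&q->block, &q->filter_list);
 *      ...
 *      tcf_block_put(q->block);
 *
 * where q->filter_list is the qdisc's struct tcf_proto __rcu * head.
 */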
void tcf_block_put(struct tcf_block *block)
{
        struct tcf_chain *chain, *tmp;

        if (!block)
                return;

        /* XXX: Standalone actions are not allowed to jump to any chain, and
         * bound actions should be all removed after flushing. However,
         * filters are destroyed in RCU callbacks, we have to hold the chains
         * first, otherwise we would always race with RCU callbacks on this list
         * without proper locking.
         */

        /* Wait for existing RCU callbacks to cool down. */
        rcu_barrier();

        /* Hold a refcnt for all chains, except 0, in case they are gone. */
        list_for_each_entry(chain, &block->chain_list, list)
                if (chain->index)
                        tcf_chain_hold(chain);

        /* No race on the list, because no chain could be destroyed. */
        list_for_each_entry(chain, &block->chain_list, list)
                tcf_chain_flush(chain);

        /* Wait for RCU callbacks to release the reference count. */
        rcu_barrier();

        /* At this point, all the chains should have refcnt == 1. */
        list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
                tcf_chain_put(chain);
        kfree(block);
}
EXPORT_SYMBOL(tcf_block_put);
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                 struct tcf_result *res, bool compat_mode)
{
        __be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
        const int max_reclassify_loop = 4;
        const struct tcf_proto *orig_tp = tp;
        const struct tcf_proto *first_tp;
        int limit = 0;

reclassify:
#endif
        for (; tp; tp = rcu_dereference_bh(tp->next)) {
                int err;

                if (tp->protocol != protocol &&
                    tp->protocol != htons(ETH_P_ALL))
                        continue;

                err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
                if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
                        first_tp = orig_tp;
                        goto reset;
                } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
                        first_tp = res->goto_tp;
                        goto reset;
                }
#endif
                if (err >= 0)
                        return err;
        }

        return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
        if (unlikely(limit++ >= max_reclassify_loop)) {
                net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
                                       tp->q->ops->id, tp->prio & 0xffff,
                                       ntohs(tp->protocol));
                return TC_ACT_SHOT;
        }

        tp = first_tp;
        protocol = tc_skb_protocol(skb);
        goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
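/* tcf_classify() runs on the fast path, normally from a qdisc
 * ->enqueue() with BH disabled, e.g. (sketch):
 *
 *      struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *      int result = tcf_classify(skb, fl, &res, false);
 *
 * TC_ACT_UNSPEC means no filter matched and the caller should fall back
 * to its default classification.
 */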
/* chain_info records an insertion point in a filter chain: pprev is the
 * link to patch and next is the filter that will follow the new one.
 */
struct tcf_chain_info {
        struct tcf_proto __rcu **pprev;
        struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
        return rtnl_dereference(*chain_info->pprev);
}
static void tcf_chain_tp_insert(struct tcf_chain *chain,
                                struct tcf_chain_info *chain_info,
                                struct tcf_proto *tp)
{
        if (chain->p_filter_chain &&
            *chain_info->pprev == chain->filter_chain)
                rcu_assign_pointer(*chain->p_filter_chain, tp);
        RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
        rcu_assign_pointer(*chain_info->pprev, tp);
        tcf_chain_hold(chain);
}
static void tcf_chain_tp_remove(struct tcf_chain *chain,
                                struct tcf_chain_info *chain_info,
                                struct tcf_proto *tp)
{
        struct tcf_proto *next = rtnl_dereference(chain_info->next);

        if (chain->p_filter_chain && tp == chain->filter_chain)
                RCU_INIT_POINTER(*chain->p_filter_chain, next);
        RCU_INIT_POINTER(*chain_info->pprev, next);
        tcf_chain_put(chain);
}
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
                                           struct tcf_chain_info *chain_info,
                                           u32 protocol, u32 prio,
                                           bool prio_allocate)
{
        struct tcf_proto **pprev;
        struct tcf_proto *tp;

        /* Check the chain for existence of proto-tcf with this priority */
        for (pprev = &chain->filter_chain;
             (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
                if (tp->prio >= prio) {
                        if (tp->prio == prio) {
                                if (prio_allocate ||
                                    (tp->protocol != protocol && protocol))
                                        return ERR_PTR(-EINVAL);
                        } else {
                                tp = NULL;
                        }
                        break;
                }
        }
        chain_info->pprev = pprev;
        chain_info->next = tp ? tp->next : NULL;
        return tp;
}
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
                         struct tcf_proto *tp, void *fh, u32 portid,
                         u32 seq, u16 flags, int event)
{
        struct tcmsg *tcm;
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);

        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        tcm = nlmsg_data(nlh);
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm__pad1 = 0;
        tcm->tcm__pad2 = 0;
        tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
        tcm->tcm_parent = tp->classid;
        tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
        if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
                goto nla_put_failure;
        if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
                goto nla_put_failure;
        if (!fh) {
                tcm->tcm_handle = 0;
        } else {
                if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
                        goto nla_put_failure;
        }
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;

out_nlmsg_trim:
nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
static int tfilter_notify(struct net *net, struct sk_buff *oskb,
                          struct nlmsghdr *n, struct tcf_proto *tp,
                          void *fh, int event, bool unicast)
{
        struct sk_buff *skb;
        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
                          n->nlmsg_flags, event) <= 0) {
                kfree_skb(skb);
                return -EINVAL;
        }

        if (unicast)
                return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

        return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                              n->nlmsg_flags & NLM_F_ECHO);
}
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
                              struct nlmsghdr *n, struct tcf_proto *tp,
                              void *fh, bool unicast, bool *last)
{
        struct sk_buff *skb;
        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
        int err;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
                          n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
                kfree_skb(skb);
                return -EINVAL;
        }

        err = tp->ops->delete(tp, fh, last);
        if (err) {
                kfree_skb(skb);
                return err;
        }

        if (unicast)
                return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

        return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                              n->nlmsg_flags & NLM_F_ECHO);
}
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
                                 struct nlmsghdr *n,
                                 struct tcf_chain *chain, int event)
{
        struct tcf_proto *tp;

        for (tp = rtnl_dereference(chain->filter_chain);
             tp; tp = rtnl_dereference(tp->next))
                tfilter_notify(net, oskb, n, tp, NULL, event, false);
}
/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
                          struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_MAX + 1];
        struct tcmsg *t;
        u32 protocol;
        u32 prio;
        bool prio_allocate;
        u32 parent;
        u32 chain_index;
        struct net_device *dev;
        struct Qdisc *q;
        struct tcf_chain_info chain_info;
        struct tcf_chain *chain = NULL;
        struct tcf_block *block;
        struct tcf_proto *tp;
        const struct Qdisc_class_ops *cops;
        unsigned long cl;
        void *fh;
        int err;
        int tp_created;

        if ((n->nlmsg_type != RTM_GETTFILTER) &&
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

replay:
        tp_created = 0;

        err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
        if (err < 0)
                return err;

        t = nlmsg_data(n);
        protocol = TC_H_MIN(t->tcm_info);
        prio = TC_H_MAJ(t->tcm_info);
        prio_allocate = false;
        parent = t->tcm_parent;
        cl = 0;

        if (prio == 0) {
                switch (n->nlmsg_type) {
                case RTM_DELTFILTER:
                        if (protocol || t->tcm_handle || tca[TCA_KIND])
                                return -ENOENT;
                        break;
                case RTM_NEWTFILTER:
                        /* If no priority is provided by the user,
                         * we allocate one.
                         */
                        if (n->nlmsg_flags & NLM_F_CREATE) {
                                prio = TC_H_MAKE(0x80000000U, 0U);
                                prio_allocate = true;
                                break;
                        }
                        /* fall through */
                default:
                        return -ENOENT;
                }
        }

        /* Find head of filter chain. */

        /* Find link */
        dev = __dev_get_by_index(net, t->tcm_ifindex);
        if (dev == NULL)
                return -ENODEV;

        /* Find qdisc */
        if (!parent) {
                q = dev->qdisc;
                parent = q->handle;
        } else {
                q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
                if (q == NULL)
                        return -EINVAL;
        }

        /* Is it classful? */
        cops = q->ops->cl_ops;
        if (!cops)
                return -EINVAL;

        if (!cops->tcf_block)
                return -EOPNOTSUPP;

        /* Do we search for filter, attached to class? */
        if (TC_H_MIN(parent)) {
                cl = cops->find(q, parent);
                if (cl == 0)
                        return -ENOENT;
        }

        /* And the last stroke */
        block = cops->tcf_block(q, cl);
        if (!block) {
                err = -EINVAL;
                goto errout;
        }

        chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
        if (chain_index > TC_ACT_EXT_VAL_MASK) {
                err = -EINVAL;
                goto errout;
        }
        chain = tcf_chain_get(block, chain_index,
                              n->nlmsg_type == RTM_NEWTFILTER);
        if (!chain) {
                err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
                goto errout;
        }

        if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
                tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER);
                tcf_chain_flush(chain);
                err = 0;
                goto errout;
        }

        tp = tcf_chain_tp_find(chain, &chain_info, protocol,
                               prio, prio_allocate);
        if (IS_ERR(tp)) {
                err = PTR_ERR(tp);
                goto errout;
        }

        if (tp == NULL) {
                /* Proto-tcf does not exist, create new one */

                if (tca[TCA_KIND] == NULL || !protocol) {
                        err = -EINVAL;
                        goto errout;
                }

                if (n->nlmsg_type != RTM_NEWTFILTER ||
                    !(n->nlmsg_flags & NLM_F_CREATE)) {
                        err = -ENOENT;
                        goto errout;
                }

                if (prio_allocate)
                        prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

                tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
                                      protocol, prio, parent, q, chain);
                if (IS_ERR(tp)) {
                        err = PTR_ERR(tp);
                        goto errout;
                }
                tp_created = 1;
        } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
                err = -EINVAL;
                goto errout;
        }

        fh = tp->ops->get(tp, t->tcm_handle);

        if (!fh) {
                if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
                        tcf_chain_tp_remove(chain, &chain_info, tp);
                        tfilter_notify(net, skb, n, tp, fh,
                                       RTM_DELTFILTER, false);
                        tcf_proto_destroy(tp);
                        err = 0;
                        goto errout;
                }

                if (n->nlmsg_type != RTM_NEWTFILTER ||
                    !(n->nlmsg_flags & NLM_F_CREATE)) {
                        err = -ENOENT;
                        goto errout;
                }
        } else {
                bool last;

                switch (n->nlmsg_type) {
                case RTM_NEWTFILTER:
                        if (n->nlmsg_flags & NLM_F_EXCL) {
                                if (tp_created)
                                        tcf_proto_destroy(tp);
                                err = -EEXIST;
                                goto errout;
                        }
                        break;
                case RTM_DELTFILTER:
                        err = tfilter_del_notify(net, skb, n, tp, fh, false,
                                                 &last);
                        if (err)
                                goto errout;
                        if (last) {
                                tcf_chain_tp_remove(chain, &chain_info, tp);
                                tcf_proto_destroy(tp);
                        }
                        goto errout;
                case RTM_GETTFILTER:
                        err = tfilter_notify(net, skb, n, tp, fh,
                                             RTM_NEWTFILTER, true);
                        goto errout;
                default:
                        err = -EINVAL;
                        goto errout;
                }
        }

        err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
                              n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
        if (err == 0) {
                if (tp_created)
                        tcf_chain_tp_insert(chain, &chain_info, tp);
                tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
        } else {
                if (tp_created)
                        tcf_proto_destroy(tp);
        }

errout:
        if (chain)
                tcf_chain_put(chain);
        if (err == -EAGAIN)
                /* Replay the request. */
                goto replay;
        return err;
}
struct tcf_dump_args {
        struct tcf_walker w;
        struct sk_buff *skb;
        struct netlink_callback *cb;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
        struct tcf_dump_args *a = (void *)arg;
        struct net *net = sock_net(a->skb->sk);

        return tcf_fill_node(net, a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
                             a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
                             RTM_NEWTFILTER);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct sk_buff *skb,
                           struct netlink_callback *cb,
                           long index_start, long *p_index)
{
        struct net *net = sock_net(skb->sk);
        struct tcmsg *tcm = nlmsg_data(cb->nlh);
        struct tcf_dump_args arg;
        struct tcf_proto *tp;

        for (tp = rtnl_dereference(chain->filter_chain);
             tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
                if (*p_index < index_start)
                        continue;
                if (TC_H_MAJ(tcm->tcm_info) &&
                    TC_H_MAJ(tcm->tcm_info) != tp->prio)
                        continue;
                if (TC_H_MIN(tcm->tcm_info) &&
                    TC_H_MIN(tcm->tcm_info) != tp->protocol)
                        continue;
                if (*p_index > index_start)
                        memset(&cb->args[1], 0,
                               sizeof(cb->args) - sizeof(cb->args[0]));
                if (cb->args[1] == 0) {
                        if (tcf_fill_node(net, skb, tp, NULL,
                                          NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                          RTM_NEWTFILTER) <= 0)
                                return false;

                        cb->args[1] = 1;
                }
                if (!tp->ops->walk)
                        continue;
                arg.w.fn = tcf_node_dump;
                arg.skb = skb;
                arg.cb = cb;
                arg.w.stop = 0;
                arg.w.skip = cb->args[1] - 1;
                arg.w.count = 0;
                tp->ops->walk(tp, &arg.w);
                cb->args[1] = arg.w.count + 1;
                if (arg.w.stop)
                        return false;
        }
        return true;
}
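/* Dump resumption state: cb->args[0] holds the filter index already
 * dumped across all chains and cb->args[1] holds the walker position
 * within the current tcf_proto (plus one), so an interrupted dump can
 * be resumed where it stopped.
 */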
/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
        struct Qdisc *q;
        struct tcf_block *block;
        struct tcf_chain *chain;
        struct tcmsg *tcm = nlmsg_data(cb->nlh);
        unsigned long cl = 0;
        const struct Qdisc_class_ops *cops;
        long index_start;
        long index;
        int err;

        if (nlmsg_len(cb->nlh) < sizeof(*tcm))
                return skb->len;

        err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
        if (err)
                return err;

        dev = __dev_get_by_index(net, tcm->tcm_ifindex);
        if (!dev)
                return skb->len;

        if (!tcm->tcm_parent)
                q = dev->qdisc;
        else
                q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
        if (!q)
                goto out;
        cops = q->ops->cl_ops;
        if (!cops)
                goto out;
        if (!cops->tcf_block)
                goto out;
        if (TC_H_MIN(tcm->tcm_parent)) {
                cl = cops->find(q, tcm->tcm_parent);
                if (cl == 0)
                        goto out;
        }
        block = cops->tcf_block(q, cl);
        if (!block)
                goto out;

        index_start = cb->args[0];
        index = 0;

        list_for_each_entry(chain, &block->chain_list, list) {
                if (tca[TCA_CHAIN] &&
                    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
                        continue;
                if (!tcf_chain_dump(chain, skb, cb, index_start, &index))
                        break;
        }

        cb->args[0] = index;

out:
        return skb->len;
}
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
        LIST_HEAD(actions);

        tcf_exts_to_list(exts, &actions);
        tcf_action_destroy(&actions, TCA_ACT_UNBIND);
        kfree(exts->actions);
        exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
        {
                struct tc_action *act;

                if (exts->police && tb[exts->police]) {
                        act = tcf_action_init_1(net, tp, tb[exts->police],
                                                rate_tlv, "police", ovr,
                                                TCA_ACT_BIND);
                        if (IS_ERR(act))
                                return PTR_ERR(act);

                        act->type = exts->type = TCA_OLD_COMPAT;
                        exts->actions[0] = act;
                        exts->nr_actions = 1;
                } else if (exts->action && tb[exts->action]) {
                        LIST_HEAD(actions);
                        int err, i = 0;

                        err = tcf_action_init(net, tp, tb[exts->action],
                                              rate_tlv, NULL, ovr, TCA_ACT_BIND,
                                              &actions);
                        if (err)
                                return err;
                        list_for_each_entry(act, &actions, list)
                                exts->actions[i++] = act;
                        exts->nr_actions = i;
                }
        }
#else
        if ((exts->action && tb[exts->action]) ||
            (exts->police && tb[exts->police]))
                return -EOPNOTSUPP;
#endif

        return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);
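/* Typical use from a classifier's ->change() (sketch; TCA_FOO_ACT,
 * TCA_FOO_POLICE and the surrounding fields are illustrative):
 *
 *      struct tcf_exts e;
 *
 *      err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
 *      if (err < 0)
 *              return err;
 *      err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
 *      if (err < 0)
 *              goto errout;
 *      tcf_exts_change(&f->exts, &e);
 */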
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
        struct tcf_exts old = *dst;

        *dst = *src;
        tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);
#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
        if (exts->nr_actions == 0)
                return NULL;
        else
                return exts->actions[0];
}
#endif
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
        struct nlattr *nest;

        if (exts->action && tcf_exts_has_actions(exts)) {
                /*
                 * again for backward compatible mode - we want
                 * to work with both old and new modes of entering
                 * tc data even if iproute2 was newer - jhs
                 */
                if (exts->type != TCA_OLD_COMPAT) {
                        LIST_HEAD(actions);

                        nest = nla_nest_start(skb, exts->action);
                        if (nest == NULL)
                                goto nla_put_failure;

                        tcf_exts_to_list(exts, &actions);
                        if (tcf_action_dump(skb, &actions, 0, 0) < 0)
                                goto nla_put_failure;
                        nla_nest_end(skb, nest);
                } else if (exts->police) {
                        struct tc_action *act = tcf_exts_first_act(exts);

                        nest = nla_nest_start(skb, exts->police);
                        if (nest == NULL || !act)
                                goto nla_put_failure;
                        if (tcf_action_dump_old(skb, act, 0, 0) < 0)
                                goto nla_put_failure;
                        nla_nest_end(skb, nest);
                }
        }
        return 0;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
#else
        return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
        struct tc_action *a = tcf_exts_first_act(exts);

        if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
                return -1;
#endif
        return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
                     struct net_device **hw_dev)
{
#ifdef CONFIG_NET_CLS_ACT
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (!tcf_exts_has_actions(exts))
                return -EINVAL;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (a->ops->get_dev) {
                        a->ops->get_dev(a, dev_net(dev), hw_dev);
                        break;
                }
        }
        if (*hw_dev)
                return 0;
#endif
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(tcf_exts_get_dev);
static int __init tc_filter_init(void)
{
        rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
                      tc_dump_tfilter, 0);

        return 0;
}

subsys_initcall(tc_filter_init);