/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

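/* Example (sketch, not part of this file): a classifier module pairs the
 * register/unregister calls in its module init/exit hooks. "foo" and the
 * cls_foo_* names are hypothetical placeholders:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit foo_exit_module(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 */
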
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

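/* Worked example (illustrative): with no existing filter, tp is NULL and
 * the first auto-allocated prio is TC_H_MAJ(0xC0000000U) == 0xC0000000U,
 * i.e. 49152 once shifted down by 16 the way "tc" prints it. If a filter
 * already sits at prio 0xC0000000U, the next call yields
 * TC_H_MAJ(0xC0000000U - 1) == 0xBFFF0000U (49151), so kernel-chosen
 * priorities step downwards from the top of the range.
 */
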
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, u32 parent, struct Qdisc *q,
					  struct tcf_chain *chain)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	err = -ENOENT;
	tp->ops = tcf_proto_lookup_ops(kind);
	if (!tp->ops) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("cls_%s", kind);
		rtnl_lock();
		tp->ops = tcf_proto_lookup_ops(kind);
		/* We dropped the RTNL semaphore in order to perform
		 * the module load. So, even if we succeeded in loading
		 * the module we have to replay the request. We indicate
		 * this using -EAGAIN.
		 */
		if (tp->ops) {
			module_put(tp->ops->owner);
			err = -EAGAIN;
		}
#endif
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->classid = parent;
	tp->q = q;
	tp->chain = chain;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	return chain;
}

static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	if (chain->p_filter_chain)
		RCU_INIT_POINTER(*chain->p_filter_chain, NULL);
	while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_chain_put(chain);
		tcf_proto_destroy(tp);
	}
}

static void tcf_chain_destroy(struct tcf_chain *chain)
{
	list_del(&chain->list);
	kfree(chain);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	++chain->refcnt;
}

struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create)
{
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index) {
			tcf_chain_hold(chain);
			return chain;
		}
	}

	return create ? tcf_chain_create(block, chain_index) : NULL;
}
EXPORT_SYMBOL(tcf_chain_get);

void tcf_chain_put(struct tcf_chain *chain)
{
	if (--chain->refcnt == 0)
		tcf_chain_destroy(chain);
}
EXPORT_SYMBOL(tcf_chain_put);

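/* Usage sketch (illustrative): users pair get/put around the chain's
 * lifetime, e.g. an action that jumps to a chain takes a reference so
 * the chain outlives its filters:
 *
 *	chain = tcf_chain_get(block, chain_index, true);
 *	if (!chain)
 *		return -ENOMEM;
 *	...
 *	tcf_chain_put(chain);
 */
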
static void
tcf_chain_filter_chain_ptr_set(struct tcf_chain *chain,
			       struct tcf_proto __rcu **p_filter_chain)
{
	chain->p_filter_chain = p_filter_chain;
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
{
	struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	struct tcf_chain *chain;
	int err;

	if (!block)
		return -ENOMEM;
	INIT_LIST_HEAD(&block->chain_list);
	/* Create chain 0 by default, it has to be always present. */
	chain = tcf_chain_create(block, 0);
	if (!chain) {
		err = -ENOMEM;
		goto err_chain_create;
	}
	tcf_chain_filter_chain_ptr_set(chain, p_filter_chain);
	block->net = qdisc_net(q);
	block->q = q;
	*p_block = block;
	return 0;

err_chain_create:
	kfree(block);
	return err;
}
EXPORT_SYMBOL(tcf_block_get);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_chain *chain, *tmp;

	if (!block)
		return;

	/* XXX: Standalone actions are not allowed to jump to any chain, and
	 * bound actions should be all removed after flushing. However,
	 * filters are destroyed in RCU callbacks, we have to hold the chains
	 * first, otherwise we would always race with RCU callbacks on this list
	 * without proper locking.
	 */

	/* Wait for existing RCU callbacks to cool down. */
	rcu_barrier();

	/* Hold a refcnt for all chains, except 0, in case they are gone. */
	list_for_each_entry(chain, &block->chain_list, list)
		if (chain->index)
			tcf_chain_hold(chain);

	/* No race on the list, because no chain could be destroyed. */
	list_for_each_entry(chain, &block->chain_list, list)
		tcf_chain_flush(chain);

	/* Wait for RCU callbacks to release the reference count. */
	rcu_barrier();

	/* At this point, all the chains should have refcnt == 1. */
	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
		tcf_chain_put(chain);
	kfree(block);
}
EXPORT_SYMBOL(tcf_block_put);

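/* Usage sketch (illustrative, names are made up): a classful qdisc acquires
 * a block per filter list at init time and releases it on destroy:
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);
 */
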
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->q->ops->id, tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);

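/* Usage sketch (illustrative): a qdisc's enqueue path typically classifies
 * like this, mapping the verdict and res.classid to a class or band:
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	int result = tcf_classify(skb, fl, &res, false);
 *
 *	switch (result) {
 *	case TC_ACT_SHOT:
 *		qdisc_drop(skb, sch, to_free);
 *		return NET_XMIT_DROP;
 *	default:
 *		... select class from res.classid ...
 *	}
 */
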
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}

static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (chain->p_filter_chain &&
	    *chain_info->pprev == chain->filter_chain)
		rcu_assign_pointer(*chain->p_filter_chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (chain->p_filter_chain && tp == chain->filter_chain)
		RCU_INIT_POINTER(*chain->p_filter_chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, void *fh, u32 portid,
			 u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
	tcm->tcm_parent = tp->classid;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      void *fh, bool unicast, bool *last)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, NULL, event, false);
}

/* Add/change/delete/get a filter node */

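/* Example (illustrative): a userspace request such as
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 u32 \
 *		match ip dst 10.0.0.1/32 flowid 1:1
 *
 * reaches the handler below as an RTM_NEWTFILTER message with
 * NLM_F_CREATE set; "prio 10" and "protocol ip" arrive packed in
 * tcm_info (prio in the upper 16 bits, protocol in the lower).
 */
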
static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;

	if ((n->nlmsg_type != RTM_GETTFILTER) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		switch (n->nlmsg_type) {
		case RTM_DELTFILTER:
			if (protocol || t->tcm_handle || tca[TCA_KIND])
				return -ENOENT;
			break;
		case RTM_NEWTFILTER:
			/* If no priority is provided by the user,
			 * we allocate one.
			 */
			if (n->nlmsg_flags & NLM_F_CREATE) {
				prio = TC_H_MAKE(0x80000000U, 0U);
				prio_allocate = true;
				break;
			}
			/* fall-through */
		default:
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	/* Find link */
	dev = __dev_get_by_index(net, t->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (!q)
			return -EINVAL;
	}

	/* Is it classful? */
	cops = q->ops->cl_ops;
	if (!cops)
		return -EINVAL;

	if (!cops->tcf_block)
		return -EOPNOTSUPP;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		cl = cops->find(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	block = cops->tcf_block(q, cl);
	if (!block) {
		err = -EINVAL;
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index,
			      n->nlmsg_type == RTM_NEWTFILTER);
	if (!chain) {
		err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
		goto errout;
	}

	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
		tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);
		err = 0;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		err = PTR_ERR(tp);
		goto errout;
	}

	if (!tp) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			err = -EINVAL;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, parent, q, chain);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp);
			err = 0;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}
	} else {
		bool last;

		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_proto_destroy(tp);
				err = -EEXIST;
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tfilter_del_notify(net, skb, n, tp, fh, false,
						 &last);
			if (err)
				goto errout;
			if (last) {
				tcf_chain_tp_remove(chain, &chain_info, tp);
				tcf_proto_destroy(tp);
			}
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, fh,
					     RTM_NEWTFILTER, true);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE :
							      TCA_ACT_REPLACE);
	if (err == 0) {
		if (tp_created)
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp);
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct sk_buff *skb,
			   struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				return false;

			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			return false;
	}
	return true;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	long index_start;
	long index;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return skb->len;

	if (!tcm->tcm_parent)
		q = dev->qdisc;
	else
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	if (!q)
		goto out;
	cops = q->ops->cl_ops;
	if (!cops)
		goto out;
	if (!cops->tcf_block)
		goto out;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->find(q, tcm->tcm_parent);
		if (cl == 0)
			goto out;
	}
	block = cops->tcf_block(q, cl);
	if (!block)
		goto out;

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, skb, cb, index_start, &index))
			break;
	}

	cb->args[0] = index;

out:
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(actions);

	tcf_exts_to_list(exts, &actions);
	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			LIST_HEAD(actions);
			int err, i = 0;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      &actions);
			if (err)
				return err;
			list_for_each_entry(act, &actions, list)
				exts->actions[i++] = act;
			exts->nr_actions = i;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

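/* Usage sketch (illustrative, attribute names are made up): a classifier's
 * ->change() typically validates extensions into a temporary tcf_exts and
 * then commits it:
 *
 *	struct tcf_exts e;
 *	int err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
 *	if (err < 0) {
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 *	tcf_exts_change(&prog->exts, &e);
 */
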
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			LIST_HEAD(actions);

			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			tcf_exts_to_list(exts, &actions);
			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts,
				       enum tc_setup_type type,
				       void *type_data, bool err_stop)
{
	int ok_count = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	struct net_device *dev;
	LIST_HEAD(actions);
	int ret;

	if (!tcf_exts_has_actions(exts))
		return 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (!a->ops->get_dev)
			continue;
		dev = a->ops->get_dev(a);
		if (!dev || !tc_can_offload(dev))
			continue;
		ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
		if (ret < 0)
			return ret;
		ok_count += ret;
	}
#endif
	return ok_count;
}

int tc_setup_cb_call(struct tcf_exts *exts, enum tc_setup_type type,
		     void *type_data, bool err_stop)
{
	return tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
}
EXPORT_SYMBOL(tc_setup_cb_call);

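/* Usage sketch (illustrative): a classifier offloads a rule through the
 * egress devices of its exts, e.g. something flower-like:
 *
 *	err = tc_setup_cb_call(&f->exts, TC_SETUP_CLSFLOWER,
 *			       &cls_flower, skip_sw);
 *	if (err < 0)
 *		... back out the hardware offload ...
 *	else if (err > 0)
 *		f->flags |= TCA_CLS_FLAGS_IN_HW;
 */
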
static int __init tc_filter_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, 0);

	return 0;
}

subsys_initcall(tc_filter_init);