// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;

struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie will be comprised of 32bit miss_cookie_base +
 * action index in the exts tc actions array.
 */
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};

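/* Layout note (illustrative): a node whose miss_cookie_base is 5 combined
 * with act_index 2 produces one u64 cookie carrying both halves; the exact
 * bit placement depends on endianness, which is fine because
 * tcf_exts_miss_cookie_lookup() below decodes through the same union it was
 * encoded with.
 */
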
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err < 0)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}

static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}

#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */

static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}

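/* This is the helper the offload path uses to hand hardware a single u64
 * that identifies both the registered exts (via miss_cookie_base) and the
 * index of the action inside it. A base of zero means the exts was never
 * registered above, so no cookie is emitted.
 */
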
#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module(NET_CLS_ALIAS_PREFIX "%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

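/* Example (illustrative only, names are hypothetical): a classifier module
 * registers its ops from module_init and unregisters on exit:
 *
 *	static struct tcf_proto_ops cls_foo_ops = {
 *		.kind		= "foo",
 *		.classify	= cls_foo_classify,
 *		.init		= cls_foo_init,
 *		.destroy	= cls_foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_module_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 * -EEXIST is returned if another classifier already claimed the kind.
 */
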
static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

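/* Example: on an empty chain (tp == NULL) this yields TC_H_MAJ(0xC0000000),
 * i.e. kernel-chosen prio 0xC000; with an existing head it returns one less
 * than that head's prio, so each auto-allocated filter sorts in front of it.
 */
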
static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	tp->usesw = !tp->ops->reoffload;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_proto_count_usesw(struct tcf_proto *tp, bool add)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_block *block = tp->chain->block;
	bool counted = false;

	if (!add) {
		if (tp->usesw && tp->counted) {
			if (!atomic_dec_return(&block->useswcnt))
				static_branch_dec(&tcf_sw_enabled_key);
			tp->counted = false;
		}
		return;
	}

	spin_lock(&tp->lock);
	if (tp->usesw && !tp->counted) {
		counted = true;
		tp->counted = true;
	}
	spin_unlock(&tp->lock);

	if (counted && atomic_inc_return(&block->useswcnt) == 1)
		static_branch_inc(&tcf_sw_enabled_key);
#endif
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	tcf_proto_count_usesw(tp, false);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)	\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	xa_destroy(&block->ports);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	unsigned int refcnt, non_act_refcnt;
	bool free_block = false;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	non_act_refcnt = refcnt - chain->action_refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	if (non_act_refcnt == chain->explicitly_created && !by_act) {
		if (non_act_refcnt == 0)
			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
					       chain->index, block, NULL, 0, 0);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

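/* Two passes on purpose: each tp is first recorded in the block's
 * proto_destroy hash under filter_chain_lock (concurrent inserts of an
 * identical chain/prio/protocol triple then fail with -EAGAIN in
 * tcf_chain_tp_insert_unique()), and only afterwards released outside the
 * lock, where the potentially expensive ->destroy() runs.
 */
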
static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

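/* Note the asymmetry above: a driver with ndo_setup_tc either accepts the
 * block or fails hard, while the indirect-dev path always returns
 * -EOPNOTSUPP so that tcf_block_offload_bind() below can fall back to
 * pure-software operation and only bump nooffloaddevcnt.
 */
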
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;
	xa_init(&block->ports);

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}
EXPORT_SYMBOL(tcf_block_lookup);

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes a reference to chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification
 * is possible. Note that netlink dump callbacks cannot guarantee a
 * consistent dump because the rtnl lock is released each time the skb is
 * filled with data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

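/* Example (illustrative): the canonical iteration pattern, which keeps
 * exactly one chain reference alive at a time:
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		do_something(chain);
 *
 * Passing the previous chain back in drops its reference, so breaking out
 * early requires an explicit tcf_chain_put().
 */
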
static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Lookup Qdisc and increments its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * the rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to block in order to support
		 * execution of the rules update path of cls API without rtnl
		 * lock. Caller must release block when it is finished using
		 * it. The 'if' branch of this conditional obtains its
		 * reference to block by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

static bool tcf_block_tracks_dev(struct tcf_block *block,
				 struct tcf_block_ext_info *ei)
{
	return tcf_block_shared(block) &&
	       (ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS ||
		ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(q);
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	if (tcf_block_tracks_dev(block, ei)) {
		err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL);
		if (err) {
			NL_SET_ERR_MSG(extack, "block dev insert failed");
			goto err_dev_insert;
		}
	}

	*p_block = block;
	return 0;

err_dev_insert:
	tcf_block_offload_unbind(block, q, ei);
err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

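/* Example (illustrative, names hypothetical): a classful qdisc usually calls
 * tcf_block_get() from its ->init(), handing in the per-class filter_list
 * pointer that the default head-change callback above keeps up to date:
 *
 *	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *
 * and releases it with tcf_block_put(cl->block) on destroy.
 */
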
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	struct net_device *dev = qdisc_dev(q);

	if (!block)
		return;
	if (tcf_block_tracks_dev(block, ei))
		xa_erase(&block->ports, dev->ifindex);
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (chain->tmplt_ops && add)
			chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
							  cb_priv);
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
		if (chain->tmplt_ops && !add)
			chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
							  cb_priv);
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->driver_list);
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 struct tcf_exts_miss_cookie_node *n,
				 int act_index,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err = 0;

		if (n) {
			struct tcf_exts *exts;

			if (n->tp_prio != tp->prio)
				continue;

			/* We re-lookup the tp and chain based on index instead
			 * of having hard refs and locks to them, so do a sanity
			 * check if any of tp,chain,exts was replaced by the
			 * time we got here with a cookie from hardware.
			 */
			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
				     !tp->ops->get_exts)) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_COOKIE_ERROR);
				return TC_ACT_SHOT;
			}

			exts = tp->ops->get_exts(tp, n->handle);
			if (unlikely(!exts || n->exts != exts)) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_COOKIE_ERROR);
				return TC_ACT_SHOT;
			}

			n = NULL;
			err = tcf_exts_exec_ex(skb, exts, act_index, res);
		} else {
			if (tp->protocol != protocol &&
			    tp->protocol != htons(ETH_P_ALL))
				continue;

			err = tc_classify(skb, tp, res);
		}
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	if (unlikely(n)) {
		tcf_set_drop_reason(skb,
				    SKB_DROP_REASON_TC_COOKIE_ERROR);
		return TC_ACT_SHOT;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		tcf_set_drop_reason(skb,
				    SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

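/* The reclassify path above restarts the walk at most max_reclassify_loop
 * (16) times: TC_ACT_RECLASSIFY rewinds to the original tp, while
 * TC_ACT_GOTO_CHAIN jumps to res->goto_tp and records the target chain in
 * *last_executed_chain for the miss handling in tcf_classify() below.
 */
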
int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	struct tcf_exts_miss_cookie_node *n = NULL;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int act_index = 0;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && (ext->chain || ext->act_miss)) {
			struct tcf_chain *fchain;
			u32 chain;

			if (ext->act_miss) {
				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
								&act_index);
				if (!n) {
					tcf_set_drop_reason(skb,
							    SKB_DROP_REASON_TC_COOKIE_ERROR);
					return TC_ACT_SHOT;
				}

				chain = n->chain_index;
			} else {
				chain = ext->chain;
			}

			fchain = tcf_chain_lookup_rcu(block, chain);
			if (!fchain) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_CHAIN_NOTFOUND);
				return TC_ACT_SHOT;
			}

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
			     &last_executed_chain);

	if (tc_skb_ext_tc_enabled()) {
		/* If we missed on some chain */
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext)) {
				tcf_set_drop_reason(skb, SKB_DROP_REASON_NOMEM);
				return TC_ACT_SHOT;
			}
			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);

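/* On a miss (TC_ACT_UNSPEC with a non-zero last_executed_chain) the
 * tc_skb_ext stores enough state (chain index plus the conntrack bits from
 * tc_skb_cb) for a later software pass to resume classification exactly
 * where hardware or a prior block left off.
 */
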
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate,
					   struct netlink_ext_ack *extack);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false, NULL);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate,
					   struct netlink_ext_ack *extack)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate) {
					NL_SET_ERR_MSG(extack, "Lowest ID from auto-alloc range already in use");
					return ERR_PTR(-ENOSPC);
				}
				if (tp->protocol != protocol && protocol) {
					NL_SET_ERR_MSG(extack, "Protocol mismatch for filter with specified priority");
					return ERR_PTR(-EINVAL);
				}
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	int ret = -EMSGSIZE;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

cls_op_not_supp:
	ret = -EOPNOTSUPP;
out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return ret;
}

static struct sk_buff *tfilter_notify_prep(struct net *net,
					   struct sk_buff *oskb,
					   struct nlmsghdr *n,
					   struct tcf_proto *tp,
					   struct tcf_block *block,
					   struct Qdisc *q, u32 parent,
					   void *fh, int event,
					   u32 portid, bool rtnl_held,
					   struct netlink_ext_ack *extack)
{
	unsigned int size = oskb ? max(NLMSG_GOODSIZE, oskb->len) : NLMSG_GOODSIZE;
	struct sk_buff *skb;
	int ret;

retry:
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOBUFS);

	ret = tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			    n->nlmsg_seq, n->nlmsg_flags, event, false,
			    rtnl_held, extack);
	if (ret <= 0) {
		kfree_skb(skb);
		if (ret == -EMSGSIZE) {
			size += NLMSG_GOODSIZE;
			goto retry;
		}
		return ERR_PTR(-EINVAL);
	}
	return skb;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
		return 0;

	skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, event,
				  portid, rtnl_held, extack);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool *last, bool rtnl_held,
			      struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
		return tp->ops->delete(tp, fh, last, rtnl_held, extack);

	skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh,
				  RTM_DELTFILTER, portid, rtnl_held, extack);
	if (IS_ERR(skb)) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		return PTR_ERR(skb);
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL);
	     tp; tp = tcf_get_next_proto(chain, tp))
		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
			       event, false, true, extack);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

static bool is_qdisc_ingress(__u32 classid)
{
	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
}

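/* TC_H_MIN(TC_H_MIN_INGRESS) matches both the ingress and clsact qdiscs'
 * ingress parent (egress has a different minor), letting tc_new_tfilter()
 * below set TCA_ACT_FLAGS_AT_INGRESS only for filters attached at ingress.
 */
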
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;
	u32 flags;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;
	q = NULL;
	chain = NULL;
	flags = 0;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}
	block->classid = parent;

	chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate, extack);
	if (IS_ERR(tp)) {
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	if (!(n->nlmsg_flags & NLM_F_CREATE))
		flags |= TCA_ACT_FLAGS_REPLACE;
	if (!rtnl_held)
		flags |= TCA_ACT_FLAGS_NO_RTNL;
	if (is_qdisc_ingress(parent))
		flags |= TCA_ACT_FLAGS_AT_INGRESS;
	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      flags, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held, extack);
		tfilter_put(tp, fh);
		tcf_proto_count_usesw(tp, true);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}

static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
	 * found), qdisc is not unlocked, classifier type is not specified,
	 * classifier is not unlocked.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, extack);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false, extack);
	if (!tp) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = -ENOENT;
		goto errout_locked;
	} else if (IS_ERR(tp)) {
		err = PTR_ERR(tp);
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_proto_signal_destroying(chain, tp);
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held, extack);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh,
					 &last, rtnl_held, extack);
		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}

2624 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2625 struct netlink_ext_ack *extack)
2627 struct net *net = sock_net(skb->sk);
2628 struct nlattr *tca[TCA_MAX + 1];
2629 char name[IFNAMSIZ];
2635 struct Qdisc *q = NULL;
2636 struct tcf_chain_info chain_info;
2637 struct tcf_chain *chain = NULL;
2638 struct tcf_block *block = NULL;
2639 struct tcf_proto *tp = NULL;
2640 unsigned long cl = 0;
2643 bool rtnl_held = false;
2645 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2646 rtm_tca_policy, extack);
2651 protocol = TC_H_MIN(t->tcm_info);
2652 prio = TC_H_MAJ(t->tcm_info);
2653 parent = t->tcm_parent;
2656 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2660 /* Find head of filter chain. */
2662 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2666 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2667 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2671 /* Take the rtnl mutex when the block is shared (no qdisc found), when the
2672 * qdisc is not unlocked, when the classifier type is not specified, or
2673 * when the classifier is not unlocked.
2675 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2676 !tcf_proto_is_unlocked(name)) {
2681 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2685 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2687 if (IS_ERR(block)) {
2688 err = PTR_ERR(block);
2692 chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
2693 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2694 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2698 chain = tcf_chain_get(block, chain_index, false);
2700 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2705 mutex_lock(&chain->filter_chain_lock);
2706 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2707 prio, false, extack);
2708 mutex_unlock(&chain->filter_chain_lock);
2711 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2713 } else if (IS_ERR(tp)) {
2716 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2717 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2722 fh = tp->ops->get(tp, t->tcm_handle);
2725 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2728 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2729 fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2731 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2734 tfilter_put(tp, fh);
2737 if (tp && !IS_ERR(tp))
2738 tcf_proto_put(tp, rtnl_held, NULL);
2739 tcf_chain_put(chain);
2741 tcf_block_release(q, block, rtnl_held);
2749 struct tcf_dump_args {
2750 struct tcf_walker w;
2751 struct sk_buff *skb;
2752 struct netlink_callback *cb;
2753 struct tcf_block *block;
2759 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2761 struct tcf_dump_args *a = (void *)arg;
2762 struct net *net = sock_net(a->skb->sk);
2764 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2765 n, NETLINK_CB(a->cb->skb).portid,
2766 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2767 RTM_NEWTFILTER, a->terse_dump, true, NULL);
2770 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2771 struct sk_buff *skb, struct netlink_callback *cb,
2772 long index_start, long *p_index, bool terse)
2774 struct net *net = sock_net(skb->sk);
2775 struct tcf_block *block = chain->block;
2776 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2777 struct tcf_proto *tp, *tp_prev;
2778 struct tcf_dump_args arg;
2780 for (tp = __tcf_get_next_proto(chain, NULL);
2783 tp = __tcf_get_next_proto(chain, tp),
2784 tcf_proto_put(tp_prev, true, NULL),
2786 if (*p_index < index_start)
2788 if (TC_H_MAJ(tcm->tcm_info) &&
2789 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2791 if (TC_H_MIN(tcm->tcm_info) &&
2792 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2794 if (*p_index > index_start)
2795 memset(&cb->args[1], 0,
2796 sizeof(cb->args) - sizeof(cb->args[0]));
2797 if (cb->args[1] == 0) {
2798 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2799 NETLINK_CB(cb->skb).portid,
2800 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2801 RTM_NEWTFILTER, false, true, NULL) <= 0)
2807 arg.w.fn = tcf_node_dump;
2812 arg.parent = parent;
2814 arg.w.skip = cb->args[1] - 1;
2816 arg.w.cookie = cb->args[2];
2817 arg.terse_dump = terse;
2818 tp->ops->walk(tp, &arg.w, true);
2819 cb->args[2] = arg.w.cookie;
2820 cb->args[1] = arg.w.count + 1;
2827 tcf_proto_put(tp, true, NULL);
2831 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2832 [TCA_CHAIN] = { .type = NLA_U32 },
2833 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2836 /* called with RTNL */
2837 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2839 struct tcf_chain *chain, *chain_prev;
2840 struct net *net = sock_net(skb->sk);
2841 struct nlattr *tca[TCA_MAX + 1];
2842 struct Qdisc *q = NULL;
2843 struct tcf_block *block;
2844 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2845 bool terse_dump = false;
2851 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2854 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2855 tcf_tfilter_dump_policy, cb->extack);
2859 if (tca[TCA_DUMP_FLAGS]) {
2860 struct nla_bitfield32 flags =
2861 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2863 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2866 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2867 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2870 /* If we work with a block index, q is NULL and the parent value
2871 * is never used in the following code. The check
2872 * in tcf_fill_node() prevents it. However, the compiler does not
2873 * see that far, so set parent to zero to silence the warning
2874 * about parent being uninitialized.
2878 const struct Qdisc_class_ops *cops;
2879 struct net_device *dev;
2880 unsigned long cl = 0;
2882 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2886 parent = tcm->tcm_parent;
2888 q = rtnl_dereference(dev->qdisc);
2890 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2893 cops = q->ops->cl_ops;
2896 if (!cops->tcf_block)
2898 if (TC_H_MIN(tcm->tcm_parent)) {
2899 cl = cops->find(q, tcm->tcm_parent);
2903 block = cops->tcf_block(q, cl, NULL);
2906 parent = block->classid;
2907 if (tcf_block_shared(block))
2911 index_start = cb->args[0];
2914 for (chain = __tcf_get_next_chain(block, NULL);
2917 chain = __tcf_get_next_chain(block, chain),
2918 tcf_chain_put(chain_prev)) {
2919 if (tca[TCA_CHAIN] &&
2920 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2922 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2923 index_start, &index, terse_dump)) {
2924 tcf_chain_put(chain);
2930 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2931 tcf_block_refcnt_put(block, true);
2932 cb->args[0] = index;
2935 /* If we made no progress, the error (EMSGSIZE) is real */
2936 if (skb->len == 0 && err)
2941 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2942 void *tmplt_priv, u32 chain_index,
2943 struct net *net, struct sk_buff *skb,
2944 struct tcf_block *block,
2945 u32 portid, u32 seq, u16 flags, int event,
2946 struct netlink_ext_ack *extack)
2948 unsigned char *b = skb_tail_pointer(skb);
2949 const struct tcf_proto_ops *ops;
2950 struct nlmsghdr *nlh;
2957 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2959 goto out_nlmsg_trim;
2960 tcm = nlmsg_data(nlh);
2961 tcm->tcm_family = AF_UNSPEC;
2964 tcm->tcm_handle = 0;
2966 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2967 tcm->tcm_parent = block->q->handle;
2969 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2970 tcm->tcm_block_index = block->index;
2973 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2974 goto nla_put_failure;
2977 if (nla_put_string(skb, TCA_KIND, ops->kind))
2978 goto nla_put_failure;
2979 if (ops->tmplt_dump(skb, net, priv) < 0)
2980 goto nla_put_failure;
2983 if (extack && extack->_msg &&
2984 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2985 goto out_nlmsg_trim;
2987 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2997 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2998 u32 seq, u16 flags, int event, bool unicast,
2999 struct netlink_ext_ack *extack)
3001 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
3002 struct tcf_block *block = chain->block;
3003 struct net *net = block->net;
3004 struct sk_buff *skb;
3007 if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC))
3010 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3014 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3015 chain->index, net, skb, block, portid,
3016 seq, flags, event, extack) <= 0) {
3022 err = rtnl_unicast(skb, net, portid);
3024 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
3025 flags & NLM_F_ECHO);
3030 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
3031 void *tmplt_priv, u32 chain_index,
3032 struct tcf_block *block, struct sk_buff *oskb,
3035 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
3036 struct net *net = block->net;
3037 struct sk_buff *skb;
3039 if (!rtnl_notify_needed(net, flags, RTNLGRP_TC))
3042 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3046 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
3047 block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
3052 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
3055 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
3056 struct nlattr **tca,
3057 struct netlink_ext_ack *extack)
3059 const struct tcf_proto_ops *ops;
3060 char name[IFNAMSIZ];
3063 /* If kind is not set, the user did not specify a template. */
3067 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
3068 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
3072 ops = tcf_proto_lookup_ops(name, true, extack);
3074 return PTR_ERR(ops);
3075 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
3076 !ops->tmplt_reoffload) {
3077 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
3078 module_put(ops->owner);
3082 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
3083 if (IS_ERR(tmplt_priv)) {
3084 module_put(ops->owner);
3085 return PTR_ERR(tmplt_priv);
3087 chain->tmplt_ops = ops;
3088 chain->tmplt_priv = tmplt_priv;
3092 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
3095 /* If template ops are not set, there is no work to do for us. */
3099 tmplt_ops->tmplt_destroy(tmplt_priv);
3100 module_put(tmplt_ops->owner);
3103 /* Add/delete/get a chain */
3105 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
3106 struct netlink_ext_ack *extack)
3108 struct net *net = sock_net(skb->sk);
3109 struct nlattr *tca[TCA_MAX + 1];
3114 struct tcf_chain *chain;
3115 struct tcf_block *block;
3121 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
3122 rtm_tca_policy, extack);
3127 parent = t->tcm_parent;
3130 block = tcf_block_find(net, &q, &parent, &cl,
3131 t->tcm_ifindex, t->tcm_block_index, extack);
3133 return PTR_ERR(block);
3135 chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
3136 if (chain_index > TC_ACT_EXT_VAL_MASK) {
3137 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3142 mutex_lock(&block->lock);
3143 chain = tcf_chain_lookup(block, chain_index);
3144 if (n->nlmsg_type == RTM_NEWCHAIN) {
3146 if (tcf_chain_held_by_acts_only(chain)) {
3147 /* The chain exists only because there is
3148 * some action referencing it.
3150 tcf_chain_hold(chain);
3152 NL_SET_ERR_MSG(extack, "Filter chain already exists");
3154 goto errout_block_locked;
3157 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3158 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3160 goto errout_block_locked;
3162 chain = tcf_chain_create(block, chain_index);
3164 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3166 goto errout_block_locked;
3170 if (!chain || tcf_chain_held_by_acts_only(chain)) {
3171 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3173 goto errout_block_locked;
3175 tcf_chain_hold(chain);
3178 if (n->nlmsg_type == RTM_NEWCHAIN) {
3179 /* Modifying a chain requires holding the parent block lock. In case
3180 * the chain was successfully added, take a reference to the
3181 * chain. This ensures that an empty chain does not disappear at
3182 * the end of this function.
3184 tcf_chain_hold(chain);
3185 chain->explicitly_created = true;
3187 mutex_unlock(&block->lock);
3189 switch (n->nlmsg_type) {
3191 err = tc_chain_tmplt_add(chain, net, tca, extack);
3193 tcf_chain_put_explicitly_created(chain);
3197 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3198 RTM_NEWCHAIN, false, extack);
3201 tfilter_notify_chain(net, skb, block, q, parent, n,
3202 chain, RTM_DELTFILTER, extack);
3203 /* Flush the chain first as the user requested chain removal. */
3204 tcf_chain_flush(chain, true);
3205 /* In case the chain was successfully deleted, put a reference
3206 * to the chain previously taken during addition.
3208 tcf_chain_put_explicitly_created(chain);
3211 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3212 n->nlmsg_flags, n->nlmsg_type, true, extack);
3214 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3218 NL_SET_ERR_MSG(extack, "Unsupported message type");
3223 tcf_chain_put(chain);
3225 tcf_block_release(q, block, true);
3227 /* Replay the request. */
3231 errout_block_locked:
3232 mutex_unlock(&block->lock);
3236 /* called with RTNL */
3237 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3239 struct net *net = sock_net(skb->sk);
3240 struct nlattr *tca[TCA_MAX + 1];
3241 struct Qdisc *q = NULL;
3242 struct tcf_block *block;
3243 struct tcmsg *tcm = nlmsg_data(cb->nlh);
3244 struct tcf_chain *chain;
3249 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3252 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3253 rtm_tca_policy, cb->extack);
3257 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3258 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3262 const struct Qdisc_class_ops *cops;
3263 struct net_device *dev;
3264 unsigned long cl = 0;
3266 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3270 if (!tcm->tcm_parent)
3271 q = rtnl_dereference(dev->qdisc);
3273 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3277 cops = q->ops->cl_ops;
3280 if (!cops->tcf_block)
3282 if (TC_H_MIN(tcm->tcm_parent)) {
3283 cl = cops->find(q, tcm->tcm_parent);
3287 block = cops->tcf_block(q, cl, NULL);
3290 if (tcf_block_shared(block))
3294 index_start = cb->args[0];
3297 mutex_lock(&block->lock);
3298 list_for_each_entry(chain, &block->chain_list, list) {
3299 if ((tca[TCA_CHAIN] &&
3300 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3302 if (index < index_start) {
3306 if (tcf_chain_held_by_acts_only(chain))
3308 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3309 chain->index, net, skb, block,
3310 NETLINK_CB(cb->skb).portid,
3311 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3312 RTM_NEWCHAIN, NULL);
3317 mutex_unlock(&block->lock);
3319 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3320 tcf_block_refcnt_put(block, true);
3321 cb->args[0] = index;
3324 /* If we made no progress, the error (EMSGSIZE) is real */
3325 if (skb->len == 0 && err)
3330 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3331 int police, struct tcf_proto *tp, u32 handle,
3332 bool use_action_miss)
3336 #ifdef CONFIG_NET_CLS_ACT
3338 exts->nr_actions = 0;
3339 exts->miss_cookie_node = NULL;
3340 /* Note: we do not yet own a reference on net.
3341 * This reference might be taken later from tcf_exts_get_net().
3344 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3350 exts->action = action;
3351 exts->police = police;
3353 if (!use_action_miss)
3356 err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3358 goto err_miss_alloc;
3363 tcf_exts_destroy(exts);
3364 #ifdef CONFIG_NET_CLS_ACT
3365 exts->actions = NULL;
3369 EXPORT_SYMBOL(tcf_exts_init_ex);
3371 void tcf_exts_destroy(struct tcf_exts *exts)
3373 tcf_exts_miss_cookie_base_destroy(exts);
3375 #ifdef CONFIG_NET_CLS_ACT
3376 if (exts->actions) {
3377 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3378 kfree(exts->actions);
3380 exts->nr_actions = 0;
3383 EXPORT_SYMBOL(tcf_exts_destroy);
3385 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3386 struct nlattr *rate_tlv, struct tcf_exts *exts,
3387 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3389 #ifdef CONFIG_NET_CLS_ACT
3391 int init_res[TCA_ACT_MAX_PRIO] = {};
3392 struct tc_action *act;
3393 size_t attr_size = 0;
3395 if (exts->police && tb[exts->police]) {
3396 struct tc_action_ops *a_o;
3398 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3399 a_o = tc_action_load_ops(tb[exts->police], flags,
3402 return PTR_ERR(a_o);
3403 act = tcf_action_init_1(net, tp, tb[exts->police],
3404 rate_tlv, a_o, init_res, flags,
3406 module_put(a_o->owner);
3408 return PTR_ERR(act);
3410 act->type = exts->type = TCA_OLD_COMPAT;
3411 exts->actions[0] = act;
3412 exts->nr_actions = 1;
3413 tcf_idr_insert_many(exts->actions, init_res);
3414 } else if (exts->action && tb[exts->action]) {
3417 flags |= TCA_ACT_FLAGS_BIND;
3418 err = tcf_action_init(net, tp, tb[exts->action],
3419 rate_tlv, exts->actions, init_res,
3420 &attr_size, flags, fl_flags,
3424 exts->nr_actions = err;
3428 if ((exts->action && tb[exts->action]) ||
3429 (exts->police && tb[exts->police])) {
3430 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3437 EXPORT_SYMBOL(tcf_exts_validate_ex);
3439 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3440 struct nlattr *rate_tlv, struct tcf_exts *exts,
3441 u32 flags, struct netlink_ext_ack *extack)
3443 return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3446 EXPORT_SYMBOL(tcf_exts_validate);
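/* Illustrative sketch of how a classifier typically pairs these helpers.
 * The "foo" classifier, its filter struct and the TCA_FOO_* attribute
 * indices are hypothetical; the call sequence mirrors in-tree classifiers
 * such as cls_basic.
 *
 *	struct foo_filter {
 *		struct tcf_exts exts;
 *	};
 *
 *	static int foo_set_parms(struct net *net, struct tcf_proto *tp,
 *				 struct foo_filter *f, struct nlattr **tb,
 *				 struct nlattr *est, u32 flags,
 *				 struct netlink_ext_ack *extack)
 *	{
 *		int err;
 *
 *		// Initialize once per filter; the two indices name the
 *		// attributes under which actions may arrive.
 *		err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *		if (err < 0)
 *			return err;
 *
 *		// Parse and bind the actions carried in tb[]/est.
 *		err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags,
 *					extack);
 *		if (err < 0) {
 *			tcf_exts_destroy(&f->exts);
 *			return err;
 *		}
 *		return 0;
 *	}
 *
 * When a filter is replaced in place, tcf_exts_change() swaps the newly
 * validated actions into the live filter and frees the old ones.
 */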
3448 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3450 #ifdef CONFIG_NET_CLS_ACT
3451 struct tcf_exts old = *dst;
3454 tcf_exts_destroy(&old);
3457 EXPORT_SYMBOL(tcf_exts_change);
3459 #ifdef CONFIG_NET_CLS_ACT
3460 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3462 if (exts->nr_actions == 0)
3465 return exts->actions[0];
3469 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3471 #ifdef CONFIG_NET_CLS_ACT
3472 struct nlattr *nest;
3474 if (exts->action && tcf_exts_has_actions(exts)) {
3475 /*
3476 * Again, for backward-compatible mode: we want
3477 * to work with both old and new modes of entering
3478 * tc data even if iproute2 was newer - jhs
3480 if (exts->type != TCA_OLD_COMPAT) {
3481 nest = nla_nest_start_noflag(skb, exts->action);
3483 goto nla_put_failure;
3485 if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3487 goto nla_put_failure;
3488 nla_nest_end(skb, nest);
3489 } else if (exts->police) {
3490 struct tc_action *act = tcf_exts_first_act(exts);
3491 nest = nla_nest_start_noflag(skb, exts->police);
3492 if (nest == NULL || !act)
3493 goto nla_put_failure;
3494 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3495 goto nla_put_failure;
3496 nla_nest_end(skb, nest);
3502 nla_nest_cancel(skb, nest);
3508 EXPORT_SYMBOL(tcf_exts_dump);
3510 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3512 #ifdef CONFIG_NET_CLS_ACT
3513 struct nlattr *nest;
3515 if (!exts->action || !tcf_exts_has_actions(exts))
3518 nest = nla_nest_start_noflag(skb, exts->action);
3520 goto nla_put_failure;
3522 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3523 goto nla_put_failure;
3524 nla_nest_end(skb, nest);
3528 nla_nest_cancel(skb, nest);
3534 EXPORT_SYMBOL(tcf_exts_terse_dump);
3536 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3538 #ifdef CONFIG_NET_CLS_ACT
3539 struct tc_action *a = tcf_exts_first_act(exts);
3540 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3545 EXPORT_SYMBOL(tcf_exts_dump_stats);
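/* Illustrative sketch of the dump-side counterpart in a classifier's
 * ->dump() callback. "foo_dump" and struct foo_filter are hypothetical;
 * the ordering (options nest first, stats after the nest is closed)
 * follows in-tree classifiers such as cls_basic.
 *
 *	static int foo_dump(struct net *net, struct tcf_proto *tp, void *fh,
 *			    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
 *	{
 *		struct foo_filter *f = fh;
 *		struct nlattr *nest;
 *
 *		nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 *		if (!nest)
 *			goto nla_put_failure;
 *		if (tcf_exts_dump(skb, &f->exts) < 0)
 *			goto nla_put_failure;
 *		nla_nest_end(skb, nest);
 *		if (tcf_exts_dump_stats(skb, &f->exts) < 0)
 *			goto nla_put_failure;
 *		return skb->len;
 *
 *	nla_put_failure:
 *		nla_nest_cancel(skb, nest);
 *		return -1;
 *	}
 */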
3547 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3549 if (*flags & TCA_CLS_FLAGS_IN_HW)
3551 *flags |= TCA_CLS_FLAGS_IN_HW;
3552 atomic_inc(&block->offloadcnt);
3555 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3557 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3559 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3560 atomic_dec(&block->offloadcnt);
3563 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3564 struct tcf_proto *tp, u32 *cnt,
3565 u32 *flags, u32 diff, bool add)
3567 lockdep_assert_held(&block->cb_lock);
3569 spin_lock(&tp->lock);
3572 tcf_block_offload_inc(block, flags);
3577 tcf_block_offload_dec(block, flags);
3579 spin_unlock(&tp->lock);
3583 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3584 u32 *cnt, u32 *flags)
3586 lockdep_assert_held(&block->cb_lock);
3588 spin_lock(&tp->lock);
3589 tcf_block_offload_dec(block, flags);
3591 spin_unlock(&tp->lock);
3595 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3596 void *type_data, bool err_stop)
3598 struct flow_block_cb *block_cb;
3602 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3603 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3614 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3615 void *type_data, bool err_stop, bool rtnl_held)
3617 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3623 down_read(&block->cb_lock);
3624 /* Need to obtain rtnl lock if block is bound to devs that require it.
3625 * In block bind code cb_lock is obtained while holding rtnl, so we must
3626 * obtain the locks in the same order here.
3628 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3629 up_read(&block->cb_lock);
3634 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3636 up_read(&block->cb_lock);
3641 EXPORT_SYMBOL(tc_setup_cb_call);
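/* Illustrative sketch of the driver side that __tc_setup_cb_call() fans out
 * to. A driver registers one flow_block_cb per bound block; the "foo" names
 * and struct foo_priv are hypothetical, the shape follows drivers built on
 * flow_block_cb_alloc(). A callback returning 0 counts toward ok_count;
 * a nonzero return aborts the walk when err_stop is set.
 *
 *	static int foo_setup_tc_block_cb(enum tc_setup_type type,
 *					 void *type_data, void *cb_priv)
 *	{
 *		struct foo_priv *priv = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			// Translate the flower rule into hardware state.
 *			return foo_flower_offload(priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */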
3643 /* Non-destructive filter add. If a filter that wasn't already in hardware
3644 * is successfully offloaded, increment the block offloads counter. On
3645 * failure, the previously offloaded filter is considered intact and the
3646 * offloads counter is not decremented.
3649 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3650 enum tc_setup_type type, void *type_data, bool err_stop,
3651 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3653 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3659 down_read(&block->cb_lock);
3660 /* Need to obtain rtnl lock if block is bound to devs that require it.
3661 * In block bind code cb_lock is obtained while holding rtnl, so we must
3662 * obtain the locks in the same order here.
3664 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3665 up_read(&block->cb_lock);
3670 /* Make sure all netdevs sharing this block are offload-capable. */
3671 if (block->nooffloaddevcnt && err_stop) {
3672 ok_count = -EOPNOTSUPP;
3676 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3680 if (tp->ops->hw_add)
3681 tp->ops->hw_add(tp, type_data);
3683 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3686 up_read(&block->cb_lock);
3689 return min(ok_count, 0);
3691 EXPORT_SYMBOL(tc_setup_cb_add);
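/* Illustrative sketch of a classifier offloading a freshly added filter
 * through tc_setup_cb_add(). TC_SETUP_CLSFOO, struct foo_offload and the
 * filter fields are hypothetical; the skip_sw/in_hw bookkeeping mirrors
 * in-tree users such as cls_flower.
 *
 *	static int foo_hw_add(struct tcf_proto *tp, struct foo_filter *f,
 *			      struct netlink_ext_ack *extack)
 *	{
 *		struct tcf_block *block = tp->chain->block;
 *		struct foo_offload cls = { .cookie = (unsigned long)f };
 *		bool skip_sw = tc_skip_sw(f->flags);
 *		int err;
 *
 *		// err_stop == skip_sw: without a software fallback, one
 *		// driver failure must fail the whole insertion.
 *		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFOO, &cls,
 *				      skip_sw, &f->flags, &f->in_hw_count,
 *				      true);
 *		if (err)
 *			return err;
 *
 *		// A skip_sw filter that reached no hardware is useless.
 *		if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
 *			return -EINVAL;
 *		return 0;
 *	}
 */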
3693 /* Destructive filter replace. If a filter that wasn't already in hardware
3694 * is successfully offloaded, increment the block offload counter. On
3695 * failure, the previously offloaded filter is considered destroyed and the
3696 * offload counter is decremented.
3699 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3700 enum tc_setup_type type, void *type_data, bool err_stop,
3701 u32 *old_flags, unsigned int *old_in_hw_count,
3702 u32 *new_flags, unsigned int *new_in_hw_count,
3705 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3711 down_read(&block->cb_lock);
3712 /* Need to obtain rtnl lock if block is bound to devs that require it.
3713 * In block bind code cb_lock is obtained while holding rtnl, so we must
3714 * obtain the locks in the same order here.
3716 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3717 up_read(&block->cb_lock);
3722 /* Make sure all netdevs sharing this block are offload-capable. */
3723 if (block->nooffloaddevcnt && err_stop) {
3724 ok_count = -EOPNOTSUPP;
3728 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3729 if (tp->ops->hw_del)
3730 tp->ops->hw_del(tp, type_data);
3732 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3736 if (tp->ops->hw_add)
3737 tp->ops->hw_add(tp, type_data);
3739 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3740 new_flags, ok_count, true);
3742 up_read(&block->cb_lock);
3745 return min(ok_count, 0);
3747 EXPORT_SYMBOL(tc_setup_cb_replace);
3749 /* Destroy the filter and decrement the block offload counter, if the
3750 * filter was previously offloaded.
3753 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3754 enum tc_setup_type type, void *type_data, bool err_stop,
3755 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3757 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3763 down_read(&block->cb_lock);
3764 /* Need to obtain rtnl lock if block is bound to devs that require it.
3765 * In block bind code cb_lock is obtained while holding rtnl, so we must
3766 * obtain the locks in the same order here.
3768 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3769 up_read(&block->cb_lock);
3774 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3776 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3777 if (tp->ops->hw_del)
3778 tp->ops->hw_del(tp, type_data);
3780 up_read(&block->cb_lock);
3783 return min(ok_count, 0);
3785 EXPORT_SYMBOL(tc_setup_cb_destroy);
3787 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3788 bool add, flow_setup_cb_t *cb,
3789 enum tc_setup_type type, void *type_data,
3790 void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3792 int err = cb(type, type_data, cb_priv);
3795 if (add && tc_skip_sw(*flags))
3798 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3804 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3806 static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
3807 const struct tc_action *act)
3809 struct tc_cookie *user_cookie;
3813 user_cookie = rcu_dereference(act->user_cookie);
3815 entry->user_cookie = flow_action_cookie_create(user_cookie->data,
3818 if (!entry->user_cookie)
3825 static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
3827 flow_action_cookie_destroy(entry->user_cookie);
3830 void tc_cleanup_offload_action(struct flow_action *flow_action)
3832 struct flow_action_entry *entry;
3835 flow_action_for_each(i, entry, flow_action) {
3836 tcf_act_put_user_cookie(entry);
3837 if (entry->destructor)
3838 entry->destructor(entry->destructor_priv);
3841 EXPORT_SYMBOL(tc_cleanup_offload_action);
3843 static int tc_setup_offload_act(struct tc_action *act,
3844 struct flow_action_entry *entry,
3846 struct netlink_ext_ack *extack)
3848 #ifdef CONFIG_NET_CLS_ACT
3849 if (act->ops->offload_act_setup) {
3850 return act->ops->offload_act_setup(act, entry, index_inc, true,
3853 NL_SET_ERR_MSG(extack, "Action does not support offload");
3861 int tc_setup_action(struct flow_action *flow_action,
3862 struct tc_action *actions[],
3863 u32 miss_cookie_base,
3864 struct netlink_ext_ack *extack)
3866 int i, j, k, index, err = 0;
3867 struct tc_action *act;
3869 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3870 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3871 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3877 tcf_act_for_each_action(i, act, actions) {
3878 struct flow_action_entry *entry;
3880 entry = &flow_action->entries[j];
3881 spin_lock_bh(&act->tcfa_lock);
3882 err = tcf_act_get_user_cookie(entry, act);
3884 goto err_out_locked;
3887 err = tc_setup_offload_act(act, entry, &index, extack);
3889 goto err_out_locked;
3891 for (k = 0; k < index ; k++) {
3892 entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3893 entry[k].hw_index = act->tcfa_index;
3894 entry[k].cookie = (unsigned long)act;
3895 entry[k].miss_cookie =
3896 tcf_exts_miss_cookie_get(miss_cookie_base, i);
3901 spin_unlock_bh(&act->tcfa_lock);
3906 tc_cleanup_offload_action(flow_action);
3910 spin_unlock_bh(&act->tcfa_lock);
3914 int tc_setup_offload_action(struct flow_action *flow_action,
3915 const struct tcf_exts *exts,
3916 struct netlink_ext_ack *extack)
3918 #ifdef CONFIG_NET_CLS_ACT
3919 u32 miss_cookie_base;
3924 miss_cookie_base = exts->miss_cookie_node ?
3925 exts->miss_cookie_node->miss_cookie_base : 0;
3926 return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
3932 EXPORT_SYMBOL(tc_setup_offload_action);
3934 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3936 unsigned int num_acts = 0;
3937 struct tc_action *act;
3940 tcf_exts_for_each_action(i, act, exts) {
3941 if (is_tcf_pedit(act))
3942 num_acts += tcf_pedit_nkeys(act);
3948 EXPORT_SYMBOL(tcf_exts_num_actions);
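/* Illustrative sketch of how tcf_exts_num_actions(), tc_setup_offload_action()
 * and tc_cleanup_offload_action() compose on the classifier side when
 * building a hardware rule. The surrounding function and error label are
 * hypothetical; the sequence mirrors cls_flower's offload path.
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 *	if (!rule)
 *		return -ENOMEM;
 *
 *	err = tc_setup_offload_action(&rule->action, &f->exts, extack);
 *	if (err)
 *		goto err_free_rule;
 *
 *	// ... hand the rule to the drivers, e.g. via tc_setup_cb_add() ...
 *
 *	// Release the user cookies and destructors taken during setup.
 *	tc_cleanup_offload_action(&rule->action);
 */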
3950 #ifdef CONFIG_NET_CLS_ACT
3951 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3953 struct netlink_ext_ack *extack)
3955 *p_block_index = nla_get_u32(block_index_attr);
3956 if (!*p_block_index) {
3957 NL_SET_ERR_MSG(extack, "Block number may not be zero");
3964 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3965 enum flow_block_binder_type binder_type,
3966 struct nlattr *block_index_attr,
3967 struct netlink_ext_ack *extack)
3972 if (!block_index_attr)
3975 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3979 qe->info.binder_type = binder_type;
3980 qe->info.chain_head_change = tcf_chain_head_change_dflt;
3981 qe->info.chain_head_change_priv = &qe->filter_chain;
3982 qe->info.block_index = block_index;
3984 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3986 EXPORT_SYMBOL(tcf_qevent_init);
3988 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3990 if (qe->info.block_index)
3991 tcf_block_put_ext(qe->block, sch, &qe->info);
3993 EXPORT_SYMBOL(tcf_qevent_destroy);
3995 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3996 struct netlink_ext_ack *extack)
4001 if (!block_index_attr)
4004 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
4008 /* Bounce a newly configured block or a change of block. */
4009 if (block_index != qe->info.block_index) {
4010 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
4016 EXPORT_SYMBOL(tcf_qevent_validate_change);
4018 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
4019 struct sk_buff **to_free, int *ret)
4021 struct tcf_result cl_res;
4022 struct tcf_proto *fl;
4024 if (!qe->info.block_index)
4027 fl = rcu_dereference_bh(qe->filter_chain);
4029 switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
4031 qdisc_qstats_drop(sch);
4032 __qdisc_drop(skb, to_free);
4033 *ret = __NET_XMIT_BYPASS;
4038 __qdisc_drop(skb, to_free);
4039 *ret = __NET_XMIT_STOLEN;
4041 case TC_ACT_REDIRECT:
4042 skb_do_redirect(skb);
4043 *ret = __NET_XMIT_STOLEN;
4049 EXPORT_SYMBOL(tcf_qevent_handle);
4051 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
4053 if (!qe->info.block_index)
4055 return nla_put_u32(skb, attr_name, qe->info.block_index);
4057 EXPORT_SYMBOL(tcf_qevent_dump);
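/* Illustrative sketch of how a qdisc wires the qevent API together. The
 * "foo" qdisc, its private struct and the TCA_FOO_* attribute are
 * hypothetical; the sequence follows in-tree users of qevents such as
 * sch_red.
 *
 *	// ->init()/->change(): bind the qevent block. On a later ->change(),
 *	// tcf_qevent_validate_change() rejects a different block index.
 *	err = tcf_qevent_init(&q->qe_early_drop, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_FOO_EARLY_DROP_BLOCK], extack);
 *	if (err)
 *		return err;
 *
 *	// Hot path: run the skb through the qevent's filters. A NULL return
 *	// means the skb was consumed (dropped, stolen or redirected) and
 *	// *ret already carries the verdict to propagate.
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return ret;
 *
 *	// ->destroy(): release the block binding.
 *	tcf_qevent_destroy(&q->qe_early_drop, sch);
 */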
4060 static __net_init int tcf_net_init(struct net *net)
4062 struct tcf_net *tn = net_generic(net, tcf_net_id);
4064 spin_lock_init(&tn->idr_lock);
4069 static void __net_exit tcf_net_exit(struct net *net)
4071 struct tcf_net *tn = net_generic(net, tcf_net_id);
4073 idr_destroy(&tn->idr);
4076 static struct pernet_operations tcf_net_ops = {
4077 .init = tcf_net_init,
4078 .exit = tcf_net_exit,
4080 .size = sizeof(struct tcf_net),
4083 static const struct rtnl_msg_handler tc_filter_rtnl_msg_handlers[] __initconst = {
4084 {.msgtype = RTM_NEWTFILTER, .doit = tc_new_tfilter,
4085 .flags = RTNL_FLAG_DOIT_UNLOCKED},
4086 {.msgtype = RTM_DELTFILTER, .doit = tc_del_tfilter,
4087 .flags = RTNL_FLAG_DOIT_UNLOCKED},
4088 {.msgtype = RTM_GETTFILTER, .doit = tc_get_tfilter,
4089 .dumpit = tc_dump_tfilter, .flags = RTNL_FLAG_DOIT_UNLOCKED},
4090 {.msgtype = RTM_NEWCHAIN, .doit = tc_ctl_chain},
4091 {.msgtype = RTM_DELCHAIN, .doit = tc_ctl_chain},
4092 {.msgtype = RTM_GETCHAIN, .doit = tc_ctl_chain,
4093 .dumpit = tc_dump_chain},
4096 static int __init tc_filter_init(void)
4100 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
4104 err = register_pernet_subsys(&tcf_net_ops);
4106 goto err_register_pernet_subsys;
4108 xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
4109 rtnl_register_many(tc_filter_rtnl_msg_handlers);
4113 err_register_pernet_subsys:
4114 destroy_workqueue(tc_filter_wq);
4118 subsys_initcall(tc_filter_init);