1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_api.c  Packet classifier API.
4  *
5  * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Changes:
8  *
9  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
10  */
11
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/kmod.h>
21 #include <linux/slab.h>
22 #include <linux/idr.h>
23 #include <linux/rhashtable.h>
24 #include <net/net_namespace.h>
25 #include <net/sock.h>
26 #include <net/netlink.h>
27 #include <net/pkt_sched.h>
28 #include <net/pkt_cls.h>
29 #include <net/tc_act/tc_pedit.h>
30 #include <net/tc_act/tc_mirred.h>
31 #include <net/tc_act/tc_vlan.h>
32 #include <net/tc_act/tc_tunnel_key.h>
33 #include <net/tc_act/tc_csum.h>
34 #include <net/tc_act/tc_gact.h>
35 #include <net/tc_act/tc_police.h>
36 #include <net/tc_act/tc_sample.h>
37 #include <net/tc_act/tc_skbedit.h>
38 #include <net/tc_act/tc_ct.h>
39
40 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
41
42 /* The list of all installed classifier types */
43 static LIST_HEAD(tcf_proto_base);
44
45 /* Protects the list of registered TC modules. It is a pure SMP lock. */
46 static DEFINE_RWLOCK(cls_mod_lock);
47
48 /* Find classifier type by string name */
49
50 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
51 {
52         const struct tcf_proto_ops *t, *res = NULL;
53
54         if (kind) {
55                 read_lock(&cls_mod_lock);
56                 list_for_each_entry(t, &tcf_proto_base, head) {
57                         if (strcmp(kind, t->kind) == 0) {
58                                 if (try_module_get(t->owner))
59                                         res = t;
60                                 break;
61                         }
62                 }
63                 read_unlock(&cls_mod_lock);
64         }
65         return res;
66 }
67
68 static const struct tcf_proto_ops *
69 tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
70                      struct netlink_ext_ack *extack)
71 {
72         const struct tcf_proto_ops *ops;
73
74         ops = __tcf_proto_lookup_ops(kind);
75         if (ops)
76                 return ops;
77 #ifdef CONFIG_MODULES
78         if (rtnl_held)
79                 rtnl_unlock();
80         request_module("cls_%s", kind);
81         if (rtnl_held)
82                 rtnl_lock();
83         ops = __tcf_proto_lookup_ops(kind);
84         /* We dropped the RTNL semaphore in order to perform
85          * the module load. So, even if we succeeded in loading
86          * the module we have to replay the request. We indicate
87          * this using -EAGAIN.
88          */
89         if (ops) {
90                 module_put(ops->owner);
91                 return ERR_PTR(-EAGAIN);
92         }
93 #endif
94         NL_SET_ERR_MSG(extack, "TC classifier not found");
95         return ERR_PTR(-ENOENT);
96 }
97
98 /* Register (unregister) a new classifier type */
99
100 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
101 {
102         struct tcf_proto_ops *t;
103         int rc = -EEXIST;
104
105         write_lock(&cls_mod_lock);
106         list_for_each_entry(t, &tcf_proto_base, head)
107                 if (!strcmp(ops->kind, t->kind))
108                         goto out;
109
110         list_add_tail(&ops->head, &tcf_proto_base);
111         rc = 0;
112 out:
113         write_unlock(&cls_mod_lock);
114         return rc;
115 }
116 EXPORT_SYMBOL(register_tcf_proto_ops);
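/* Example (illustrative sketch, not part of this file; the "foo" names are
 * hypothetical): a classifier module typically registers its ops from module
 * init and unregisters them on exit:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.owner		= THIS_MODULE,
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit cls_foo_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 */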
117
118 static struct workqueue_struct *tc_filter_wq;
119
120 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
121 {
122         struct tcf_proto_ops *t;
123         int rc = -ENOENT;
124
125         /* Wait for outstanding call_rcu()s, if any, from a
126          * tcf_proto_ops's destroy() handler.
127          */
128         rcu_barrier();
129         flush_workqueue(tc_filter_wq);
130
131         write_lock(&cls_mod_lock);
132         list_for_each_entry(t, &tcf_proto_base, head) {
133                 if (t == ops) {
134                         list_del(&t->head);
135                         rc = 0;
136                         break;
137                 }
138         }
139         write_unlock(&cls_mod_lock);
140         return rc;
141 }
142 EXPORT_SYMBOL(unregister_tcf_proto_ops);
143
144 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
145 {
146         INIT_RCU_WORK(rwork, func);
147         return queue_rcu_work(tc_filter_wq, rwork);
148 }
149 EXPORT_SYMBOL(tcf_queue_work);
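/* Illustrative use (assumed, based on how classifiers typically defer
 * freeing; 'f' and foo_destroy_filter_work() are hypothetical names): a
 * classifier embeds a struct rcu_work in its filter and, from its
 * ->delete()/->destroy() path, queues the actual free so it runs on
 * tc_filter_wq only after an RCU grace period:
 *
 *	tcf_queue_work(&f->rwork, foo_destroy_filter_work);
 */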
150
151 /* Select a new prio value from the range managed by the kernel. */
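/* Worked example (illustrative): prio values are kept in TC_H_MAJ form, so
 * with no filter installed the first auto-selected prio is 0xC0000000; if the
 * current head already has prio 0xC0000000, the next one handed out is
 * 0xBFFF0000.
 */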
152
153 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
154 {
155         u32 first = TC_H_MAKE(0xC0000000U, 0U);
156
157         if (tp)
158                 first = tp->prio - 1;
159
160         return TC_H_MAJ(first);
161 }
162
163 static bool tcf_proto_is_unlocked(const char *kind)
164 {
165         const struct tcf_proto_ops *ops;
166         bool ret;
167
168         ops = tcf_proto_lookup_ops(kind, false, NULL);
169         /* On error return false to take rtnl lock. Proto lookup/create
170          * functions will perform lookup again and properly handle errors.
171          */
172         if (IS_ERR(ops))
173                 return false;
174
175         ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
176         module_put(ops->owner);
177         return ret;
178 }
179
180 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
181                                           u32 prio, struct tcf_chain *chain,
182                                           bool rtnl_held,
183                                           struct netlink_ext_ack *extack)
184 {
185         struct tcf_proto *tp;
186         int err;
187
188         tp = kzalloc(sizeof(*tp), GFP_KERNEL);
189         if (!tp)
190                 return ERR_PTR(-ENOBUFS);
191
192         tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
193         if (IS_ERR(tp->ops)) {
194                 err = PTR_ERR(tp->ops);
195                 goto errout;
196         }
197         tp->classify = tp->ops->classify;
198         tp->protocol = protocol;
199         tp->prio = prio;
200         tp->chain = chain;
201         spin_lock_init(&tp->lock);
202         refcount_set(&tp->refcnt, 1);
203
204         err = tp->ops->init(tp);
205         if (err) {
206                 module_put(tp->ops->owner);
207                 goto errout;
208         }
209         return tp;
210
211 errout:
212         kfree(tp);
213         return ERR_PTR(err);
214 }
215
216 static void tcf_proto_get(struct tcf_proto *tp)
217 {
218         refcount_inc(&tp->refcnt);
219 }
220
221 static void tcf_chain_put(struct tcf_chain *chain);
222
223 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
224                               struct netlink_ext_ack *extack)
225 {
226         tp->ops->destroy(tp, rtnl_held, extack);
227         tcf_chain_put(tp->chain);
228         module_put(tp->ops->owner);
229         kfree_rcu(tp, rcu);
230 }
231
232 static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
233                           struct netlink_ext_ack *extack)
234 {
235         if (refcount_dec_and_test(&tp->refcnt))
236                 tcf_proto_destroy(tp, rtnl_held, extack);
237 }
238
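/* Walker callback used by tcf_proto_is_empty() below: it aborts the walk
 * (returns -1) as soon as the classifier reports any filter handle, so only
 * the existence of a filter is checked, not the whole list.
 */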
239 static int walker_check_empty(struct tcf_proto *tp, void *fh,
240                               struct tcf_walker *arg)
241 {
242         if (fh) {
243                 arg->nonempty = true;
244                 return -1;
245         }
246         return 0;
247 }
248
249 static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
250 {
251         struct tcf_walker walker = { .fn = walker_check_empty, };
252
253         if (tp->ops->walk) {
254                 tp->ops->walk(tp, &walker, rtnl_held);
255                 return !walker.nonempty;
256         }
257         return true;
258 }
259
260 static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
261 {
262         spin_lock(&tp->lock);
263         if (tcf_proto_is_empty(tp, rtnl_held))
264                 tp->deleting = true;
265         spin_unlock(&tp->lock);
266         return tp->deleting;
267 }
268
269 static void tcf_proto_mark_delete(struct tcf_proto *tp)
270 {
271         spin_lock(&tp->lock);
272         tp->deleting = true;
273         spin_unlock(&tp->lock);
274 }
275
276 static bool tcf_proto_is_deleting(struct tcf_proto *tp)
277 {
278         bool deleting;
279
280         spin_lock(&tp->lock);
281         deleting = tp->deleting;
282         spin_unlock(&tp->lock);
283
284         return deleting;
285 }
286
287 #define ASSERT_BLOCK_LOCKED(block)                                      \
288         lockdep_assert_held(&(block)->lock)
289
290 struct tcf_filter_chain_list_item {
291         struct list_head list;
292         tcf_chain_head_change_t *chain_head_change;
293         void *chain_head_change_priv;
294 };
295
296 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
297                                           u32 chain_index)
298 {
299         struct tcf_chain *chain;
300
301         ASSERT_BLOCK_LOCKED(block);
302
303         chain = kzalloc(sizeof(*chain), GFP_KERNEL);
304         if (!chain)
305                 return NULL;
306         list_add_tail(&chain->list, &block->chain_list);
307         mutex_init(&chain->filter_chain_lock);
308         chain->block = block;
309         chain->index = chain_index;
310         chain->refcnt = 1;
311         if (!chain->index)
312                 block->chain0.chain = chain;
313         return chain;
314 }
315
316 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
317                                        struct tcf_proto *tp_head)
318 {
319         if (item->chain_head_change)
320                 item->chain_head_change(tp_head, item->chain_head_change_priv);
321 }
322
323 static void tcf_chain0_head_change(struct tcf_chain *chain,
324                                    struct tcf_proto *tp_head)
325 {
326         struct tcf_filter_chain_list_item *item;
327         struct tcf_block *block = chain->block;
328
329         if (chain->index)
330                 return;
331
332         mutex_lock(&block->lock);
333         list_for_each_entry(item, &block->chain0.filter_chain_list, list)
334                 tcf_chain_head_change_item(item, tp_head);
335         mutex_unlock(&block->lock);
336 }
337
338 /* Returns true if block can be safely freed. */
339
340 static bool tcf_chain_detach(struct tcf_chain *chain)
341 {
342         struct tcf_block *block = chain->block;
343
344         ASSERT_BLOCK_LOCKED(block);
345
346         list_del(&chain->list);
347         if (!chain->index)
348                 block->chain0.chain = NULL;
349
350         if (list_empty(&block->chain_list) &&
351             refcount_read(&block->refcnt) == 0)
352                 return true;
353
354         return false;
355 }
356
357 static void tcf_block_destroy(struct tcf_block *block)
358 {
359         mutex_destroy(&block->lock);
360         kfree_rcu(block, rcu);
361 }
362
363 static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
364 {
365         struct tcf_block *block = chain->block;
366
367         mutex_destroy(&chain->filter_chain_lock);
368         kfree_rcu(chain, rcu);
369         if (free_block)
370                 tcf_block_destroy(block);
371 }
372
373 static void tcf_chain_hold(struct tcf_chain *chain)
374 {
375         ASSERT_BLOCK_LOCKED(chain->block);
376
377         ++chain->refcnt;
378 }
379
380 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
381 {
382         ASSERT_BLOCK_LOCKED(chain->block);
383
384         /* In case all the references are action references, this
385          * chain should not be shown to the user.
386          */
387         return chain->refcnt == chain->action_refcnt;
388 }
389
390 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
391                                           u32 chain_index)
392 {
393         struct tcf_chain *chain;
394
395         ASSERT_BLOCK_LOCKED(block);
396
397         list_for_each_entry(chain, &block->chain_list, list) {
398                 if (chain->index == chain_index)
399                         return chain;
400         }
401         return NULL;
402 }
403
404 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
405                            u32 seq, u16 flags, int event, bool unicast);
406
407 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
408                                          u32 chain_index, bool create,
409                                          bool by_act)
410 {
411         struct tcf_chain *chain = NULL;
412         bool is_first_reference;
413
414         mutex_lock(&block->lock);
415         chain = tcf_chain_lookup(block, chain_index);
416         if (chain) {
417                 tcf_chain_hold(chain);
418         } else {
419                 if (!create)
420                         goto errout;
421                 chain = tcf_chain_create(block, chain_index);
422                 if (!chain)
423                         goto errout;
424         }
425
426         if (by_act)
427                 ++chain->action_refcnt;
428         is_first_reference = chain->refcnt - chain->action_refcnt == 1;
429         mutex_unlock(&block->lock);
430
431         /* Send notification only in case we got the first
432          * non-action reference. Until then, the chain acts only as
433          * a placeholder for actions pointing to it and the user ought
434          * not to know about them.
435          */
436         if (is_first_reference && !by_act)
437                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
438                                 RTM_NEWCHAIN, false);
439
440         return chain;
441
442 errout:
443         mutex_unlock(&block->lock);
444         return chain;
445 }
446
447 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
448                                        bool create)
449 {
450         return __tcf_chain_get(block, chain_index, create, false);
451 }
452
453 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
454 {
455         return __tcf_chain_get(block, chain_index, true, true);
456 }
457 EXPORT_SYMBOL(tcf_chain_get_by_act);
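/* Note: tcf_chain_get_by_act()/tcf_chain_put_by_act() are intended for the
 * action layer (e.g. "goto chain" handling in act_api), which needs to hold a
 * chain without making it visible to the user as an explicitly created chain.
 */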
458
459 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
460                                void *tmplt_priv);
461 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
462                                   void *tmplt_priv, u32 chain_index,
463                                   struct tcf_block *block, struct sk_buff *oskb,
464                                   u32 seq, u16 flags, bool unicast);
465
466 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
467                             bool explicitly_created)
468 {
469         struct tcf_block *block = chain->block;
470         const struct tcf_proto_ops *tmplt_ops;
471         bool free_block = false;
472         unsigned int refcnt;
473         void *tmplt_priv;
474
475         mutex_lock(&block->lock);
476         if (explicitly_created) {
477                 if (!chain->explicitly_created) {
478                         mutex_unlock(&block->lock);
479                         return;
480                 }
481                 chain->explicitly_created = false;
482         }
483
484         if (by_act)
485                 chain->action_refcnt--;
486
487         /* tc_chain_notify_delete can't be called while holding block lock.
488          * However, once the block is unlocked the chain can be changed concurrently,
489          * so save these to temporary variables.
489          * save these to temporary variables.
490          */
491         refcnt = --chain->refcnt;
492         tmplt_ops = chain->tmplt_ops;
493         tmplt_priv = chain->tmplt_priv;
494
495         /* The last dropped non-action reference will trigger notification. */
496         if (refcnt - chain->action_refcnt == 0 && !by_act) {
497                 tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
498                                        block, NULL, 0, 0, false);
499                 /* Last reference to chain, no need to lock. */
500                 chain->flushing = false;
501         }
502
503         if (refcnt == 0)
504                 free_block = tcf_chain_detach(chain);
505         mutex_unlock(&block->lock);
506
507         if (refcnt == 0) {
508                 tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
509                 tcf_chain_destroy(chain, free_block);
510         }
511 }
512
513 static void tcf_chain_put(struct tcf_chain *chain)
514 {
515         __tcf_chain_put(chain, false, false);
516 }
517
518 void tcf_chain_put_by_act(struct tcf_chain *chain)
519 {
520         __tcf_chain_put(chain, true, false);
521 }
522 EXPORT_SYMBOL(tcf_chain_put_by_act);
523
524 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
525 {
526         __tcf_chain_put(chain, false, true);
527 }
528
529 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
530 {
531         struct tcf_proto *tp, *tp_next;
532
533         mutex_lock(&chain->filter_chain_lock);
534         tp = tcf_chain_dereference(chain->filter_chain, chain);
535         RCU_INIT_POINTER(chain->filter_chain, NULL);
536         tcf_chain0_head_change(chain, NULL);
537         chain->flushing = true;
538         mutex_unlock(&chain->filter_chain_lock);
539
540         while (tp) {
541                 tp_next = rcu_dereference_protected(tp->next, 1);
542                 tcf_proto_put(tp, rtnl_held, NULL);
543                 tp = tp_next;
544         }
545 }
546
547 static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
548 {
549         const struct Qdisc_class_ops *cops;
550         struct Qdisc *qdisc;
551
552         if (!dev_ingress_queue(dev))
553                 return NULL;
554
555         qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
556         if (!qdisc)
557                 return NULL;
558
559         cops = qdisc->ops->cl_ops;
560         if (!cops)
561                 return NULL;
562
563         if (!cops->tcf_block)
564                 return NULL;
565
566         return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
567 }
568
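/* Indirect block infrastructure: lets a driver receive TC_SETUP_BLOCK
 * bind/unbind calls for blocks attached to net_devices it does not implement
 * ->ndo_setup_tc for (tunnel devices, for instance). Registered callbacks are
 * kept per device in the rhashtable below, keyed by the net_device pointer,
 * and are invoked as cb(dev, cb_priv, TC_SETUP_BLOCK, &bo).
 */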
569 static struct rhashtable indr_setup_block_ht;
570
571 struct tc_indr_block_dev {
572         struct rhash_head ht_node;
573         struct net_device *dev;
574         unsigned int refcnt;
575         struct list_head cb_list;
576         struct tcf_block *block;
577 };
578
579 struct tc_indr_block_cb {
580         struct list_head list;
581         void *cb_priv;
582         tc_indr_block_bind_cb_t *cb;
583         void *cb_ident;
584 };
585
586 static const struct rhashtable_params tc_indr_setup_block_ht_params = {
587         .key_offset     = offsetof(struct tc_indr_block_dev, dev),
588         .head_offset    = offsetof(struct tc_indr_block_dev, ht_node),
589         .key_len        = sizeof(struct net_device *),
590 };
591
592 static struct tc_indr_block_dev *
593 tc_indr_block_dev_lookup(struct net_device *dev)
594 {
595         return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
596                                       tc_indr_setup_block_ht_params);
597 }
598
599 static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
600 {
601         struct tc_indr_block_dev *indr_dev;
602
603         indr_dev = tc_indr_block_dev_lookup(dev);
604         if (indr_dev)
605                 goto inc_ref;
606
607         indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
608         if (!indr_dev)
609                 return NULL;
610
611         INIT_LIST_HEAD(&indr_dev->cb_list);
612         indr_dev->dev = dev;
613         indr_dev->block = tc_dev_ingress_block(dev);
614         if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
615                                    tc_indr_setup_block_ht_params)) {
616                 kfree(indr_dev);
617                 return NULL;
618         }
619
620 inc_ref:
621         indr_dev->refcnt++;
622         return indr_dev;
623 }
624
625 static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
626 {
627         if (--indr_dev->refcnt)
628                 return;
629
630         rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
631                                tc_indr_setup_block_ht_params);
632         kfree(indr_dev);
633 }
634
635 static struct tc_indr_block_cb *
636 tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
637                         tc_indr_block_bind_cb_t *cb, void *cb_ident)
638 {
639         struct tc_indr_block_cb *indr_block_cb;
640
641         list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
642                 if (indr_block_cb->cb == cb &&
643                     indr_block_cb->cb_ident == cb_ident)
644                         return indr_block_cb;
645         return NULL;
646 }
647
648 static struct tc_indr_block_cb *
649 tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
650                      tc_indr_block_bind_cb_t *cb, void *cb_ident)
651 {
652         struct tc_indr_block_cb *indr_block_cb;
653
654         indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
655         if (indr_block_cb)
656                 return ERR_PTR(-EEXIST);
657
658         indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
659         if (!indr_block_cb)
660                 return ERR_PTR(-ENOMEM);
661
662         indr_block_cb->cb_priv = cb_priv;
663         indr_block_cb->cb = cb;
664         indr_block_cb->cb_ident = cb_ident;
665         list_add(&indr_block_cb->list, &indr_dev->cb_list);
666
667         return indr_block_cb;
668 }
669
670 static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
671 {
672         list_del(&indr_block_cb->list);
673         kfree(indr_block_cb);
674 }
675
676 static int tcf_block_setup(struct tcf_block *block,
677                            struct flow_block_offload *bo);
678
679 static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
680                                   struct tc_indr_block_cb *indr_block_cb,
681                                   enum flow_block_command command)
682 {
683         struct flow_block_offload bo = {
684                 .command        = command,
685                 .binder_type    = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
686                 .net            = dev_net(indr_dev->dev),
687                 .block_shared   = tcf_block_non_null_shared(indr_dev->block),
688         };
689         INIT_LIST_HEAD(&bo.cb_list);
690
691         if (!indr_dev->block)
692                 return;
693
694         indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
695                           &bo);
696         tcf_block_setup(indr_dev->block, &bo);
697 }
698
699 int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
700                                 tc_indr_block_bind_cb_t *cb, void *cb_ident)
701 {
702         struct tc_indr_block_cb *indr_block_cb;
703         struct tc_indr_block_dev *indr_dev;
704         int err;
705
706         indr_dev = tc_indr_block_dev_get(dev);
707         if (!indr_dev)
708                 return -ENOMEM;
709
710         indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
711         err = PTR_ERR_OR_ZERO(indr_block_cb);
712         if (err)
713                 goto err_dev_put;
714
715         tc_indr_block_ing_cmd(indr_dev, indr_block_cb, FLOW_BLOCK_BIND);
716         return 0;
717
718 err_dev_put:
719         tc_indr_block_dev_put(indr_dev);
720         return err;
721 }
722 EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);
723
724 int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
725                               tc_indr_block_bind_cb_t *cb, void *cb_ident)
726 {
727         int err;
728
729         rtnl_lock();
730         err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
731         rtnl_unlock();
732
733         return err;
734 }
735 EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);
736
737 void __tc_indr_block_cb_unregister(struct net_device *dev,
738                                    tc_indr_block_bind_cb_t *cb, void *cb_ident)
739 {
740         struct tc_indr_block_cb *indr_block_cb;
741         struct tc_indr_block_dev *indr_dev;
742
743         indr_dev = tc_indr_block_dev_lookup(dev);
744         if (!indr_dev)
745                 return;
746
747         indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
748         if (!indr_block_cb)
749                 return;
750
751         /* Send unbind message if required to free any block cbs. */
752         tc_indr_block_ing_cmd(indr_dev, indr_block_cb, FLOW_BLOCK_UNBIND);
753         tc_indr_block_cb_del(indr_block_cb);
754         tc_indr_block_dev_put(indr_dev);
755 }
756 EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);
757
758 void tc_indr_block_cb_unregister(struct net_device *dev,
759                                  tc_indr_block_bind_cb_t *cb, void *cb_ident)
760 {
761         rtnl_lock();
762         __tc_indr_block_cb_unregister(dev, cb, cb_ident);
763         rtnl_unlock();
764 }
765 EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
766
767 static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
768                                struct tcf_block_ext_info *ei,
769                                enum flow_block_command command,
770                                struct netlink_ext_ack *extack)
771 {
772         struct tc_indr_block_cb *indr_block_cb;
773         struct tc_indr_block_dev *indr_dev;
774         struct flow_block_offload bo = {
775                 .command        = command,
776                 .binder_type    = ei->binder_type,
777                 .net            = dev_net(dev),
778                 .block_shared   = tcf_block_shared(block),
779                 .extack         = extack,
780         };
781         INIT_LIST_HEAD(&bo.cb_list);
782
783         indr_dev = tc_indr_block_dev_lookup(dev);
784         if (!indr_dev)
785                 return;
786
787         indr_dev->block = command == FLOW_BLOCK_BIND ? block : NULL;
788
789         list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
790                 indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
791                                   &bo);
792
793         tcf_block_setup(block, &bo);
794 }
795
796 static bool tcf_block_offload_in_use(struct tcf_block *block)
797 {
798         return block->offloadcnt;
799 }
800
801 static int tcf_block_offload_cmd(struct tcf_block *block,
802                                  struct net_device *dev,
803                                  struct tcf_block_ext_info *ei,
804                                  enum flow_block_command command,
805                                  struct netlink_ext_ack *extack)
806 {
807         struct flow_block_offload bo = {};
808         int err;
809
810         bo.net = dev_net(dev);
811         bo.command = command;
812         bo.binder_type = ei->binder_type;
813         bo.block_shared = tcf_block_shared(block);
814         bo.extack = extack;
815         INIT_LIST_HEAD(&bo.cb_list);
816
817         err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
818         if (err < 0)
819                 return err;
820
821         return tcf_block_setup(block, &bo);
822 }
823
824 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
825                                   struct tcf_block_ext_info *ei,
826                                   struct netlink_ext_ack *extack)
827 {
828         struct net_device *dev = q->dev_queue->dev;
829         int err;
830
831         if (!dev->netdev_ops->ndo_setup_tc)
832                 goto no_offload_dev_inc;
833
834         /* If tc offload feature is disabled and the block we try to bind
835          * to already has some offloaded filters, refuse to bind.
836          */
837         if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
838                 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
839                 return -EOPNOTSUPP;
840         }
841
842         err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
843         if (err == -EOPNOTSUPP)
844                 goto no_offload_dev_inc;
845         if (err)
846                 return err;
847
848         tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
849         return 0;
850
851 no_offload_dev_inc:
852         if (tcf_block_offload_in_use(block))
853                 return -EOPNOTSUPP;
854         block->nooffloaddevcnt++;
855         tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
856         return 0;
857 }
858
859 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
860                                      struct tcf_block_ext_info *ei)
861 {
862         struct net_device *dev = q->dev_queue->dev;
863         int err;
864
865         tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
866
867         if (!dev->netdev_ops->ndo_setup_tc)
868                 goto no_offload_dev_dec;
869         err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
870         if (err == -EOPNOTSUPP)
871                 goto no_offload_dev_dec;
872         return;
873
874 no_offload_dev_dec:
875         WARN_ON(block->nooffloaddevcnt-- == 0);
876 }
877
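/* chain0 head change callbacks: a qdisc that caches the head tcf_proto of
 * chain 0 (see tcf_chain_head_change_dflt() further down) registers an item
 * here so that its cached pointer is updated whenever the first filter of
 * chain 0 is replaced or removed.
 */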
878 static int
879 tcf_chain0_head_change_cb_add(struct tcf_block *block,
880                               struct tcf_block_ext_info *ei,
881                               struct netlink_ext_ack *extack)
882 {
883         struct tcf_filter_chain_list_item *item;
884         struct tcf_chain *chain0;
885
886         item = kmalloc(sizeof(*item), GFP_KERNEL);
887         if (!item) {
888                 NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
889                 return -ENOMEM;
890         }
891         item->chain_head_change = ei->chain_head_change;
892         item->chain_head_change_priv = ei->chain_head_change_priv;
893
894         mutex_lock(&block->lock);
895         chain0 = block->chain0.chain;
896         if (chain0)
897                 tcf_chain_hold(chain0);
898         else
899                 list_add(&item->list, &block->chain0.filter_chain_list);
900         mutex_unlock(&block->lock);
901
902         if (chain0) {
903                 struct tcf_proto *tp_head;
904
905                 mutex_lock(&chain0->filter_chain_lock);
906
907                 tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
908                 if (tp_head)
909                         tcf_chain_head_change_item(item, tp_head);
910
911                 mutex_lock(&block->lock);
912                 list_add(&item->list, &block->chain0.filter_chain_list);
913                 mutex_unlock(&block->lock);
914
915                 mutex_unlock(&chain0->filter_chain_lock);
916                 tcf_chain_put(chain0);
917         }
918
919         return 0;
920 }
921
922 static void
923 tcf_chain0_head_change_cb_del(struct tcf_block *block,
924                               struct tcf_block_ext_info *ei)
925 {
926         struct tcf_filter_chain_list_item *item;
927
928         mutex_lock(&block->lock);
929         list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
930                 if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
931                     (item->chain_head_change == ei->chain_head_change &&
932                      item->chain_head_change_priv == ei->chain_head_change_priv)) {
933                         if (block->chain0.chain)
934                                 tcf_chain_head_change_item(item, NULL);
935                         list_del(&item->list);
936                         mutex_unlock(&block->lock);
937
938                         kfree(item);
939                         return;
940                 }
941         }
942         mutex_unlock(&block->lock);
943         WARN_ON(1);
944 }
945
946 struct tcf_net {
947         spinlock_t idr_lock; /* Protects idr */
948         struct idr idr;
949 };
950
951 static unsigned int tcf_net_id;
952
953 static int tcf_block_insert(struct tcf_block *block, struct net *net,
954                             struct netlink_ext_ack *extack)
955 {
956         struct tcf_net *tn = net_generic(net, tcf_net_id);
957         int err;
958
959         idr_preload(GFP_KERNEL);
960         spin_lock(&tn->idr_lock);
961         err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
962                             GFP_NOWAIT);
963         spin_unlock(&tn->idr_lock);
964         idr_preload_end();
965
966         return err;
967 }
968
969 static void tcf_block_remove(struct tcf_block *block, struct net *net)
970 {
971         struct tcf_net *tn = net_generic(net, tcf_net_id);
972
973         spin_lock(&tn->idr_lock);
974         idr_remove(&tn->idr, block->index);
975         spin_unlock(&tn->idr_lock);
976 }
977
978 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
979                                           u32 block_index,
980                                           struct netlink_ext_ack *extack)
981 {
982         struct tcf_block *block;
983
984         block = kzalloc(sizeof(*block), GFP_KERNEL);
985         if (!block) {
986                 NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
987                 return ERR_PTR(-ENOMEM);
988         }
989         mutex_init(&block->lock);
990         INIT_LIST_HEAD(&block->chain_list);
991         INIT_LIST_HEAD(&block->cb_list);
992         INIT_LIST_HEAD(&block->owner_list);
993         INIT_LIST_HEAD(&block->chain0.filter_chain_list);
994
995         refcount_set(&block->refcnt, 1);
996         block->net = net;
997         block->index = block_index;
998
999         /* Don't store q pointer for blocks which are shared */
1000         if (!tcf_block_shared(block))
1001                 block->q = q;
1002         return block;
1003 }
1004
1005 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
1006 {
1007         struct tcf_net *tn = net_generic(net, tcf_net_id);
1008
1009         return idr_find(&tn->idr, block_index);
1010 }
1011
1012 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
1013 {
1014         struct tcf_block *block;
1015
1016         rcu_read_lock();
1017         block = tcf_block_lookup(net, block_index);
1018         if (block && !refcount_inc_not_zero(&block->refcnt))
1019                 block = NULL;
1020         rcu_read_unlock();
1021
1022         return block;
1023 }
1024
1025 static struct tcf_chain *
1026 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
1027 {
1028         mutex_lock(&block->lock);
1029         if (chain)
1030                 chain = list_is_last(&chain->list, &block->chain_list) ?
1031                         NULL : list_next_entry(chain, list);
1032         else
1033                 chain = list_first_entry_or_null(&block->chain_list,
1034                                                  struct tcf_chain, list);
1035
1036         /* skip all action-only chains */
1037         while (chain && tcf_chain_held_by_acts_only(chain))
1038                 chain = list_is_last(&chain->list, &block->chain_list) ?
1039                         NULL : list_next_entry(chain, list);
1040
1041         if (chain)
1042                 tcf_chain_hold(chain);
1043         mutex_unlock(&block->lock);
1044
1045         return chain;
1046 }
1047
1048 /* Function to be used by all clients that want to iterate over all chains on
1049  * a block. It properly obtains block->lock and takes a reference to the chain
1050  * before returning it. Users of this function must be tolerant to concurrent
1051  * chain insertion/deletion or ensure that no concurrent chain modification is
1052  * possible. Note that netlink dump callbacks cannot guarantee a consistent
1053  * dump because the rtnl lock is released each time the skb is filled with
1054  * data and sent to user-space.
1055  */
1056
1057 struct tcf_chain *
1058 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
1059 {
1060         struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
1061
1062         if (chain)
1063                 tcf_chain_put(chain);
1064
1065         return chain_next;
1066 }
1067 EXPORT_SYMBOL(tcf_get_next_chain);
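/* Typical iteration (see tcf_block_flush_all_chains() below for an in-tree
 * example):
 *
 *	for (chain = tcf_get_next_chain(block, NULL); chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...;
 *
 * The helper drops the reference on the chain passed in and returns the next
 * chain with a reference already taken.
 */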
1068
1069 static struct tcf_proto *
1070 __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
1071 {
1072         u32 prio = 0;
1073
1074         ASSERT_RTNL();
1075         mutex_lock(&chain->filter_chain_lock);
1076
1077         if (!tp) {
1078                 tp = tcf_chain_dereference(chain->filter_chain, chain);
1079         } else if (tcf_proto_is_deleting(tp)) {
1080                 /* 'deleting' flag is set and chain->filter_chain_lock was
1081                  * unlocked, which means next pointer could be invalid. Restart
1082                  * search.
1083                  */
1084                 prio = tp->prio + 1;
1085                 tp = tcf_chain_dereference(chain->filter_chain, chain);
1086
1087                 for (; tp; tp = tcf_chain_dereference(tp->next, chain))
1088                         if (!tp->deleting && tp->prio >= prio)
1089                                 break;
1090         } else {
1091                 tp = tcf_chain_dereference(tp->next, chain);
1092         }
1093
1094         if (tp)
1095                 tcf_proto_get(tp);
1096
1097         mutex_unlock(&chain->filter_chain_lock);
1098
1099         return tp;
1100 }
1101
1102 /* Function to be used by all clients that want to iterate over all tp's on
1103  * a chain. Users of this function must be tolerant to concurrent tp
1104  * insertion/deletion or ensure that no concurrent chain modification is
1105  * possible. Note that netlink dump callbacks cannot guarantee a consistent
1106  * dump because the rtnl lock is released each time the skb is filled with
1107  * data and sent to user-space.
1108  */
1109
1110 struct tcf_proto *
1111 tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
1112                    bool rtnl_held)
1113 {
1114         struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
1115
1116         if (tp)
1117                 tcf_proto_put(tp, rtnl_held, NULL);
1118
1119         return tp_next;
1120 }
1121 EXPORT_SYMBOL(tcf_get_next_proto);
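/* Typical iteration (illustrative):
 *
 *	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held); tp;
 *	     tp = tcf_get_next_proto(chain, tp, rtnl_held))
 *		...;
 *
 * As with tcf_get_next_chain(), the helper releases the tp passed in and
 * returns the next one with a reference taken.
 */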
1122
1123 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1124 {
1125         struct tcf_chain *chain;
1126
1127         /* Last reference to block. At this point chains cannot be added or
1128          * removed concurrently.
1129          */
1130         for (chain = tcf_get_next_chain(block, NULL);
1131              chain;
1132              chain = tcf_get_next_chain(block, chain)) {
1133                 tcf_chain_put_explicitly_created(chain);
1134                 tcf_chain_flush(chain, rtnl_held);
1135         }
1136 }
1137
1138 /* Look up the Qdisc and increment its reference counter.
1139  * Set parent, if necessary.
1140  */
1141
1142 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1143                             u32 *parent, int ifindex, bool rtnl_held,
1144                             struct netlink_ext_ack *extack)
1145 {
1146         const struct Qdisc_class_ops *cops;
1147         struct net_device *dev;
1148         int err = 0;
1149
1150         if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1151                 return 0;
1152
1153         rcu_read_lock();
1154
1155         /* Find link */
1156         dev = dev_get_by_index_rcu(net, ifindex);
1157         if (!dev) {
1158                 rcu_read_unlock();
1159                 return -ENODEV;
1160         }
1161
1162         /* Find qdisc */
1163         if (!*parent) {
1164                 *q = dev->qdisc;
1165                 *parent = (*q)->handle;
1166         } else {
1167                 *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1168                 if (!*q) {
1169                         NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1170                         err = -EINVAL;
1171                         goto errout_rcu;
1172                 }
1173         }
1174
1175         *q = qdisc_refcount_inc_nz(*q);
1176         if (!*q) {
1177                 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1178                 err = -EINVAL;
1179                 goto errout_rcu;
1180         }
1181
1182         /* Is it classful? */
1183         cops = (*q)->ops->cl_ops;
1184         if (!cops) {
1185                 NL_SET_ERR_MSG(extack, "Qdisc not classful");
1186                 err = -EINVAL;
1187                 goto errout_qdisc;
1188         }
1189
1190         if (!cops->tcf_block) {
1191                 NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1192                 err = -EOPNOTSUPP;
1193                 goto errout_qdisc;
1194         }
1195
1196 errout_rcu:
1197         /* At this point we know that qdisc is not noop_qdisc,
1198          * which means that qdisc holds a reference to net_device
1199          * and we hold a reference to qdisc, so it is safe to release
1200          * rcu read lock.
1201          */
1202         rcu_read_unlock();
1203         return err;
1204
1205 errout_qdisc:
1206         rcu_read_unlock();
1207
1208         if (rtnl_held)
1209                 qdisc_put(*q);
1210         else
1211                 qdisc_put_unlocked(*q);
1212         *q = NULL;
1213
1214         return err;
1215 }
1216
1217 static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
1218                                int ifindex, struct netlink_ext_ack *extack)
1219 {
1220         if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1221                 return 0;
1222
1223         /* Are we searching for a filter attached to a class? */
1224         if (TC_H_MIN(parent)) {
1225                 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1226
1227                 *cl = cops->find(q, parent);
1228                 if (*cl == 0) {
1229                         NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1230                         return -ENOENT;
1231                 }
1232         }
1233
1234         return 0;
1235 }
1236
1237 static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
1238                                           unsigned long cl, int ifindex,
1239                                           u32 block_index,
1240                                           struct netlink_ext_ack *extack)
1241 {
1242         struct tcf_block *block;
1243
1244         if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1245                 block = tcf_block_refcnt_get(net, block_index);
1246                 if (!block) {
1247                         NL_SET_ERR_MSG(extack, "Block of given index was not found");
1248                         return ERR_PTR(-EINVAL);
1249                 }
1250         } else {
1251                 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1252
1253                 block = cops->tcf_block(q, cl, extack);
1254                 if (!block)
1255                         return ERR_PTR(-EINVAL);
1256
1257                 if (tcf_block_shared(block)) {
1258                         NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1259                         return ERR_PTR(-EOPNOTSUPP);
1260                 }
1261
1262                 /* Always take a reference to the block in order to support execution
1263                  * of the rules update path of cls API without the rtnl lock. The caller
1264                  * must release the block when it is finished using it. The 'if' branch
1265                  * of this conditional obtains its reference to the block by calling
1266                  * tcf_block_refcnt_get().
1267                  */
1268                 refcount_inc(&block->refcnt);
1269         }
1270
1271         return block;
1272 }
1273
1274 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1275                             struct tcf_block_ext_info *ei, bool rtnl_held)
1276 {
1277         if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1278                 /* Flushing/putting all chains will cause the block to be
1279                  * deallocated when the last chain is freed. However, if chain_list
1280                  * is empty, the block has to be manually deallocated. After the block
1281                  * reference counter has reached 0, it is no longer possible to
1282                  * increment it or add new chains to the block.
1283                  */
1284                 bool free_block = list_empty(&block->chain_list);
1285
1286                 mutex_unlock(&block->lock);
1287                 if (tcf_block_shared(block))
1288                         tcf_block_remove(block, block->net);
1289
1290                 if (q)
1291                         tcf_block_offload_unbind(block, q, ei);
1292
1293                 if (free_block)
1294                         tcf_block_destroy(block);
1295                 else
1296                         tcf_block_flush_all_chains(block, rtnl_held);
1297         } else if (q) {
1298                 tcf_block_offload_unbind(block, q, ei);
1299         }
1300 }
1301
1302 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1303 {
1304         __tcf_block_put(block, NULL, NULL, rtnl_held);
1305 }
1306
1307 /* Find tcf block.
1308  * Set q, parent, cl when appropriate.
1309  */
1310
1311 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
1312                                         u32 *parent, unsigned long *cl,
1313                                         int ifindex, u32 block_index,
1314                                         struct netlink_ext_ack *extack)
1315 {
1316         struct tcf_block *block;
1317         int err = 0;
1318
1319         ASSERT_RTNL();
1320
1321         err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
1322         if (err)
1323                 goto errout;
1324
1325         err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
1326         if (err)
1327                 goto errout_qdisc;
1328
1329         block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1330         if (IS_ERR(block)) {
1331                 err = PTR_ERR(block);
1332                 goto errout_qdisc;
1333         }
1334
1335         return block;
1336
1337 errout_qdisc:
1338         if (*q)
1339                 qdisc_put(*q);
1340 errout:
1341         *q = NULL;
1342         return ERR_PTR(err);
1343 }
1344
1345 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1346                               bool rtnl_held)
1347 {
1348         if (!IS_ERR_OR_NULL(block))
1349                 tcf_block_refcnt_put(block, rtnl_held);
1350
1351         if (q) {
1352                 if (rtnl_held)
1353                         qdisc_put(q);
1354                 else
1355                         qdisc_put_unlocked(q);
1356         }
1357 }
1358
1359 struct tcf_block_owner_item {
1360         struct list_head list;
1361         struct Qdisc *q;
1362         enum flow_block_binder_type binder_type;
1363 };
1364
1365 static void
1366 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1367                                struct Qdisc *q,
1368                                enum flow_block_binder_type binder_type)
1369 {
1370         if (block->keep_dst &&
1371             binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1372             binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1373                 netif_keep_dst(qdisc_dev(q));
1374 }
1375
1376 void tcf_block_netif_keep_dst(struct tcf_block *block)
1377 {
1378         struct tcf_block_owner_item *item;
1379
1380         block->keep_dst = true;
1381         list_for_each_entry(item, &block->owner_list, list)
1382                 tcf_block_owner_netif_keep_dst(block, item->q,
1383                                                item->binder_type);
1384 }
1385 EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1386
1387 static int tcf_block_owner_add(struct tcf_block *block,
1388                                struct Qdisc *q,
1389                                enum flow_block_binder_type binder_type)
1390 {
1391         struct tcf_block_owner_item *item;
1392
1393         item = kmalloc(sizeof(*item), GFP_KERNEL);
1394         if (!item)
1395                 return -ENOMEM;
1396         item->q = q;
1397         item->binder_type = binder_type;
1398         list_add(&item->list, &block->owner_list);
1399         return 0;
1400 }
1401
1402 static void tcf_block_owner_del(struct tcf_block *block,
1403                                 struct Qdisc *q,
1404                                 enum flow_block_binder_type binder_type)
1405 {
1406         struct tcf_block_owner_item *item;
1407
1408         list_for_each_entry(item, &block->owner_list, list) {
1409                 if (item->q == q && item->binder_type == binder_type) {
1410                         list_del(&item->list);
1411                         kfree(item);
1412                         return;
1413                 }
1414         }
1415         WARN_ON(1);
1416 }
1417
1418 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1419                       struct tcf_block_ext_info *ei,
1420                       struct netlink_ext_ack *extack)
1421 {
1422         struct net *net = qdisc_net(q);
1423         struct tcf_block *block = NULL;
1424         int err;
1425
1426         if (ei->block_index)
1427                 /* block_index not 0 means the shared block is requested */
1428                 block = tcf_block_refcnt_get(net, ei->block_index);
1429
1430         if (!block) {
1431                 block = tcf_block_create(net, q, ei->block_index, extack);
1432                 if (IS_ERR(block))
1433                         return PTR_ERR(block);
1434                 if (tcf_block_shared(block)) {
1435                         err = tcf_block_insert(block, net, extack);
1436                         if (err)
1437                                 goto err_block_insert;
1438                 }
1439         }
1440
1441         err = tcf_block_owner_add(block, q, ei->binder_type);
1442         if (err)
1443                 goto err_block_owner_add;
1444
1445         tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1446
1447         err = tcf_chain0_head_change_cb_add(block, ei, extack);
1448         if (err)
1449                 goto err_chain0_head_change_cb_add;
1450
1451         err = tcf_block_offload_bind(block, q, ei, extack);
1452         if (err)
1453                 goto err_block_offload_bind;
1454
1455         *p_block = block;
1456         return 0;
1457
1458 err_block_offload_bind:
1459         tcf_chain0_head_change_cb_del(block, ei);
1460 err_chain0_head_change_cb_add:
1461         tcf_block_owner_del(block, q, ei->binder_type);
1462 err_block_owner_add:
1463 err_block_insert:
1464         tcf_block_refcnt_put(block, true);
1465         return err;
1466 }
1467 EXPORT_SYMBOL(tcf_block_get_ext);
1468
1469 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1470 {
1471         struct tcf_proto __rcu **p_filter_chain = priv;
1472
1473         rcu_assign_pointer(*p_filter_chain, tp_head);
1474 }
1475
1476 int tcf_block_get(struct tcf_block **p_block,
1477                   struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1478                   struct netlink_ext_ack *extack)
1479 {
1480         struct tcf_block_ext_info ei = {
1481                 .chain_head_change = tcf_chain_head_change_dflt,
1482                 .chain_head_change_priv = p_filter_chain,
1483         };
1484
1485         WARN_ON(!p_filter_chain);
1486         return tcf_block_get_ext(p_block, q, &ei, extack);
1487 }
1488 EXPORT_SYMBOL(tcf_block_get);
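/* Illustrative use (assumed typical qdisc pattern; 'q' stands for the qdisc's
 * private data and the field names vary per qdisc): a classful qdisc attaches
 * a block in its ->init() and caches the chain 0 head in its own filter list
 * pointer:
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 */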
1489
1490 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1491  * actions should all be removed after flushing.
1492  */
1493 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1494                        struct tcf_block_ext_info *ei)
1495 {
1496         if (!block)
1497                 return;
1498         tcf_chain0_head_change_cb_del(block, ei);
1499         tcf_block_owner_del(block, q, ei->binder_type);
1500
1501         __tcf_block_put(block, q, ei, true);
1502 }
1503 EXPORT_SYMBOL(tcf_block_put_ext);
1504
1505 void tcf_block_put(struct tcf_block *block)
1506 {
1507         struct tcf_block_ext_info ei = {0, };
1508
1509         if (!block)
1510                 return;
1511         tcf_block_put_ext(block, block->q, &ei);
1512 }
1513
1514 EXPORT_SYMBOL(tcf_block_put);
1515
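/* Replay all filters currently installed on the block to a single offload
 * callback: walk every chain and every tcf_proto and invoke the classifier's
 * ->reoffload() with add=true (when binding a new callback) or add=false
 * (when unbinding). If a classifier lacks ->reoffload() while offloads are
 * already in use, binding is refused with -EOPNOTSUPP.
 */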
1516 static int
1517 tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
1518                             void *cb_priv, bool add, bool offload_in_use,
1519                             struct netlink_ext_ack *extack)
1520 {
1521         struct tcf_chain *chain, *chain_prev;
1522         struct tcf_proto *tp, *tp_prev;
1523         int err;
1524
1525         for (chain = __tcf_get_next_chain(block, NULL);
1526              chain;
1527              chain_prev = chain,
1528                      chain = __tcf_get_next_chain(block, chain),
1529                      tcf_chain_put(chain_prev)) {
1530                 for (tp = __tcf_get_next_proto(chain, NULL); tp;
1531                      tp_prev = tp,
1532                              tp = __tcf_get_next_proto(chain, tp),
1533                              tcf_proto_put(tp_prev, true, NULL)) {
1534                         if (tp->ops->reoffload) {
1535                                 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1536                                                          extack);
1537                                 if (err && add)
1538                                         goto err_playback_remove;
1539                         } else if (add && offload_in_use) {
1540                                 err = -EOPNOTSUPP;
1541                                 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1542                                 goto err_playback_remove;
1543                         }
1544                 }
1545         }
1546
1547         return 0;
1548
1549 err_playback_remove:
1550         tcf_proto_put(tp, true, NULL);
1551         tcf_chain_put(chain);
1552         tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1553                                     extack);
1554         return err;
1555 }
1556
1557 static int tcf_block_bind(struct tcf_block *block,
1558                           struct flow_block_offload *bo)
1559 {
1560         struct flow_block_cb *block_cb, *next;
1561         int err, i = 0;
1562
1563         list_for_each_entry(block_cb, &bo->cb_list, list) {
1564                 err = tcf_block_playback_offloads(block, block_cb->cb,
1565                                                   block_cb->cb_priv, true,
1566                                                   tcf_block_offload_in_use(block),
1567                                                   bo->extack);
1568                 if (err)
1569                         goto err_unroll;
1570
1571                 i++;
1572         }
1573         list_splice(&bo->cb_list, &block->cb_list);
1574
1575         return 0;
1576
1577 err_unroll:
1578         list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1579                 if (i-- > 0) {
1580                         list_del(&block_cb->list);
1581                         tcf_block_playback_offloads(block, block_cb->cb,
1582                                                     block_cb->cb_priv, false,
1583                                                     tcf_block_offload_in_use(block),
1584                                                     NULL);
1585                 }
1586                 flow_block_cb_free(block_cb);
1587         }
1588
1589         return err;
1590 }
1591
1592 static void tcf_block_unbind(struct tcf_block *block,
1593                              struct flow_block_offload *bo)
1594 {
1595         struct flow_block_cb *block_cb, *next;
1596
1597         list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1598                 tcf_block_playback_offloads(block, block_cb->cb,
1599                                             block_cb->cb_priv, false,
1600                                             tcf_block_offload_in_use(block),
1601                                             NULL);
1602                 list_del(&block_cb->list);
1603                 flow_block_cb_free(block_cb);
1604         }
1605 }
1606
1607 static int tcf_block_setup(struct tcf_block *block,
1608                            struct flow_block_offload *bo)
1609 {
1610         int err;
1611
1612         switch (bo->command) {
1613         case FLOW_BLOCK_BIND:
1614                 err = tcf_block_bind(block, bo);
1615                 break;
1616         case FLOW_BLOCK_UNBIND:
1617                 err = 0;
1618                 tcf_block_unbind(block, bo);
1619                 break;
1620         default:
1621                 WARN_ON_ONCE(1);
1622                 err = -EOPNOTSUPP;
1623         }
1624
1625         return err;
1626 }
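
/* tcf_block_setup() is the glue between the flow_block offload API and this
 * block: FLOW_BLOCK_BIND replays all existing filters into the driver
 * callbacks collected in bo->cb_list before committing them to the block's
 * cb_list, while FLOW_BLOCK_UNBIND tears the offloaded filters down and
 * frees the callbacks unconditionally, since unbind is not allowed to fail.
 */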
1627
/* Main classifier routine: walks the chain of classifiers attached to this
 * qdisc, skips entries whose protocol does not match the skb, and asks each
 * remaining classifier for a verdict until one returns a non-negative
 * result.
 */
1632 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1633                  struct tcf_result *res, bool compat_mode)
1634 {
1635 #ifdef CONFIG_NET_CLS_ACT
1636         const int max_reclassify_loop = 4;
1637         const struct tcf_proto *orig_tp = tp;
1638         const struct tcf_proto *first_tp;
1639         int limit = 0;
1640
1641 reclassify:
1642 #endif
1643         for (; tp; tp = rcu_dereference_bh(tp->next)) {
1644                 __be16 protocol = tc_skb_protocol(skb);
1645                 int err;
1646
1647                 if (tp->protocol != protocol &&
1648                     tp->protocol != htons(ETH_P_ALL))
1649                         continue;
1650
1651                 err = tp->classify(skb, tp, res);
1652 #ifdef CONFIG_NET_CLS_ACT
1653                 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1654                         first_tp = orig_tp;
1655                         goto reset;
1656                 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1657                         first_tp = res->goto_tp;
1658                         goto reset;
1659                 }
1660 #endif
1661                 if (err >= 0)
1662                         return err;
1663         }
1664
1665         return TC_ACT_UNSPEC; /* signal: continue lookup */
1666 #ifdef CONFIG_NET_CLS_ACT
1667 reset:
1668         if (unlikely(limit++ >= max_reclassify_loop)) {
1669                 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1670                                        tp->chain->block->index,
1671                                        tp->prio & 0xffff,
1672                                        ntohs(tp->protocol));
1673                 return TC_ACT_SHOT;
1674         }
1675
1676         tp = first_tp;
1677         goto reclassify;
1678 #endif
1679 }
1680 EXPORT_SYMBOL(tcf_classify);
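
/* A minimal sketch of a typical tcf_classify() caller (illustrative only,
 * not part of this file).  Classful qdiscs classify on enqueue roughly like
 * this, assuming q->filter_list holds the qdisc's chain 0 head:
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *
 *	switch (tcf_classify(skb, fl, &res, false)) {
 *	case TC_ACT_SHOT:
 *		return qdisc_drop(skb, sch, to_free);
 *	case TC_ACT_STOLEN:
 *	case TC_ACT_QUEUED:
 *	case TC_ACT_TRAP:
 *		return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 *	case TC_ACT_UNSPEC:
 *		break;			(no match, use the default class)
 *	default:
 *		break;			(res.classid selects the class)
 *	}
 */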
1681
1682 struct tcf_chain_info {
1683         struct tcf_proto __rcu **pprev;
1684         struct tcf_proto __rcu *next;
1685 };
1686
1687 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1688                                            struct tcf_chain_info *chain_info)
1689 {
1690         return tcf_chain_dereference(*chain_info->pprev, chain);
1691 }
1692
1693 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1694                                struct tcf_chain_info *chain_info,
1695                                struct tcf_proto *tp)
1696 {
1697         if (chain->flushing)
1698                 return -EAGAIN;
1699
1700         if (*chain_info->pprev == chain->filter_chain)
1701                 tcf_chain0_head_change(chain, tp);
1702         tcf_proto_get(tp);
1703         RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1704         rcu_assign_pointer(*chain_info->pprev, tp);
1705
1706         return 0;
1707 }
1708
1709 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1710                                 struct tcf_chain_info *chain_info,
1711                                 struct tcf_proto *tp)
1712 {
1713         struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1714
1715         tcf_proto_mark_delete(tp);
1716         if (tp == chain->filter_chain)
1717                 tcf_chain0_head_change(chain, next);
1718         RCU_INIT_POINTER(*chain_info->pprev, next);
1719 }
1720
1721 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1722                                            struct tcf_chain_info *chain_info,
1723                                            u32 protocol, u32 prio,
1724                                            bool prio_allocate);
1725
/* Try to insert a new proto.
 * If a proto with the specified priority already exists, free the new proto
 * and return the existing one.
 */
1730
1731 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1732                                                     struct tcf_proto *tp_new,
1733                                                     u32 protocol, u32 prio,
1734                                                     bool rtnl_held)
1735 {
1736         struct tcf_chain_info chain_info;
1737         struct tcf_proto *tp;
1738         int err = 0;
1739
1740         mutex_lock(&chain->filter_chain_lock);
1741
1742         tp = tcf_chain_tp_find(chain, &chain_info,
1743                                protocol, prio, false);
1744         if (!tp)
1745                 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1746         mutex_unlock(&chain->filter_chain_lock);
1747
1748         if (tp) {
1749                 tcf_proto_destroy(tp_new, rtnl_held, NULL);
1750                 tp_new = tp;
1751         } else if (err) {
1752                 tcf_proto_destroy(tp_new, rtnl_held, NULL);
1753                 tp_new = ERR_PTR(err);
1754         }
1755
1756         return tp_new;
1757 }
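
/* The find-then-insert above is what makes racing creators safe: two
 * concurrent tc_new_tfilter() calls may both allocate a proto for the same
 * (protocol, prio) slot, but only the first one is linked into the chain;
 * the loser's allocation is destroyed and both callers end up using the
 * proto that won.
 */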
1758
1759 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1760                                       struct tcf_proto *tp, bool rtnl_held,
1761                                       struct netlink_ext_ack *extack)
1762 {
1763         struct tcf_chain_info chain_info;
1764         struct tcf_proto *tp_iter;
1765         struct tcf_proto **pprev;
1766         struct tcf_proto *next;
1767
1768         mutex_lock(&chain->filter_chain_lock);
1769
1770         /* Atomically find and remove tp from chain. */
1771         for (pprev = &chain->filter_chain;
1772              (tp_iter = tcf_chain_dereference(*pprev, chain));
1773              pprev = &tp_iter->next) {
1774                 if (tp_iter == tp) {
1775                         chain_info.pprev = pprev;
1776                         chain_info.next = tp_iter->next;
1777                         WARN_ON(tp_iter->deleting);
1778                         break;
1779                 }
1780         }
1781         /* Verify that tp still exists and no new filters were inserted
1782          * concurrently.
1783          * Mark tp for deletion if it is empty.
1784          */
1785         if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
1786                 mutex_unlock(&chain->filter_chain_lock);
1787                 return;
1788         }
1789
1790         next = tcf_chain_dereference(chain_info.next, chain);
1791         if (tp == chain->filter_chain)
1792                 tcf_chain0_head_change(chain, next);
1793         RCU_INIT_POINTER(*chain_info.pprev, next);
1794         mutex_unlock(&chain->filter_chain_lock);
1795
1796         tcf_proto_put(tp, rtnl_held, extack);
1797 }
1798
1799 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1800                                            struct tcf_chain_info *chain_info,
1801                                            u32 protocol, u32 prio,
1802                                            bool prio_allocate)
1803 {
1804         struct tcf_proto **pprev;
1805         struct tcf_proto *tp;
1806
1807         /* Check the chain for existence of proto-tcf with this priority */
1808         for (pprev = &chain->filter_chain;
1809              (tp = tcf_chain_dereference(*pprev, chain));
1810              pprev = &tp->next) {
1811                 if (tp->prio >= prio) {
1812                         if (tp->prio == prio) {
1813                                 if (prio_allocate ||
1814                                     (tp->protocol != protocol && protocol))
1815                                         return ERR_PTR(-EINVAL);
1816                         } else {
1817                                 tp = NULL;
1818                         }
1819                         break;
1820                 }
1821         }
1822         chain_info->pprev = pprev;
1823         if (tp) {
1824                 chain_info->next = tp->next;
1825                 tcf_proto_get(tp);
1826         } else {
1827                 chain_info->next = NULL;
1828         }
1829         return tp;
1830 }
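
/* The filter chain is kept sorted by ascending priority, which is why the
 * walk above may stop at the first tp with tp->prio >= prio.  Return
 * conventions: NULL means no proto with this priority exists (chain_info is
 * still filled in so the caller knows where to insert), while
 * ERR_PTR(-EINVAL) signals a conflict, i.e. the priority is already taken
 * when the caller asked for auto-allocation, or taken by a proto of a
 * different protocol.
 */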
1831
1832 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1833                          struct tcf_proto *tp, struct tcf_block *block,
1834                          struct Qdisc *q, u32 parent, void *fh,
1835                          u32 portid, u32 seq, u16 flags, int event,
1836                          bool rtnl_held)
1837 {
1838         struct tcmsg *tcm;
1839         struct nlmsghdr  *nlh;
1840         unsigned char *b = skb_tail_pointer(skb);
1841
1842         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1843         if (!nlh)
1844                 goto out_nlmsg_trim;
1845         tcm = nlmsg_data(nlh);
1846         tcm->tcm_family = AF_UNSPEC;
1847         tcm->tcm__pad1 = 0;
1848         tcm->tcm__pad2 = 0;
1849         if (q) {
1850                 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1851                 tcm->tcm_parent = parent;
1852         } else {
1853                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1854                 tcm->tcm_block_index = block->index;
1855         }
1856         tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1857         if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1858                 goto nla_put_failure;
1859         if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1860                 goto nla_put_failure;
1861         if (!fh) {
1862                 tcm->tcm_handle = 0;
1863         } else {
1864                 if (tp->ops->dump &&
1865                     tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1866                         goto nla_put_failure;
1867         }
1868         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1869         return skb->len;
1870
1871 out_nlmsg_trim:
1872 nla_put_failure:
1873         nlmsg_trim(skb, b);
1874         return -1;
1875 }
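
/* tcf_fill_node() lays out a single RTM_*TFILTER message: the tcmsg header
 * identifies the block (ifindex/parent for qdisc-bound blocks, the
 * TCM_IFINDEX_MAGIC_BLOCK marker plus block index for shared ones) and
 * packs prio/protocol into tcm_info; TCA_KIND and TCA_CHAIN follow, and the
 * classifier's ->dump() op fills in the filter-specific attributes and
 * tcm_handle.
 */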
1876
1877 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1878                           struct nlmsghdr *n, struct tcf_proto *tp,
1879                           struct tcf_block *block, struct Qdisc *q,
1880                           u32 parent, void *fh, int event, bool unicast,
1881                           bool rtnl_held)
1882 {
1883         struct sk_buff *skb;
1884         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1885         int err = 0;
1886
1887         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1888         if (!skb)
1889                 return -ENOBUFS;
1890
1891         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1892                           n->nlmsg_seq, n->nlmsg_flags, event,
1893                           rtnl_held) <= 0) {
1894                 kfree_skb(skb);
1895                 return -EINVAL;
1896         }
1897
1898         if (unicast)
1899                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1900         else
1901                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1902                                      n->nlmsg_flags & NLM_F_ECHO);
1903
1904         if (err > 0)
1905                 err = 0;
1906         return err;
1907 }
1908
1909 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1910                               struct nlmsghdr *n, struct tcf_proto *tp,
1911                               struct tcf_block *block, struct Qdisc *q,
1912                               u32 parent, void *fh, bool unicast, bool *last,
1913                               bool rtnl_held, struct netlink_ext_ack *extack)
1914 {
1915         struct sk_buff *skb;
1916         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1917         int err;
1918
1919         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1920         if (!skb)
1921                 return -ENOBUFS;
1922
1923         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1924                           n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1925                           rtnl_held) <= 0) {
1926                 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1927                 kfree_skb(skb);
1928                 return -EINVAL;
1929         }
1930
1931         err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1932         if (err) {
1933                 kfree_skb(skb);
1934                 return err;
1935         }
1936
1937         if (unicast)
1938                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1939         else
1940                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1941                                      n->nlmsg_flags & NLM_F_ECHO);
1942         if (err < 0)
1943                 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1944
1945         if (err > 0)
1946                 err = 0;
1947         return err;
1948 }
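
/* The delete notification is built *before* ->delete() runs: once the
 * filter is gone its attributes can no longer be dumped, so the skb has to
 * be prepared while the filter still exists and is only sent once the
 * delete has actually succeeded.
 */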
1949
1950 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1951                                  struct tcf_block *block, struct Qdisc *q,
1952                                  u32 parent, struct nlmsghdr *n,
1953                                  struct tcf_chain *chain, int event,
1954                                  bool rtnl_held)
1955 {
1956         struct tcf_proto *tp;
1957
1958         for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1959              tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
1960                 tfilter_notify(net, oskb, n, tp, block,
1961                                q, parent, NULL, event, false, rtnl_held);
1962 }
1963
1964 static void tfilter_put(struct tcf_proto *tp, void *fh)
1965 {
1966         if (tp->ops->put && fh)
1967                 tp->ops->put(tp, fh);
1968 }
1969
1970 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1971                           struct netlink_ext_ack *extack)
1972 {
1973         struct net *net = sock_net(skb->sk);
1974         struct nlattr *tca[TCA_MAX + 1];
1975         struct tcmsg *t;
1976         u32 protocol;
1977         u32 prio;
1978         bool prio_allocate;
1979         u32 parent;
1980         u32 chain_index;
1981         struct Qdisc *q = NULL;
1982         struct tcf_chain_info chain_info;
1983         struct tcf_chain *chain = NULL;
1984         struct tcf_block *block;
1985         struct tcf_proto *tp;
1986         unsigned long cl;
1987         void *fh;
1988         int err;
1989         int tp_created;
1990         bool rtnl_held = false;
1991
1992         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1993                 return -EPERM;
1994
1995 replay:
1996         tp_created = 0;
1997
1998         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1999                                      rtm_tca_policy, extack);
2000         if (err < 0)
2001                 return err;
2002
2003         t = nlmsg_data(n);
2004         protocol = TC_H_MIN(t->tcm_info);
2005         prio = TC_H_MAJ(t->tcm_info);
2006         prio_allocate = false;
2007         parent = t->tcm_parent;
2008         tp = NULL;
2009         cl = 0;
2010         block = NULL;
2011
2012         if (prio == 0) {
2013                 /* If no priority is provided by the user,
2014                  * we allocate one.
2015                  */
2016                 if (n->nlmsg_flags & NLM_F_CREATE) {
2017                         prio = TC_H_MAKE(0x80000000U, 0U);
2018                         prio_allocate = true;
2019                 } else {
2020                         NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2021                         return -ENOENT;
2022                 }
2023         }
2024
2025         /* Find head of filter chain. */
2026
2027         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2028         if (err)
2029                 return err;
2030
        /* Take the rtnl mutex if: rtnl_held was set to true on a previous
         * iteration, the block is shared (no qdisc found), the qdisc is not
         * unlocked, the classifier type is not specified, or the classifier
         * itself is not unlocked.
         */
2035         if (rtnl_held ||
2036             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2037             !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2038                 rtnl_held = true;
2039                 rtnl_lock();
2040         }
2041
2042         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2043         if (err)
2044                 goto errout;
2045
2046         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2047                                  extack);
2048         if (IS_ERR(block)) {
2049                 err = PTR_ERR(block);
2050                 goto errout;
2051         }
2052
2053         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2054         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2055                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2056                 err = -EINVAL;
2057                 goto errout;
2058         }
2059         chain = tcf_chain_get(block, chain_index, true);
2060         if (!chain) {
2061                 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2062                 err = -ENOMEM;
2063                 goto errout;
2064         }
2065
2066         mutex_lock(&chain->filter_chain_lock);
2067         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2068                                prio, prio_allocate);
2069         if (IS_ERR(tp)) {
2070                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2071                 err = PTR_ERR(tp);
2072                 goto errout_locked;
2073         }
2074
2075         if (tp == NULL) {
2076                 struct tcf_proto *tp_new = NULL;
2077
2078                 if (chain->flushing) {
2079                         err = -EAGAIN;
2080                         goto errout_locked;
2081                 }
2082
2083                 /* Proto-tcf does not exist, create new one */
2084
2085                 if (tca[TCA_KIND] == NULL || !protocol) {
2086                         NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2087                         err = -EINVAL;
2088                         goto errout_locked;
2089                 }
2090
2091                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2092                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2093                         err = -ENOENT;
2094                         goto errout_locked;
2095                 }
2096
2097                 if (prio_allocate)
2098                         prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2099                                                                &chain_info));
2100
2101                 mutex_unlock(&chain->filter_chain_lock);
2102                 tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
2103                                           protocol, prio, chain, rtnl_held,
2104                                           extack);
2105                 if (IS_ERR(tp_new)) {
2106                         err = PTR_ERR(tp_new);
2107                         goto errout_tp;
2108                 }
2109
2110                 tp_created = 1;
2111                 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2112                                                 rtnl_held);
2113                 if (IS_ERR(tp)) {
2114                         err = PTR_ERR(tp);
2115                         goto errout_tp;
2116                 }
2117         } else {
2118                 mutex_unlock(&chain->filter_chain_lock);
2119         }
2120
2121         if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2122                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2123                 err = -EINVAL;
2124                 goto errout;
2125         }
2126
2127         fh = tp->ops->get(tp, t->tcm_handle);
2128
2129         if (!fh) {
2130                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2131                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2132                         err = -ENOENT;
2133                         goto errout;
2134                 }
2135         } else if (n->nlmsg_flags & NLM_F_EXCL) {
2136                 tfilter_put(tp, fh);
2137                 NL_SET_ERR_MSG(extack, "Filter already exists");
2138                 err = -EEXIST;
2139                 goto errout;
2140         }
2141
2142         if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2143                 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2144                 err = -EINVAL;
2145                 goto errout;
2146         }
2147
2148         err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2149                               n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2150                               rtnl_held, extack);
        if (err == 0) {
                tfilter_notify(net, skb, n, tp, block, q, parent, fh,
                               RTM_NEWTFILTER, false, rtnl_held);
                tfilter_put(tp, fh);
                /* q pointer is NULL for shared blocks */
                if (q)
                        q->flags &= ~TCQ_F_CAN_BYPASS;
        }
2157
2158 errout:
2159         if (err && tp_created)
2160                 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2161 errout_tp:
2162         if (chain) {
2163                 if (tp && !IS_ERR(tp))
2164                         tcf_proto_put(tp, rtnl_held, NULL);
2165                 if (!tp_created)
2166                         tcf_chain_put(chain);
2167         }
2168         tcf_block_release(q, block, rtnl_held);
2169
2170         if (rtnl_held)
2171                 rtnl_unlock();
2172
2173         if (err == -EAGAIN) {
2174                 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2175                  * of target chain.
2176                  */
2177                 rtnl_held = true;
2178                 /* Replay the request. */
2179                 goto replay;
2180         }
2181         return err;
2182
2183 errout_locked:
2184         mutex_unlock(&chain->filter_chain_lock);
2185         goto errout;
2186 }
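
/* This handler serves RTM_NEWTFILTER requests, e.g. an (illustrative)
 * iproute2 invocation such as:
 *
 *	tc filter add dev eth0 ingress protocol ip prio 10 \
 *		flower dst_ip 192.0.2.1 action drop
 *
 * Omitting "prio" makes the kernel auto-allocate one (the prio_allocate
 * path above), provided NLM_F_CREATE is set.
 */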
2187
2188 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2189                           struct netlink_ext_ack *extack)
2190 {
2191         struct net *net = sock_net(skb->sk);
2192         struct nlattr *tca[TCA_MAX + 1];
2193         struct tcmsg *t;
2194         u32 protocol;
2195         u32 prio;
2196         u32 parent;
2197         u32 chain_index;
2198         struct Qdisc *q = NULL;
2199         struct tcf_chain_info chain_info;
2200         struct tcf_chain *chain = NULL;
2201         struct tcf_block *block = NULL;
2202         struct tcf_proto *tp = NULL;
2203         unsigned long cl = 0;
2204         void *fh = NULL;
2205         int err;
2206         bool rtnl_held = false;
2207
2208         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2209                 return -EPERM;
2210
2211         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2212                                      rtm_tca_policy, extack);
2213         if (err < 0)
2214                 return err;
2215
2216         t = nlmsg_data(n);
2217         protocol = TC_H_MIN(t->tcm_info);
2218         prio = TC_H_MAJ(t->tcm_info);
2219         parent = t->tcm_parent;
2220
2221         if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2222                 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2223                 return -ENOENT;
2224         }
2225
2226         /* Find head of filter chain. */
2227
2228         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2229         if (err)
2230                 return err;
2231
        /* Take the rtnl mutex if: the whole chain is being flushed, the
         * block is shared (no qdisc found), the qdisc is not unlocked, the
         * classifier type is not specified, or the classifier itself is not
         * unlocked.
         */
2236         if (!prio ||
2237             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2238             !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2239                 rtnl_held = true;
2240                 rtnl_lock();
2241         }
2242
2243         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2244         if (err)
2245                 goto errout;
2246
2247         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2248                                  extack);
2249         if (IS_ERR(block)) {
2250                 err = PTR_ERR(block);
2251                 goto errout;
2252         }
2253
2254         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2255         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2256                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2257                 err = -EINVAL;
2258                 goto errout;
2259         }
2260         chain = tcf_chain_get(block, chain_index, false);
2261         if (!chain) {
2262                 /* User requested flush on non-existent chain. Nothing to do,
2263                  * so just return success.
2264                  */
2265                 if (prio == 0) {
2266                         err = 0;
2267                         goto errout;
2268                 }
2269                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2270                 err = -ENOENT;
2271                 goto errout;
2272         }
2273
2274         if (prio == 0) {
2275                 tfilter_notify_chain(net, skb, block, q, parent, n,
2276                                      chain, RTM_DELTFILTER, rtnl_held);
2277                 tcf_chain_flush(chain, rtnl_held);
2278                 err = 0;
2279                 goto errout;
2280         }
2281
2282         mutex_lock(&chain->filter_chain_lock);
2283         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2284                                prio, false);
2285         if (!tp || IS_ERR(tp)) {
2286                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2287                 err = tp ? PTR_ERR(tp) : -ENOENT;
2288                 goto errout_locked;
2289         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2290                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2291                 err = -EINVAL;
2292                 goto errout_locked;
2293         } else if (t->tcm_handle == 0) {
2294                 tcf_chain_tp_remove(chain, &chain_info, tp);
2295                 mutex_unlock(&chain->filter_chain_lock);
2296
2297                 tcf_proto_put(tp, rtnl_held, NULL);
2298                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2299                                RTM_DELTFILTER, false, rtnl_held);
2300                 err = 0;
2301                 goto errout;
2302         }
2303         mutex_unlock(&chain->filter_chain_lock);
2304
2305         fh = tp->ops->get(tp, t->tcm_handle);
2306
2307         if (!fh) {
2308                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2309                 err = -ENOENT;
2310         } else {
2311                 bool last;
2312
2313                 err = tfilter_del_notify(net, skb, n, tp, block,
2314                                          q, parent, fh, false, &last,
2315                                          rtnl_held, extack);
2316
2317                 if (err)
2318                         goto errout;
2319                 if (last)
2320                         tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2321         }
2322
2323 errout:
2324         if (chain) {
2325                 if (tp && !IS_ERR(tp))
2326                         tcf_proto_put(tp, rtnl_held, NULL);
2327                 tcf_chain_put(chain);
2328         }
2329         tcf_block_release(q, block, rtnl_held);
2330
2331         if (rtnl_held)
2332                 rtnl_unlock();
2333
2334         return err;
2335
2336 errout_locked:
2337         mutex_unlock(&chain->filter_chain_lock);
2338         goto errout;
2339 }
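
/* Deletion comes in two flavours: with a priority it targets a single proto
 * (and, if tcm_handle is set, a single filter inside it), while prio == 0
 * flushes the whole chain.  Illustrative iproute2 equivalents:
 *
 *	tc filter del dev eth0 ingress				(flush)
 *	tc filter del dev eth0 ingress prio 10 protocol ip flower
 */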
2340
2341 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2342                           struct netlink_ext_ack *extack)
2343 {
2344         struct net *net = sock_net(skb->sk);
2345         struct nlattr *tca[TCA_MAX + 1];
2346         struct tcmsg *t;
2347         u32 protocol;
2348         u32 prio;
2349         u32 parent;
2350         u32 chain_index;
2351         struct Qdisc *q = NULL;
2352         struct tcf_chain_info chain_info;
2353         struct tcf_chain *chain = NULL;
2354         struct tcf_block *block = NULL;
2355         struct tcf_proto *tp = NULL;
2356         unsigned long cl = 0;
2357         void *fh = NULL;
2358         int err;
2359         bool rtnl_held = false;
2360
2361         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2362                                      rtm_tca_policy, extack);
2363         if (err < 0)
2364                 return err;
2365
2366         t = nlmsg_data(n);
2367         protocol = TC_H_MIN(t->tcm_info);
2368         prio = TC_H_MAJ(t->tcm_info);
2369         parent = t->tcm_parent;
2370
2371         if (prio == 0) {
2372                 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2373                 return -ENOENT;
2374         }
2375
2376         /* Find head of filter chain. */
2377
2378         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2379         if (err)
2380                 return err;
2381
        /* Take the rtnl mutex if: the block is shared (no qdisc found), the
         * qdisc is not unlocked, the classifier type is not specified, or
         * the classifier itself is not unlocked.
         */
2386         if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2387             !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2388                 rtnl_held = true;
2389                 rtnl_lock();
2390         }
2391
2392         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2393         if (err)
2394                 goto errout;
2395
2396         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2397                                  extack);
2398         if (IS_ERR(block)) {
2399                 err = PTR_ERR(block);
2400                 goto errout;
2401         }
2402
2403         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2404         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2405                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2406                 err = -EINVAL;
2407                 goto errout;
2408         }
2409         chain = tcf_chain_get(block, chain_index, false);
2410         if (!chain) {
2411                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2412                 err = -EINVAL;
2413                 goto errout;
2414         }
2415
2416         mutex_lock(&chain->filter_chain_lock);
2417         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2418                                prio, false);
2419         mutex_unlock(&chain->filter_chain_lock);
2420         if (!tp || IS_ERR(tp)) {
2421                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2422                 err = tp ? PTR_ERR(tp) : -ENOENT;
2423                 goto errout;
2424         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2425                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2426                 err = -EINVAL;
2427                 goto errout;
2428         }
2429
2430         fh = tp->ops->get(tp, t->tcm_handle);
2431
2432         if (!fh) {
2433                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2434                 err = -ENOENT;
2435         } else {
2436                 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2437                                      fh, RTM_NEWTFILTER, true, rtnl_held);
2438                 if (err < 0)
2439                         NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2440         }
2441
2442         tfilter_put(tp, fh);
2443 errout:
2444         if (chain) {
2445                 if (tp && !IS_ERR(tp))
2446                         tcf_proto_put(tp, rtnl_held, NULL);
2447                 tcf_chain_put(chain);
2448         }
2449         tcf_block_release(q, block, rtnl_held);
2450
2451         if (rtnl_held)
2452                 rtnl_unlock();
2453
2454         return err;
2455 }
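
/* Unlike add and delete, a get request answers only the requesting socket:
 * tfilter_notify() is called with unicast == true, so RTNLGRP_TC multicast
 * listeners are not disturbed.
 */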
2456
2457 struct tcf_dump_args {
2458         struct tcf_walker w;
2459         struct sk_buff *skb;
2460         struct netlink_callback *cb;
2461         struct tcf_block *block;
2462         struct Qdisc *q;
2463         u32 parent;
2464 };
2465
2466 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2467 {
2468         struct tcf_dump_args *a = (void *)arg;
2469         struct net *net = sock_net(a->skb->sk);
2470
2471         return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2472                              n, NETLINK_CB(a->cb->skb).portid,
2473                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2474                              RTM_NEWTFILTER, true);
2475 }
2476
2477 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2478                            struct sk_buff *skb, struct netlink_callback *cb,
2479                            long index_start, long *p_index)
2480 {
2481         struct net *net = sock_net(skb->sk);
2482         struct tcf_block *block = chain->block;
2483         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2484         struct tcf_proto *tp, *tp_prev;
2485         struct tcf_dump_args arg;
2486
2487         for (tp = __tcf_get_next_proto(chain, NULL);
2488              tp;
2489              tp_prev = tp,
2490                      tp = __tcf_get_next_proto(chain, tp),
2491                      tcf_proto_put(tp_prev, true, NULL),
2492                      (*p_index)++) {
2493                 if (*p_index < index_start)
2494                         continue;
2495                 if (TC_H_MAJ(tcm->tcm_info) &&
2496                     TC_H_MAJ(tcm->tcm_info) != tp->prio)
2497                         continue;
2498                 if (TC_H_MIN(tcm->tcm_info) &&
2499                     TC_H_MIN(tcm->tcm_info) != tp->protocol)
2500                         continue;
2501                 if (*p_index > index_start)
2502                         memset(&cb->args[1], 0,
2503                                sizeof(cb->args) - sizeof(cb->args[0]));
2504                 if (cb->args[1] == 0) {
2505                         if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2506                                           NETLINK_CB(cb->skb).portid,
2507                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
2508                                           RTM_NEWTFILTER, true) <= 0)
2509                                 goto errout;
2510                         cb->args[1] = 1;
2511                 }
2512                 if (!tp->ops->walk)
2513                         continue;
2514                 arg.w.fn = tcf_node_dump;
2515                 arg.skb = skb;
2516                 arg.cb = cb;
2517                 arg.block = block;
2518                 arg.q = q;
2519                 arg.parent = parent;
2520                 arg.w.stop = 0;
2521                 arg.w.skip = cb->args[1] - 1;
2522                 arg.w.count = 0;
2523                 arg.w.cookie = cb->args[2];
2524                 tp->ops->walk(tp, &arg.w, true);
2525                 cb->args[2] = arg.w.cookie;
2526                 cb->args[1] = arg.w.count + 1;
2527                 if (arg.w.stop)
2528                         goto errout;
2529         }
2530         return true;
2531
2532 errout:
2533         tcf_proto_put(tp, true, NULL);
2534         return false;
2535 }
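
/* Dump resumption protocol: cb->args[0] counts the protos already dumped
 * completely (maintained by the caller via *p_index), cb->args[1] tracks
 * progress inside the current proto (0 means its node has not been emitted
 * yet), and cb->args[2] carries the classifier's private walk cookie
 * between dump invocations.
 */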
2536
2537 /* called with RTNL */
2538 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2539 {
2540         struct tcf_chain *chain, *chain_prev;
2541         struct net *net = sock_net(skb->sk);
2542         struct nlattr *tca[TCA_MAX + 1];
2543         struct Qdisc *q = NULL;
2544         struct tcf_block *block;
2545         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2546         long index_start;
2547         long index;
2548         u32 parent;
2549         int err;
2550
2551         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2552                 return skb->len;
2553
2554         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2555                                      NULL, cb->extack);
2556         if (err)
2557                 return err;
2558
2559         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2560                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2561                 if (!block)
2562                         goto out;
                /* If we work with a block index, q is NULL and the parent
                 * value will never be used in the following code. The check
                 * in tcf_fill_node prevents it, but the compiler cannot see
                 * that far, so set parent to zero to silence the warning
                 * about parent being uninitialized.
                 */
2569                 parent = 0;
2570         } else {
2571                 const struct Qdisc_class_ops *cops;
2572                 struct net_device *dev;
2573                 unsigned long cl = 0;
2574
2575                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2576                 if (!dev)
2577                         return skb->len;
2578
2579                 parent = tcm->tcm_parent;
2580                 if (!parent) {
2581                         q = dev->qdisc;
2582                         parent = q->handle;
2583                 } else {
2584                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2585                 }
2586                 if (!q)
2587                         goto out;
2588                 cops = q->ops->cl_ops;
2589                 if (!cops)
2590                         goto out;
2591                 if (!cops->tcf_block)
2592                         goto out;
2593                 if (TC_H_MIN(tcm->tcm_parent)) {
2594                         cl = cops->find(q, tcm->tcm_parent);
2595                         if (cl == 0)
2596                                 goto out;
2597                 }
2598                 block = cops->tcf_block(q, cl, NULL);
2599                 if (!block)
2600                         goto out;
2601                 if (tcf_block_shared(block))
2602                         q = NULL;
2603         }
2604
2605         index_start = cb->args[0];
2606         index = 0;
2607
2608         for (chain = __tcf_get_next_chain(block, NULL);
2609              chain;
2610              chain_prev = chain,
2611                      chain = __tcf_get_next_chain(block, chain),
2612                      tcf_chain_put(chain_prev)) {
2613                 if (tca[TCA_CHAIN] &&
2614                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2615                         continue;
2616                 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2617                                     index_start, &index)) {
2618                         tcf_chain_put(chain);
2619                         err = -EMSGSIZE;
2620                         break;
2621                 }
2622         }
2623
2624         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2625                 tcf_block_refcnt_put(block, true);
2626         cb->args[0] = index;
2627
2628 out:
        /* If we made no progress, the error (EMSGSIZE) is real */
2630         if (skb->len == 0 && err)
2631                 return err;
2632         return skb->len;
2633 }
2634
2635 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2636                               void *tmplt_priv, u32 chain_index,
2637                               struct net *net, struct sk_buff *skb,
2638                               struct tcf_block *block,
2639                               u32 portid, u32 seq, u16 flags, int event)
2640 {
2641         unsigned char *b = skb_tail_pointer(skb);
2642         const struct tcf_proto_ops *ops;
2643         struct nlmsghdr *nlh;
2644         struct tcmsg *tcm;
2645         void *priv;
2646
2647         ops = tmplt_ops;
2648         priv = tmplt_priv;
2649
2650         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2651         if (!nlh)
2652                 goto out_nlmsg_trim;
2653         tcm = nlmsg_data(nlh);
2654         tcm->tcm_family = AF_UNSPEC;
2655         tcm->tcm__pad1 = 0;
2656         tcm->tcm__pad2 = 0;
2657         tcm->tcm_handle = 0;
2658         if (block->q) {
2659                 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2660                 tcm->tcm_parent = block->q->handle;
2661         } else {
2662                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2663                 tcm->tcm_block_index = block->index;
2664         }
2665
2666         if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2667                 goto nla_put_failure;
2668
2669         if (ops) {
2670                 if (nla_put_string(skb, TCA_KIND, ops->kind))
2671                         goto nla_put_failure;
2672                 if (ops->tmplt_dump(skb, net, priv) < 0)
2673                         goto nla_put_failure;
2674         }
2675
2676         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2677         return skb->len;
2678
2679 out_nlmsg_trim:
2680 nla_put_failure:
2681         nlmsg_trim(skb, b);
2682         return -EMSGSIZE;
2683 }
2684
2685 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2686                            u32 seq, u16 flags, int event, bool unicast)
2687 {
2688         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2689         struct tcf_block *block = chain->block;
2690         struct net *net = block->net;
2691         struct sk_buff *skb;
2692         int err = 0;
2693
2694         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2695         if (!skb)
2696                 return -ENOBUFS;
2697
2698         if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2699                                chain->index, net, skb, block, portid,
2700                                seq, flags, event) <= 0) {
2701                 kfree_skb(skb);
2702                 return -EINVAL;
2703         }
2704
2705         if (unicast)
2706                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2707         else
2708                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2709                                      flags & NLM_F_ECHO);
2710
2711         if (err > 0)
2712                 err = 0;
2713         return err;
2714 }
2715
2716 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2717                                   void *tmplt_priv, u32 chain_index,
2718                                   struct tcf_block *block, struct sk_buff *oskb,
2719                                   u32 seq, u16 flags, bool unicast)
2720 {
2721         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2722         struct net *net = block->net;
2723         struct sk_buff *skb;
2724
2725         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2726         if (!skb)
2727                 return -ENOBUFS;
2728
2729         if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2730                                block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2731                 kfree_skb(skb);
2732                 return -EINVAL;
2733         }
2734
2735         if (unicast)
2736                 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2737
2738         return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2739 }
2740
2741 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2742                               struct nlattr **tca,
2743                               struct netlink_ext_ack *extack)
2744 {
2745         const struct tcf_proto_ops *ops;
2746         void *tmplt_priv;
2747
2748         /* If kind is not set, user did not specify template. */
2749         if (!tca[TCA_KIND])
2750                 return 0;
2751
2752         ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
2753         if (IS_ERR(ops))
2754                 return PTR_ERR(ops);
2755         if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2756                 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2757                 return -EOPNOTSUPP;
2758         }
2759
2760         tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2761         if (IS_ERR(tmplt_priv)) {
2762                 module_put(ops->owner);
2763                 return PTR_ERR(tmplt_priv);
2764         }
2765         chain->tmplt_ops = ops;
2766         chain->tmplt_priv = tmplt_priv;
2767         return 0;
2768 }
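
/* Chain templates pin down the kind (and, depending on the classifier, the
 * permitted match layout) of every filter later added to the chain.  An
 * illustrative iproute2 command that ends up here:
 *
 *	tc chain add dev eth0 ingress protocol ip \
 *		flower dst_ip 192.0.2.0/24
 *
 * Only classifiers providing all three tmplt_* ops, such as flower, can be
 * used as a template.
 */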
2769
2770 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2771                                void *tmplt_priv)
2772 {
        /* If template ops were never set, there is nothing to destroy. */
2774         if (!tmplt_ops)
2775                 return;
2776
2777         tmplt_ops->tmplt_destroy(tmplt_priv);
2778         module_put(tmplt_ops->owner);
2779 }
2780
2781 /* Add/delete/get a chain */
2782
2783 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2784                         struct netlink_ext_ack *extack)
2785 {
2786         struct net *net = sock_net(skb->sk);
2787         struct nlattr *tca[TCA_MAX + 1];
2788         struct tcmsg *t;
2789         u32 parent;
2790         u32 chain_index;
2791         struct Qdisc *q = NULL;
2792         struct tcf_chain *chain = NULL;
2793         struct tcf_block *block;
2794         unsigned long cl;
2795         int err;
2796
2797         if (n->nlmsg_type != RTM_GETCHAIN &&
2798             !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2799                 return -EPERM;
2800
2801 replay:
2802         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2803                                      rtm_tca_policy, extack);
2804         if (err < 0)
2805                 return err;
2806
2807         t = nlmsg_data(n);
2808         parent = t->tcm_parent;
2809         cl = 0;
2810
2811         block = tcf_block_find(net, &q, &parent, &cl,
2812                                t->tcm_ifindex, t->tcm_block_index, extack);
2813         if (IS_ERR(block))
2814                 return PTR_ERR(block);
2815
2816         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2817         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2818                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2819                 err = -EINVAL;
2820                 goto errout_block;
2821         }
2822
2823         mutex_lock(&block->lock);
2824         chain = tcf_chain_lookup(block, chain_index);
2825         if (n->nlmsg_type == RTM_NEWCHAIN) {
2826                 if (chain) {
2827                         if (tcf_chain_held_by_acts_only(chain)) {
2828                                 /* The chain exists only because there is
2829                                  * some action referencing it.
2830                                  */
2831                                 tcf_chain_hold(chain);
2832                         } else {
2833                                 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2834                                 err = -EEXIST;
2835                                 goto errout_block_locked;
2836                         }
2837                 } else {
2838                         if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2839                                 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2840                                 err = -ENOENT;
2841                                 goto errout_block_locked;
2842                         }
2843                         chain = tcf_chain_create(block, chain_index);
2844                         if (!chain) {
2845                                 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2846                                 err = -ENOMEM;
2847                                 goto errout_block_locked;
2848                         }
2849                 }
2850         } else {
2851                 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2852                         NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2853                         err = -EINVAL;
2854                         goto errout_block_locked;
2855                 }
2856                 tcf_chain_hold(chain);
2857         }
2858
2859         if (n->nlmsg_type == RTM_NEWCHAIN) {
2860                 /* Modifying chain requires holding parent block lock. In case
2861                  * the chain was successfully added, take a reference to the
2862                  * chain. This ensures that an empty chain does not disappear at
2863                  * the end of this function.
2864                  */
2865                 tcf_chain_hold(chain);
2866                 chain->explicitly_created = true;
2867         }
2868         mutex_unlock(&block->lock);
2869
2870         switch (n->nlmsg_type) {
2871         case RTM_NEWCHAIN:
2872                 err = tc_chain_tmplt_add(chain, net, tca, extack);
2873                 if (err) {
2874                         tcf_chain_put_explicitly_created(chain);
2875                         goto errout;
2876                 }
2877
2878                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2879                                 RTM_NEWCHAIN, false);
2880                 break;
2881         case RTM_DELCHAIN:
2882                 tfilter_notify_chain(net, skb, block, q, parent, n,
2883                                      chain, RTM_DELTFILTER, true);
2884                 /* Flush the chain first as the user requested chain removal. */
2885                 tcf_chain_flush(chain, true);
2886                 /* In case the chain was successfully deleted, put a reference
2887                  * to the chain previously taken during addition.
2888                  */
2889                 tcf_chain_put_explicitly_created(chain);
2890                 break;
2891         case RTM_GETCHAIN:
2892                 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2893                                       n->nlmsg_seq, n->nlmsg_type, true);
2894                 if (err < 0)
2895                         NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2896                 break;
2897         default:
2898                 err = -EOPNOTSUPP;
2899                 NL_SET_ERR_MSG(extack, "Unsupported message type");
2900                 goto errout;
2901         }
2902
2903 errout:
2904         tcf_chain_put(chain);
2905 errout_block:
2906         tcf_block_release(q, block, true);
2907         if (err == -EAGAIN)
2908                 /* Replay the request. */
2909                 goto replay;
2910         return err;
2911
2912 errout_block_locked:
2913         mutex_unlock(&block->lock);
2914         goto errout_block;
2915 }
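
/* Chain lifetime in this handler is purely reference driven: RTM_NEWCHAIN
 * takes an extra reference (marking the chain explicitly_created) so that
 * an empty chain survives this function, and RTM_DELCHAIN flushes the
 * filters first and then drops that extra reference again.
 */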
2916
2917 /* called with RTNL */
2918 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2919 {
2920         struct net *net = sock_net(skb->sk);
2921         struct nlattr *tca[TCA_MAX + 1];
2922         struct Qdisc *q = NULL;
2923         struct tcf_block *block;
2924         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2925         struct tcf_chain *chain;
2926         long index_start;
2927         long index;
2928         u32 parent;
2929         int err;
2930
2931         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2932                 return skb->len;
2933
2934         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2935                                      rtm_tca_policy, cb->extack);
2936         if (err)
2937                 return err;
2938
2939         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2940                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2941                 if (!block)
2942                         goto out;
                /* If we work with a block index, q is NULL and the parent
                 * value will never be used in the following code. The check
                 * in tcf_fill_node prevents it, but the compiler cannot see
                 * that far, so set parent to zero to silence the warning
                 * about parent being uninitialized.
                 */
2949                 parent = 0;
2950         } else {
2951                 const struct Qdisc_class_ops *cops;
2952                 struct net_device *dev;
2953                 unsigned long cl = 0;
2954
2955                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2956                 if (!dev)
2957                         return skb->len;
2958
2959                 parent = tcm->tcm_parent;
2960                 if (!parent) {
2961                         q = dev->qdisc;
2962                         parent = q->handle;
2963                 } else {
2964                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2965                 }
2966                 if (!q)
2967                         goto out;
2968                 cops = q->ops->cl_ops;
2969                 if (!cops)
2970                         goto out;
2971                 if (!cops->tcf_block)
2972                         goto out;
2973                 if (TC_H_MIN(tcm->tcm_parent)) {
2974                         cl = cops->find(q, tcm->tcm_parent);
2975                         if (cl == 0)
2976                                 goto out;
2977                 }
2978                 block = cops->tcf_block(q, cl, NULL);
2979                 if (!block)
2980                         goto out;
2981                 if (tcf_block_shared(block))
2982                         q = NULL;
2983         }
2984
2985         index_start = cb->args[0];
2986         index = 0;
2987
2988         mutex_lock(&block->lock);
2989         list_for_each_entry(chain, &block->chain_list, list) {
2990                 if ((tca[TCA_CHAIN] &&
2991                      nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2992                         continue;
2993                 if (index < index_start) {
2994                         index++;
2995                         continue;
2996                 }
2997                 if (tcf_chain_held_by_acts_only(chain))
2998                         continue;
2999                 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3000                                          chain->index, net, skb, block,
3001                                          NETLINK_CB(cb->skb).portid,
3002                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
3003                                          RTM_NEWCHAIN);
3004                 if (err <= 0)
3005                         break;
3006                 index++;
3007         }
3008         mutex_unlock(&block->lock);
3009
3010         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3011                 tcf_block_refcnt_put(block, true);
3012         cb->args[0] = index;
3013
3014 out:
3015         /* If we made no progress, the error (EMSGSIZE) is real */
3016         if (skb->len == 0 && err)
3017                 return err;
3018         return skb->len;
3019 }
3020
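/* Unbind and free every action attached to @exts and release the action
 * array itself. A no-op when CONFIG_NET_CLS_ACT is not configured.
 */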
3021 void tcf_exts_destroy(struct tcf_exts *exts)
3022 {
3023 #ifdef CONFIG_NET_CLS_ACT
3024         tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3025         kfree(exts->actions);
3026         exts->nr_actions = 0;
3027 #endif
3028 }
3029 EXPORT_SYMBOL(tcf_exts_destroy);
3030
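/* Parse and bind the actions carried in a filter change request. A legacy
 * "police" attribute takes the TCA_OLD_COMPAT path and occupies a single
 * action slot; otherwise the whole action list is set up through
 * tcf_action_init(). Without CONFIG_NET_CLS_ACT, any action or police
 * attribute is rejected with -EOPNOTSUPP.
 *
 * Illustrative sketch of the usual call sequence from a classifier's
 * ->change() callback; the TCA_FOO_* attribute ids and the "f" filter
 * struct are hypothetical, not defined in this file:
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 *	tcf_exts_change(&f->exts, &e);
 */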
3031 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3032                       struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
3033                       bool rtnl_held, struct netlink_ext_ack *extack)
3034 {
3035 #ifdef CONFIG_NET_CLS_ACT
3036         {
3037                 struct tc_action *act;
3038                 size_t attr_size = 0;
3039
3040                 if (exts->police && tb[exts->police]) {
3041                         act = tcf_action_init_1(net, tp, tb[exts->police],
3042                                                 rate_tlv, "police", ovr,
3043                                                 TCA_ACT_BIND, rtnl_held,
3044                                                 extack);
3045                         if (IS_ERR(act))
3046                                 return PTR_ERR(act);
3047
3048                         act->type = exts->type = TCA_OLD_COMPAT;
3049                         exts->actions[0] = act;
3050                         exts->nr_actions = 1;
3051                 } else if (exts->action && tb[exts->action]) {
3052                         int err;
3053
3054                         err = tcf_action_init(net, tp, tb[exts->action],
3055                                               rate_tlv, NULL, ovr, TCA_ACT_BIND,
3056                                               exts->actions, &attr_size,
3057                                               rtnl_held, extack);
3058                         if (err < 0)
3059                                 return err;
3060                         exts->nr_actions = err;
3061                 }
3062         }
3063 #else
3064         if ((exts->action && tb[exts->action]) ||
3065             (exts->police && tb[exts->police])) {
3066                 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3067                 return -EOPNOTSUPP;
3068         }
3069 #endif
3070
3071         return 0;
3072 }
3073 EXPORT_SYMBOL(tcf_exts_validate);
3074
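/* Commit a freshly validated action set: @dst takes over the contents of
 * @src, and whatever @dst held before is destroyed.
 */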
3075 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3076 {
3077 #ifdef CONFIG_NET_CLS_ACT
3078         struct tcf_exts old = *dst;
3079
3080         *dst = *src;
3081         tcf_exts_destroy(&old);
3082 #endif
3083 }
3084 EXPORT_SYMBOL(tcf_exts_change);
3085
3086 #ifdef CONFIG_NET_CLS_ACT
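/* Return the first configured action, or NULL if none are attached. */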
3087 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3088 {
3089         if (exts->nr_actions == 0)
3090                 return NULL;
3091         else
3092                 return exts->actions[0];
3093 }
3094 #endif
3095
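/* Emit the actions attached to @exts into a netlink dump. New-style action
 * lists are nested under the classifier's action attribute; a
 * TCA_OLD_COMPAT entry is emitted through the legacy police attribute
 * instead. Returns 0 on success or -1 if the skb ran out of room.
 */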
3096 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3097 {
3098 #ifdef CONFIG_NET_CLS_ACT
3099         struct nlattr *nest;
3100
3101         if (exts->action && tcf_exts_has_actions(exts)) {
3102                 /*
3103                  * Again, for backward-compatible mode: we want to
3104                  * work with both the old and new modes of entering
3105                  * tc data, even if iproute2 is newer. - jhs
3106                  */
3107                 if (exts->type != TCA_OLD_COMPAT) {
3108                         nest = nla_nest_start_noflag(skb, exts->action);
3109                         if (nest == NULL)
3110                                 goto nla_put_failure;
3111
3112                         if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
3113                                 goto nla_put_failure;
3114                         nla_nest_end(skb, nest);
3115                 } else if (exts->police) {
3116                         struct tc_action *act = tcf_exts_first_act(exts);
3117                         nest = nla_nest_start_noflag(skb, exts->police);
3118                         if (nest == NULL || !act)
3119                                 goto nla_put_failure;
3120                         if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3121                                 goto nla_put_failure;
3122                         nla_nest_end(skb, nest);
3123                 }
3124         }
3125         return 0;
3126
3127 nla_put_failure:
3128         nla_nest_cancel(skb, nest);
3129         return -1;
3130 #else
3131         return 0;
3132 #endif
3133 }
3134 EXPORT_SYMBOL(tcf_exts_dump);
3135
3136
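/* Append the statistics of the first action (the only one in the legacy
 * police case) to the dump; returns -1 if copying the stats failed.
 */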
3137 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3138 {
3139 #ifdef CONFIG_NET_CLS_ACT
3140         struct tc_action *a = tcf_exts_first_act(exts);
3141         if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3142                 return -1;
3143 #endif
3144         return 0;
3145 }
3146 EXPORT_SYMBOL(tcf_exts_dump_stats);
3147
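/* Run every offload callback registered on @block for the given setup
 * @type and @type_data. With @err_stop set, a block that still has
 * non-offload-capable devices bound to it, or the first callback error,
 * aborts the walk and the error is returned; otherwise failing callbacks
 * are skipped and the number of successful callbacks is returned.
 *
 * Illustrative caller-side sketch (the cls_flower offload request and the
 * skip_sw/flags handling shown here are an assumption about typical use,
 * not something defined in this file):
 *
 *	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
 *	if (err < 0)
 *		return err;
 *	else if (err > 0)
 *		f->flags |= TCA_CLS_FLAGS_IN_HW;
 */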
3148 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3149                      void *type_data, bool err_stop)
3150 {
3151         struct flow_block_cb *block_cb;
3152         int ok_count = 0;
3153         int err;
3154
3155         /* Make sure all netdevs sharing this block are offload-capable. */
3156         if (block->nooffloaddevcnt && err_stop)
3157                 return -EOPNOTSUPP;
3158
3159         list_for_each_entry(block_cb, &block->cb_list, list) {
3160                 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3161                 if (err) {
3162                         if (err_stop)
3163                                 return err;
3164                 } else {
3165                         ok_count++;
3166                 }
3167         }
3168         return ok_count;
3169 }
3170 EXPORT_SYMBOL(tc_setup_cb_call);
3171
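/* Translate the TC actions in @exts into the driver-facing flow_action
 * representation. Each pedit key expands into an entry of its own, so
 * @flow_action is expected to have been sized with tcf_exts_num_actions().
 * Any action without a flow_action equivalent yields -EOPNOTSUPP.
 *
 * Illustrative sketch of how a classifier offload path might build the
 * rule (error unwinding omitted; "exts" is the filter's extension set):
 *
 *	struct flow_rule *rule;
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *	if (!rule)
 *		return -ENOMEM;
 *	err = tc_setup_flow_action(&rule->action, exts);
 */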
3172 int tc_setup_flow_action(struct flow_action *flow_action,
3173                          const struct tcf_exts *exts)
3174 {
3175         const struct tc_action *act;
3176         int i, j, k;
3177
3178         if (!exts)
3179                 return 0;
3180
3181         j = 0;
3182         tcf_exts_for_each_action(i, act, exts) {
3183                 struct flow_action_entry *entry;
3184
3185                 entry = &flow_action->entries[j];
3186                 if (is_tcf_gact_ok(act)) {
3187                         entry->id = FLOW_ACTION_ACCEPT;
3188                 } else if (is_tcf_gact_shot(act)) {
3189                         entry->id = FLOW_ACTION_DROP;
3190                 } else if (is_tcf_gact_trap(act)) {
3191                         entry->id = FLOW_ACTION_TRAP;
3192                 } else if (is_tcf_gact_goto_chain(act)) {
3193                         entry->id = FLOW_ACTION_GOTO;
3194                         entry->chain_index = tcf_gact_goto_chain_index(act);
3195                 } else if (is_tcf_mirred_egress_redirect(act)) {
3196                         entry->id = FLOW_ACTION_REDIRECT;
3197                         entry->dev = tcf_mirred_dev(act);
3198                 } else if (is_tcf_mirred_egress_mirror(act)) {
3199                         entry->id = FLOW_ACTION_MIRRED;
3200                         entry->dev = tcf_mirred_dev(act);
3201                 } else if (is_tcf_vlan(act)) {
3202                         switch (tcf_vlan_action(act)) {
3203                         case TCA_VLAN_ACT_PUSH:
3204                                 entry->id = FLOW_ACTION_VLAN_PUSH;
3205                                 entry->vlan.vid = tcf_vlan_push_vid(act);
3206                                 entry->vlan.proto = tcf_vlan_push_proto(act);
3207                                 entry->vlan.prio = tcf_vlan_push_prio(act);
3208                                 break;
3209                         case TCA_VLAN_ACT_POP:
3210                                 entry->id = FLOW_ACTION_VLAN_POP;
3211                                 break;
3212                         case TCA_VLAN_ACT_MODIFY:
3213                                 entry->id = FLOW_ACTION_VLAN_MANGLE;
3214                                 entry->vlan.vid = tcf_vlan_push_vid(act);
3215                                 entry->vlan.proto = tcf_vlan_push_proto(act);
3216                                 entry->vlan.prio = tcf_vlan_push_prio(act);
3217                                 break;
3218                         default:
3219                                 goto err_out;
3220                         }
3221                 } else if (is_tcf_tunnel_set(act)) {
3222                         entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3223                         entry->tunnel = tcf_tunnel_info(act);
3224                 } else if (is_tcf_tunnel_release(act)) {
3225                         entry->id = FLOW_ACTION_TUNNEL_DECAP;
3226                 } else if (is_tcf_pedit(act)) {
3227                         for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3228                                 switch (tcf_pedit_cmd(act, k)) {
3229                                 case TCA_PEDIT_KEY_EX_CMD_SET:
3230                                         entry->id = FLOW_ACTION_MANGLE;
3231                                         break;
3232                                 case TCA_PEDIT_KEY_EX_CMD_ADD:
3233                                         entry->id = FLOW_ACTION_ADD;
3234                                         break;
3235                                 default:
3236                                         goto err_out;
3237                                 }
3238                                 entry->mangle.htype = tcf_pedit_htype(act, k);
3239                                 entry->mangle.mask = tcf_pedit_mask(act, k);
3240                                 entry->mangle.val = tcf_pedit_val(act, k);
3241                                 entry->mangle.offset = tcf_pedit_offset(act, k);
3242                                 entry = &flow_action->entries[++j];
3243                         }
3244                 } else if (is_tcf_csum(act)) {
3245                         entry->id = FLOW_ACTION_CSUM;
3246                         entry->csum_flags = tcf_csum_update_flags(act);
3247                 } else if (is_tcf_skbedit_mark(act)) {
3248                         entry->id = FLOW_ACTION_MARK;
3249                         entry->mark = tcf_skbedit_mark(act);
3250                 } else if (is_tcf_sample(act)) {
3251                         entry->id = FLOW_ACTION_SAMPLE;
3252                         entry->sample.psample_group =
3253                                 tcf_sample_psample_group(act);
3254                         entry->sample.trunc_size = tcf_sample_trunc_size(act);
3255                         entry->sample.truncate = tcf_sample_truncate(act);
3256                         entry->sample.rate = tcf_sample_rate(act);
3257                 } else if (is_tcf_police(act)) {
3258                         entry->id = FLOW_ACTION_POLICE;
3259                         entry->police.burst = tcf_police_tcfp_burst(act);
3260                         entry->police.rate_bytes_ps =
3261                                 tcf_police_rate_bytes_ps(act);
3262                 } else if (is_tcf_ct(act)) {
3263                         entry->id = FLOW_ACTION_CT;
3264                         entry->ct.action = tcf_ct_action(act);
3265                         entry->ct.zone = tcf_ct_zone(act);
3266                 } else {
3267                         goto err_out;
3268                 }
3269
3270                 if (!is_tcf_pedit(act))
3271                         j++;
3272         }
3273         return 0;
3274 err_out:
3275         return -EOPNOTSUPP;
3276 }
3277 EXPORT_SYMBOL(tc_setup_flow_action);
3278
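/* Count how many flow_action entries the actions in @exts expand to;
 * pedit contributes one entry per key. Used to size the array handed to
 * tc_setup_flow_action().
 */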
3279 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3280 {
3281         unsigned int num_acts = 0;
3282         struct tc_action *act;
3283         int i;
3284
3285         tcf_exts_for_each_action(i, act, exts) {
3286                 if (is_tcf_pedit(act))
3287                         num_acts += tcf_pedit_nkeys(act);
3288                 else
3289                         num_acts++;
3290         }
3291         return num_acts;
3292 }
3293 EXPORT_SYMBOL(tcf_exts_num_actions);
3294
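/* Per-netns setup and teardown of the IDR used to look up tcf_block
 * instances by block index.
 */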
3295 static __net_init int tcf_net_init(struct net *net)
3296 {
3297         struct tcf_net *tn = net_generic(net, tcf_net_id);
3298
3299         spin_lock_init(&tn->idr_lock);
3300         idr_init(&tn->idr);
3301         return 0;
3302 }
3303
3304 static void __net_exit tcf_net_exit(struct net *net)
3305 {
3306         struct tcf_net *tn = net_generic(net, tcf_net_id);
3307
3308         idr_destroy(&tn->idr);
3309 }
3310
3311 static struct pernet_operations tcf_net_ops = {
3312         .init = tcf_net_init,
3313         .exit = tcf_net_exit,
3314         .id   = &tcf_net_id,
3315         .size = sizeof(struct tcf_net),
3316 };
3317
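/* Module init: create the ordered workqueue used for deferred filter
 * teardown, register the per-netns state and the indirect block callback
 * hashtable, and finally hook the tfilter and chain message types into
 * rtnetlink. The tfilter doit handlers are registered with
 * RTNL_FLAG_DOIT_UNLOCKED and take the rtnl lock themselves only when a
 * classifier requires it.
 */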
3318 static int __init tc_filter_init(void)
3319 {
3320         int err;
3321
3322         tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3323         if (!tc_filter_wq)
3324                 return -ENOMEM;
3325
3326         err = register_pernet_subsys(&tcf_net_ops);
3327         if (err)
3328                 goto err_register_pernet_subsys;
3329
3330         err = rhashtable_init(&indr_setup_block_ht,
3331                               &tc_indr_setup_block_ht_params);
3332         if (err)
3333                 goto err_rhash_setup_block_ht;
3334
3335         rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3336                       RTNL_FLAG_DOIT_UNLOCKED);
3337         rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3338                       RTNL_FLAG_DOIT_UNLOCKED);
3339         rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3340                       tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3341         rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3342         rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3343         rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3344                       tc_dump_chain, 0);
3345
3346         return 0;
3347
3348 err_rhash_setup_block_ht:
3349         unregister_pernet_subsys(&tcf_net_ops);
3350 err_register_pernet_subsys:
3351         destroy_workqueue(tc_filter_wq);
3352         return err;
3353 }
3354
3355 subsys_initcall(tc_filter_init);