/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */

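/* A tcindex instance stores results either in a "perfect" hash, i.e. a
 * flat array indexed directly by the masked and shifted key, or, when
 * the key space is too large for that, in an "imperfect" hash table of
 * filter chains keyed by (key % hash).
 */
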
struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	union {
		struct work_struct	work;
		struct rcu_head		rcu;
	};
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	struct rcu_head rcu;
};

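/* A result slot counts as "set" once it carries actions or a class ID;
 * empty perfect-hash slots fail this test and lookups skip them.
 */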
static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

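/* The lookup key is derived from skb->tc_index by ANDing with the
 * configured mask and shifting right, e.g. mask 0xfc and shift 2 map
 * tc_index 0xb8 to key (0xb8 & 0xfc) >> 2 == 0x2e.
 */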
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}

static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	rcu_assign_pointer(tp->root, p);
	return 0;
}

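/* Filter extensions may hold actions whose cleanup can sleep, so they
 * must not be destroyed directly from an RCU callback (softirq
 * context). The callbacks below therefore only queue work; the actual
 * tcf_exts teardown runs later in process context under RTNL.
 */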
static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(work, struct tcindex_filter_result, work);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void tcindex_destroy_rexts(struct rcu_head *head)
{
	struct tcindex_filter_result *r;

	r = container_of(head, struct tcindex_filter_result, rcu);
	INIT_WORK(&r->work, tcindex_destroy_rexts_work);
	tcf_queue_work(&r->work);
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(work, struct tcindex_filter,
						work);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}

static void tcindex_destroy_fexts(struct rcu_head *head)
{
	struct tcindex_filter *f = container_of(head, struct tcindex_filter,
						rcu);

	INIT_WORK(&f->work, tcindex_destroy_fexts_work);
	tcf_queue_work(&f->work);
}

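/* Deletion unbinds the result and, for the imperfect hash, unlinks the
 * filter from its chain; extensions and memory are released only after
 * an RCU grace period so concurrent lookups remain safe.
 */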
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
			  struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			call_rcu(&f->rcu, tcindex_destroy_fexts);
		else
			__tcindex_destroy_fexts(f);
	} else {
		if (tcf_exts_get_net(&r->exts))
			call_rcu(&r->rcu, tcindex_destroy_rexts);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}

static int tcindex_destroy_element(struct tcf_proto *tp,
				   void *arg, struct tcf_walker *walker)
{
	bool last;

	return tcindex_delete(tp, arg, &last, NULL);
}

static void __tcindex_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

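/* Example: with mask 0xf0 and shift 4 the largest possible key is
 * 0xf0 >> 4 == 15, so a perfect hash is valid only with hash >= 16.
 */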
static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r)
{
	memset(r, 0, sizeof(*r));
	return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}

static void __tcindex_partial_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

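/* Parameter updates follow a copy/update/swap scheme: a new
 * tcindex_data is populated from the old one plus the netlink
 * attributes and only then RCU-assigned onto tp->root, so lookups
 * always observe a consistent parameter set.
 */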
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_filter_result cr;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, extack);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(cp) < 0)
			goto errout;
		for (i = 0; i < cp->hash; i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result);
	if (err < 0)
		goto errout1;
	err = tcindex_filter_result_init(&cr);
	if (err < 0)
		goto errout1;
	if (old_r)
		cr.res = r->res;

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	err = -EBUSY;
	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;

	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);
			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr.res, base);
	}

	if (old_r)
		tcf_exts_change(&r->exts, &e);
	else
		tcf_exts_change(&cr.exts, &e);

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr.res;
	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	}

	if (oldp)
		call_rcu(&oldp->rcu, __tcindex_partial_destroy);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
errout1:
	tcf_exts_destroy(&cr.exts);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

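/* Entry point for filter creation and changes: parses the nested
 * TCA_OPTIONS attributes against tcindex_policy and applies them via
 * tcindex_set_parms().
 */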
static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr,
	       struct netlink_ext_ack *extack)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
	    "p %p,r %p,*arg %p\n",
	    tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);

	if (!opt)
		return 0;

	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr, extack);
}

static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

static void tcindex_destroy(struct tcf_proto *tp,
			    struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcf_walker walker;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
	walker.count = 0;
	walker.skip = 0;
	walker.fn = tcindex_destroy_element;
	tcindex_walk(tp, &walker);

	call_rcu(&p->rcu, __tcindex_destroy);
}

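/* Dumps either the per-instance parameters (fh == NULL) or a single
 * filter result, recovering its handle from the hash position.
 */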
static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid)
		r->res.class = cl;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.bind_class	=	tcindex_bind_class,
	.owner		=	THIS_MODULE,
};

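/* Illustrative user-space usage (not part of this file), e.g. with a
 * qdisc that sets skb->tc_index, such as dsmark:
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		tcindex mask 0xfc shift 2
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		handle 46 tcindex classid 1:1
 *
 * The first command sets mask/shift for this priority; the second binds
 * lookup key 46 to class 1:1.
 */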
static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");