/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

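/* Example userspace usage via iproute2 (illustrative only; "bpf.o" is a
 * hypothetical ELF object holding a BPF_PROG_TYPE_SCHED_CLS program, and
 * "da" requests the direct-action mode handled below):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress bpf da obj bpf.o
 */
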
#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS	\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

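/* Map a direct-action program's return code to a TC verdict; opcodes the
 * TC layer does not understand fall back to TC_ACT_UNSPEC, which lets
 * classification continue with the next filter in the list.
 */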
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

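/* Run each attached program over the skb in list order. At ingress the
 * MAC header is pushed back on first so programs see the same layout as
 * at egress. In direct-action (exts_integrated) mode the program return
 * code is the TC verdict itself; otherwise a non-zero return selects the
 * classid and the regular TC actions run afterwards.
 */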
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

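/* Classic BPF filters keep a copy of their sock_filter ops around for
 * dumping; eBPF filters attached by fd do not, so bpf_ops doubles as
 * the cBPF/eBPF discriminator.
 */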
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

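/* Build a tc_cls_bpf_offload descriptor for @cmd and pass it to the
 * device driver through the ndo_setup_tc() hook.
 */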
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload bpf_offload = {};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSBPF;
	offload.cls_bpf = &bpf_offload;

	bpf_offload.command = cmd;
	bpf_offload.exts = &prog->exts;
	bpf_offload.prog = prog->filter;
	bpf_offload.name = prog->bpf_name;
	bpf_offload.exts_integrated = prog->exts_integrated;
	bpf_offload.gen_flags = prog->gen_flags;

	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					     tp->protocol, &offload);
}

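/* Decide whether the hardware state for @prog needs to be added,
 * replaced or destroyed, honouring the skip_hw/skip_sw flags. When a
 * software fallback is allowed, an offload failure is not fatal.
 */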
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, tp, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (!tc_should_offload(dev, tp, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return skip_sw ? ret : 0;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	__cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
}

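/* Unlink a filter and defer its destruction by one RCU grace period so
 * that readers still walking the plist never touch freed memory.
 */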
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	__cls_bpf_delete(tp, (struct cls_bpf_prog *) arg);
	return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog);

	kfree_rcu(head, rcu);
	return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

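/* Build a classic BPF program from the TCA_BPF_OPS/TCA_BPF_OPS_LEN
 * attributes; bpf_prog_create() validates the ops and may JIT them.
 */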
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

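/* Take a reference on an eBPF program that was already loaded via
 * bpf(2) and is referenced by fd; it must be of type SCHED_CLS.
 */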
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

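/* Parse and validate the filter attributes: exactly one of classic (OPS)
 * or extended (FD) BPF must be present and all flags must be supported;
 * on success the validated exts replace the old ones.
 */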
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		return ret;
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;

errout:
	tcf_exts_destroy(&exts);
	return ret;
}

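/* Allocate an unused handle by linearly probing from the last value
 * handed out, giving up after 0x80000000 attempts.
 */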
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

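/* Create or replace a filter instance. A replacement is fully built and
 * offloaded before it is swapped in with list_replace_rcu(), so the
 * datapath always sees a consistent program.
 */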
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
				      ovr);
	if (ret < 0)
		goto errout;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret) {
		__cls_bpf_delete_prog(prog);
		return ret;
	}

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

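/* Dump the raw sock_filter ops so userspace can reconstruct the classic
 * BPF program.
 */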
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

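/* eBPF programs are dumped by name and tag only; the instructions
 * themselves stay in the kernel.
 */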
static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

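/* Standard tcf walker: skip the first arg->skip entries and stop as soon
 * as the callback returns a negative value.
 */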
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);