/* Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <asm/unaligned.h>
/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm
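
/* Expansion example (illustrative, not part of the original source): for
 * an insn with code == BPF_ALU64 | BPF_ADD | BPF_X, dst_reg == BPF_REG_2
 * and src_reg == BPF_REG_3, a handler body "DST = DST + SRC;" expands to
 *
 *	regs[insn->dst_reg] = regs[insn->dst_reg] + regs[insn->src_reg];
 *
 * i.e. R2 += R3 on the interpreter's in-memory register file.
 */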
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
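
/* Usage sketch (assumption, not from the original file): classic BPF can
 * address data relative to the link-layer or network header via negative
 * ancillary offsets, e.g.
 *
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 0)
 *
 * loads the first byte of the MAC header; such a load reaches this helper
 * with k == SKF_LL_OFF + 0 and size == 1.
 */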
/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
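
/* Offset scheme sketch (illustrative, not from the original file): helper
 * calls are encoded as a 32-bit displacement relative to __bpf_call_base,
 * conceptually
 *
 *	insn->imm = helper_func - __bpf_call_base;
 *
 * and the JMP_CALL handler below undoes it with __bpf_call_base + insn->imm.
 * That is why this function must stay in .text and must not be omitted by
 * the compiler.
 */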
/**
 *	__sk_run_filter - run a filter on a given context
 *	@ctx: buffer to run the filter on
 *	@insn: filter to apply
 *
 * Decode and apply filter instructions to the skb->data. Return length to
 * keep, 0 for none. @ctx is the data we are operating on, @insn is the
 * array of filter instructions.
 */
static unsigned int __sk_run_filter(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
	};
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Registers used in classic BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

select_insn:
	goto *jumptable[insn->code];
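
	/* Dispatch works by computed goto rather than a switch inside a
	 * loop: every handler below ends in CONT or CONT_JMP, which
	 * advance insn and jump straight back to select_insn. An
	 * equivalent but slower sketch of the same control flow:
	 *
	 *	for (;;) {
	 *		switch (insn->code) {
	 *		case BPF_ALU64 | BPF_ADD | BPF_X:
	 *			DST += SRC;
	 *			insn++;
	 *			continue;
	 *		...
	 *		}
	 *	}
	 */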
	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
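
	/* For instance, ALU(ADD, +) above stamps out the four handlers
	 * ALU64_ADD_X, ALU_ADD_X, ALU64_ADD_K and ALU_ADD_K that the
	 * jumptable points at; the 32-bit variants truncate both operands
	 * to u32 before applying the operator.
	 */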
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = DST;
		DST = do_div(tmp, SRC);
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		tmp = DST;
		DST = do_div(tmp, IMM);
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		do_div(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		do_div(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
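
	/* Note on do_div(): it divides its first argument in place and
	 * returns the remainder, so the MOD handlers assign its return
	 * value to DST while the DIV handlers keep the quotient left
	 * behind. A division or modulo by zero does not trap; the program
	 * simply terminates with return value 0.
	 */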
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;
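
	/* Example (illustrative): BPF_ALU | BPF_END | BPF_TO_BE with
	 * imm == 16 behaves like htons() on the low 16 bits of DST, and
	 * with imm == 32 like htonl(); on big-endian hosts the TO_BE
	 * forms reduce to a truncation to the given width.
	 */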
	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
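
	/* Branch offsets are relative to the next instruction: a taken
	 * jump does insn += insn->off and then CONT_JMP's insn++, so
	 * off == 0 falls through and off == -1 re-executes the branch.
	 */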
	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
		 * appear in programs where ctx == skb. All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6:
		 * sk_convert_filter() saves it there, and the internal
		 * BPF verifier will check that BPF_R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;
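
	/* Translation example (assumption, not from the original file):
	 * the classic insn BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12), which
	 * loads the Ethernet type field, is converted to an internal insn
	 * with code BPF_LD | BPF_ABS | BPF_H and imm == 12, and lands in
	 * LD_ABS_H above with BPF_R0 = ntohs(*(u16 *) (skb->data + 12)).
	 */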
	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}
void __weak bpf_int_jit_compile(struct sk_filter *prog)
{
}
/**
 *	sk_filter_select_runtime - select execution runtime for BPF program
 *	@fp: sk_filter populated with internal BPF program
 *
 * Try to JIT the internal BPF program; if no JIT is available, fall back
 * to the interpreter. The BPF program will be executed via the
 * SK_RUN_FILTER() macro.
 */
void sk_filter_select_runtime(struct sk_filter *fp)
{
	fp->bpf_func = (void *) __sk_run_filter;

	/* Probe if internal BPF can be JITed */
	bpf_int_jit_compile(fp);
}
EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
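
/* Usage sketch (illustrative; assumes the classic-filter setup path that
 * calls into here): once a classic program has been converted via
 * sk_convert_filter(), it is activated and run roughly as
 *
 *	sk_filter_select_runtime(fp);      - picks JIT or __sk_run_filter
 *	pkt_len = SK_RUN_FILTER(fp, skb);  - invokes fp->bpf_func
 *	sk_filter_free(fp);                - on teardown
 */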
/* free internal BPF program */
void sk_filter_free(struct sk_filter *fp)
{
	bpf_jit_free(fp);
}
EXPORT_SYMBOL_GPL(sk_filter_free);