/*
 * Linux Socket Filter Data Structures
 */
4 #ifndef __LINUX_FILTER_H__
5 #define __LINUX_FILTER_H__
7 #include <linux/atomic.h>
8 #include <linux/compat.h>
9 #include <linux/workqueue.h>
10 #include <uapi/linux/filter.h>
12 /* Internally used and optimized filter representation with extended
 * instruction set based on top of classic BPF.
 */
16 /* instruction classes */
17 #define BPF_ALU64 0x07 /* alu mode in double word width */
/* additional ld/ldx/stx encodings: */
20 #define BPF_DW 0x18 /* double word */
21 #define BPF_XADD 0xc0 /* exclusive add */
/* additional alu ops (NOTE(review): BPF_ARSH shares the value 0xc0 with
 * BPF_XADD above; presumably disambiguated by instruction class — the
 * class definitions are partly elided in this excerpt, verify there).
 */
24 #define BPF_MOV 0xb0 /* mov reg to reg */
25 #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
27 /* change endianness of a register */
28 #define BPF_END 0xd0 /* flags for endianness conversion: */
29 #define BPF_TO_LE 0x00 /* convert to little-endian */
30 #define BPF_TO_BE 0x08 /* convert to big-endian */
31 #define BPF_FROM_LE BPF_TO_LE /* "from" is the same operation as "to" */
32 #define BPF_FROM_BE BPF_TO_BE
/* jump encodings added on top of classic BPF: */
34 #define BPF_JNE 0x50 /* jump != */
35 #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
36 #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
37 #define BPF_CALL 0x80 /* function call */
38 #define BPF_EXIT 0x90 /* function return */
40 /* Register numbers */
/* NOTE(review): the enum defining BPF_REG_0..BPF_REG_10 and __MAX_BPF_REG
 * is elided in this excerpt; MAX_BPF_REG below refers to it.
 */
56 /* BPF has 10 general purpose 64-bit registers and stack frame. */
57 #define MAX_BPF_REG __MAX_BPF_REG
59 /* ArgX, context and stack frame pointer register positions. Note,
60 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
63 #define BPF_REG_ARG1 BPF_REG_1
64 #define BPF_REG_ARG2 BPF_REG_2
65 #define BPF_REG_ARG3 BPF_REG_3
66 #define BPF_REG_ARG4 BPF_REG_4
67 #define BPF_REG_ARG5 BPF_REG_5
68 #define BPF_REG_CTX BPF_REG_6 /* context pointer register */
69 #define BPF_REG_FP BPF_REG_10 /* stack frame pointer register */
71 /* Additional register mappings for converted user programs. */
72 #define BPF_REG_A BPF_REG_0 /* classic BPF accumulator */
73 #define BPF_REG_X BPF_REG_7 /* classic BPF index register */
74 #define BPF_REG_TMP BPF_REG_8 /* scratch for converted programs */
76 /* BPF program can access up to 512 bytes of stack space. */
77 #define MAX_BPF_STACK 512
/* Instruction-building helpers: each macro yields one struct
 * sock_filter_int compound literal.  Positional initializer order
 * matches the struct declared below: code, a_reg, x_reg, off, imm.
 */
79 /* bpf_add|sub|...: a += x, bpf_mov: a = x */
80 #define BPF_ALU64_REG(op, a, x) \
81 ((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_X, a, x, 0, 0})
82 #define BPF_ALU32_REG(op, a, x) \
83 ((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_X, a, x, 0, 0})
85 /* bpf_add|sub|...: a += imm, bpf_mov: a = imm */
86 #define BPF_ALU64_IMM(op, a, imm) \
87 ((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_K, a, 0, 0, imm})
88 #define BPF_ALU32_IMM(op, a, imm) \
89 ((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_K, a, 0, 0, imm})
91 /* R0 = *(uint *) (skb->data + off) */
92 #define BPF_LD_ABS(size, off) \
93 ((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_ABS, 0, 0, 0, off})
95 /* R0 = *(uint *) (skb->data + x + off) */
96 #define BPF_LD_IND(size, x, off) \
97 ((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_IND, 0, x, 0, off})
99 /* a = *(uint *) (x + off) */
100 #define BPF_LDX_MEM(sz, a, x, off) \
101 ((struct sock_filter_int) {BPF_LDX|BPF_SIZE(sz)|BPF_MEM, a, x, off, 0})
103 /* if (a 'op' x) goto pc+off */
104 #define BPF_JMP_REG(op, a, x, off) \
105 ((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_X, a, x, off, 0})
107 /* if (a 'op' imm) goto pc+off */
108 #define BPF_JMP_IMM(op, a, imm, off) \
109 ((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_K, a, 0, off, imm})
/* program exit (function return, see BPF_EXIT above) */
111 #define BPF_EXIT_INSN() \
112 ((struct sock_filter_int) {BPF_JMP|BPF_EXIT, 0, 0, 0, 0})
/* NOTE(review): the body of size_to_bpf() is elided in this excerpt;
 * only the prototype line is visible — do not infer its return values.
 */
114 static inline int size_to_bpf(int size)
/* Macro to invoke filter function.
 *
 * Runs @filter->bpf_func (interpreter or JIT image) on @ctx, passing the
 * internal instruction array as the second argument.
 *
 * The @filter argument is parenthesized so that the macro also works when
 * invoked with a non-trivial pointer expression (e.g. &flt); the original
 * expansion (*filter->bpf_func) would mis-parse such callers.
 */
#define SK_RUN_FILTER(filter, ctx) \
	(*(filter)->bpf_func)(ctx, (filter)->insnsi)
/* One internal (extended) BPF instruction: a fixed 8-byte encoding
 * (1 byte opcode, two 4-bit register fields, 16-bit offset, 32-bit
 * immediate).  NOTE(review): the struct's closing brace is elided in
 * this excerpt.
 */
133 struct sock_filter_int {
134 __u8 code; /* opcode */
135 __u8 a_reg:4; /* dest register */
136 __u8 x_reg:4; /* source register */
137 __s16 off; /* signed offset */
138 __s32 imm; /* signed immediate constant */
142 /* A struct sock_filter is architecture independent. */
/* 32-bit-compat view of struct sock_fprog: userland pointer carried as
 * compat_uptr_t (see <linux/compat.h> included above).  NOTE(review):
 * the length member and closing brace are elided in this excerpt.
 */
143 struct compat_sock_fprog {
145 compat_uptr_t filter; /* struct sock_filter * */
/* Kernel-internal counterpart of struct sock_fprog with a native
 * pointer.  NOTE(review): length member and closing brace elided here.
 */
149 struct sock_fprog_kern {
151 struct sock_filter *filter;
/* Interior of struct sk_filter; the opening members of the struct are
 * elided in this excerpt.
 */
160 u32 jited:1, /* Is our filter JIT'ed? */
161 len:31; /* Number of filter blocks */
162 struct sock_fprog_kern *orig_prog; /* Original BPF program */
/* Entry point invoked by SK_RUN_FILTER(): interpreter or JIT image. */
164 unsigned int (*bpf_func)(const struct sk_buff *skb,
165 const struct sock_filter_int *filter);
/* Trailing instruction storage: insns[] is the classic representation,
 * insnsi[] the internal one.  NOTE(review): both are zero-length arrays
 * followed by `work`; in the full file these presumably share storage
 * via an anonymous union whose braces are elided here — verify.
 */
167 struct sock_filter insns[0];
168 struct sock_filter_int insnsi[0];
169 struct work_struct work;
/* Bytes to allocate for a struct sk_filter holding @proglen classic
 * instructions: at least sizeof(struct sk_filter), otherwise the offset
 * of one-past-the-last element of the trailing insns[] array.
 * NOTE(review): the function's braces are elided in this excerpt.
 */
173 static inline unsigned int sk_filter_size(unsigned int proglen)
175 return max(sizeof(struct sk_filter),
176 offsetof(struct sk_filter, insns[proglen]));
/* Size in bytes of the classic BPF program held by @fprog:
 * @fprog->len instructions of sizeof(@fprog->filter[0]) bytes each.
 *
 * The macro argument is parenthesized so the expansion stays correct
 * when invoked with a non-trivial pointer expression (e.g. &prog);
 * the original `fprog->len` form would mis-parse such callers.
 */
#define sk_filter_proglen(fprog) \
	((fprog)->len * sizeof((fprog)->filter[0]))
/* Socket-filter API prototypes (definitions live outside this header). */
182 int sk_filter(struct sock *sk, struct sk_buff *skb);
/* NOTE(review): presumably chooses interpreter vs JIT for @fp — verify
 * against the definition.
 */
184 void sk_filter_select_runtime(struct sk_filter *fp);
185 void sk_filter_free(struct sk_filter *fp);
/* Translate a classic BPF program into the internal (sock_filter_int)
 * representation; *new_len receives the resulting instruction count.
 */
187 int sk_convert_filter(struct sock_filter *prog, int len,
188 struct sock_filter_int *new_prog, int *new_len);
190 int sk_unattached_filter_create(struct sk_filter **pfp,
191 struct sock_fprog_kern *fprog);
192 void sk_unattached_filter_destroy(struct sk_filter *fp);
194 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
195 int sk_detach_filter(struct sock *sk);
197 int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
/* NOTE(review): trailing parameter(s) of sk_get_filter() are elided in
 * this excerpt.
 */
198 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
201 void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
202 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
204 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
205 void bpf_int_jit_compile(struct sk_filter *fp);
/* Marker bit OR'ed into the codes returned by bpf_anc_helper() below. */
207 #define BPF_ANC BIT(15)
/* Map a classic BPF instruction to its ancillary-load code: for loads at
 * the SKF_AD_OFF "magic" offsets, returns BPF_ANC | SKF_AD_<X>.
 * NOTE(review): the opening brace, the non-ancillary return path and the
 * closing brace are elided in this excerpt.
 */
209 static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
/* bit 15 must be free in the incoming code so BPF_ANC can tag it */
211 BUG_ON(ftest->code & BPF_ANC);
213 switch (ftest->code) {
214 case BPF_LD | BPF_W | BPF_ABS:
215 case BPF_LD | BPF_H | BPF_ABS:
216 case BPF_LD | BPF_B | BPF_ABS:
/* expands to: "case SKF_AD_OFF + SKF_AD_<X>: return BPF_ANC | SKF_AD_<X>;" */
217 #define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
218 return BPF_ANC | SKF_AD_##CODE
220 BPF_ANCILLARY(PROTOCOL);
221 BPF_ANCILLARY(PKTTYPE);
222 BPF_ANCILLARY(IFINDEX);
223 BPF_ANCILLARY(NLATTR);
224 BPF_ANCILLARY(NLATTR_NEST);
226 BPF_ANCILLARY(QUEUE);
227 BPF_ANCILLARY(HATYPE);
228 BPF_ANCILLARY(RXHASH);
230 BPF_ANCILLARY(ALU_XOR_X);
231 BPF_ANCILLARY(VLAN_TAG);
232 BPF_ANCILLARY(VLAN_TAG_PRESENT);
233 BPF_ANCILLARY(PAY_OFFSET);
234 BPF_ANCILLARY(RANDOM);
242 #ifdef CONFIG_BPF_JIT
244 #include <linux/linkage.h>
245 #include <linux/printk.h>
/* Architecture-provided JIT entry points. */
247 void bpf_jit_compile(struct sk_filter *fp);
248 void bpf_jit_free(struct sk_filter *fp);
/* Debug dump of one JIT pass: header line plus hex dump of the generated
 * image.  NOTE(review): the function's braces (and any guarding
 * condition) are elided in this excerpt.
 */
250 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
251 u32 pass, void *image)
253 pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
254 flen, proglen, pass, image);
/* 16 bytes per line, 1-byte groups, no ASCII column */
256 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
257 16, 1, image, proglen, false);
/* !CONFIG_BPF_JIT branch: empty stubs so callers need no #ifdefs.
 * NOTE(review): the #else directive and the stub bodies are elided in
 * this excerpt.
 */
260 #include <linux/slab.h>
262 static inline void bpf_jit_compile(struct sk_filter *fp)
266 static inline void bpf_jit_free(struct sk_filter *fp)
270 #endif /* CONFIG_BPF_JIT */
/* NOTE(review): body of bpf_tell_extensions() is elided in this excerpt. */
272 static inline int bpf_tell_extensions(void)
277 #endif /* __LINUX_FILTER_H__ */