/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 * Jay Schulist <jschlst@samba.org>
 * Alexei Starovoitov <ast@plumgrid.com>
 * Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

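/* The DST, SRC, FP, ARG1, CTX and IMM shorthands above expand against the
 * interpreter's local regs[] array and the instruction currently being
 * decoded, e.g. "DST = SRC" in a handler below is shorthand for
 * "regs[insn->dst_reg] = regs[insn->src_reg]".
 */
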
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

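/* bpf_prog_alloc() sizes the program image in whole pages: the buffer is
 * vmalloc()'ed and fp->pages is recorded, so that later stages (e.g.
 * bpf_prog_lock_ro() below, or the realloc/free paths) can work on page
 * granularity.
 */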
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	kmemcheck_annotate_bitfield(fp, meta);

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	if (size <= fp_old->pages * PAGE_SIZE)
		return fp_old;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		kmemcheck_annotate_bitfield(fp, meta);

		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = size / PAGE_SIZE;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_realloc);

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}
EXPORT_SYMBOL_GPL(__bpf_prog_free);

#ifdef CONFIG_BPF_JIT
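/* bpf_jit_binary_alloc() hands JIT backends a module_alloc()'ed buffer for
 * the program image. The buffer is padded and prefilled with arch-specific
 * illegal instructions via the bpf_fill_ill_insns callback, and the image
 * itself starts at a randomized, alignment-respecting offset within the
 * first page, which is meant to make the placement of the emitted code
 * harder to predict.
 */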
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (prandom_u32() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/**
 * __bpf_prog_run - run eBPF program on a given context
 * @ctx: is the data we are operating on
 * @insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

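	/* The interpreter is a directly threaded dispatch loop: each handler
	 * below ends in CONT/CONT_JMP, which advances insn and jumps through
	 * jumptable[] (a GCC computed goto) to the handler for the next
	 * opcode, e.g. BPF_ALU64 | BPF_ADD | BPF_X lands on ALU64_ADD_X.
	 * Unknown opcodes fall through to default_label.
	 */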
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Registers used in classic BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
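	/* The ALU() macro below expands to four handlers per operator: the
	 * 64-bit and 32-bit variants, each in register (_X) and immediate
	 * (_K) form. The 32-bit forms truncate both operands and the result
	 * to u32, matching eBPF's 32-bit ALU semantics.
	 */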
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD, +)
	ALU(SUB, -)
	ALU(AND, &)
	ALU(OR, |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR, ^)
	ALU(MUL, *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
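	/* BPF_LD | BPF_IMM | BPF_DW is the only 16-byte instruction: the low
	 * 32 bits of the immediate come from insn[0].imm and the high 32 bits
	 * from insn[1].imm, so the extra insn++ above skips the second half
	 * before dispatching the next instruction.
	 */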
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;
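		/* insn->imm does not hold an absolute address but the
		 * helper's offset relative to __bpf_call_base (see the
		 * comment at its definition above); the program loader
		 * rewrites imm accordingly, so the addition here recovers
		 * the actual helper function pointer.
		 */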

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u64 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;

		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (unlikely(!prog))
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side, because the tail call is handled
		 * like a helper, that is, bpf_tail_call_proto, where
		 * arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
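	/* Note that a tail call is not a function call: the current stack
	 * frame and registers are reused, insn is simply redirected to the
	 * start of the target program, and execution never returns to the
	 * caller. tail_call_cnt bounds the length of such chains to
	 * MAX_TAIL_CALL_CNT.
	 */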
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B, u8)
	LDST(H, u16)
	LDST(W, u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
		 * appear in programs where ctx == skb. All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6;
		 * bpf_convert_filter() saves it in BPF_R6 and the
		 * internal BPF verifier checks that BPF_R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}

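/* A prog array used for tail calls is bound to the first program type that
 * is put into it (and to whether that program was JITed): all later programs
 * must match, so tail calls never jump between incompatible context types or
 * mix JITed and interpreted images within one array.
 */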
bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

/**
 * bpf_prog_select_runtime - select exec runtime for BPF program
 * @fp: bpf_prog populated with internal BPF program
 *
 * Try to JIT the eBPF program; if no JIT is available, fall back to the
 * interpreter. The BPF program will be executed via the BPF_PROG_RUN() macro.
 */
int bpf_prog_select_runtime(struct bpf_prog *fp)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	bpf_int_jit_compile(fp);
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage, as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	return bpf_check_tail_call(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space, with state separate from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, e.g. classic BPF always calls it as fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
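
/* Note that bpf_tail_call_proto above intentionally has .func == NULL:
 * the tail call is not dispatched through __bpf_call_base like other
 * helpers, the interpreter implements it directly in the JMP_TAIL_CALL
 * handler above (and JITs that support tail calls emit their own sequence
 * for it).
 */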

/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}