/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

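/* Programs are allocated with vmalloc() and rounded up to whole pages so
 * that the image can later be locked read-only as a unit (see
 * bpf_prog_lock_ro() in bpf_prog_select_runtime()). The deferred-free work
 * struct lives in a separate kmalloc'ed allocation, since the program pages
 * themselves may no longer be writable by the time the program is freed.
 */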
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_work_struct *ws;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	ws = kmalloc(sizeof(*ws), GFP_KERNEL | gfp_extra_flags);
	if (ws == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->work = ws;

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	if (size <= fp_old->pages * PAGE_SIZE)
		return fp_old;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = size / PAGE_SIZE;

		/* We keep fp->work from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->work = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_realloc);

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->work);
	vfree(fp);
}
EXPORT_SYMBOL_GPL(__bpf_prog_free);

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}

/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Registers used in classic BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

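	/* Dispatch loop: every handler below ends in CONT, which advances
	 * insn and jumps back to select_insn. The opcode byte indexes the
	 * jumptable directly (a computed goto), so there is no central
	 * switch, and unknown opcodes fall through to default_label.
	 */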
select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

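	/* Each ALU(OPCODE, OP) expansion emits four handlers: the 64-bit and
	 * 32-bit variants of the operation, each with a register (X) and an
	 * immediate (K) source. The 32-bit forms operate on the lower 32 bits
	 * and zero-extend the result into DST.
	 */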
	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
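	/* BPF_LD | BPF_IMM | BPF_DW spans two consecutive instructions:
	 * insn[0].imm carries the lower 32 bits and insn[1].imm the upper
	 * 32 bits of the 64-bit immediate, hence the extra insn++ below.
	 */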
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
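	/* Division and modulo use do_div(), which divides its first argument
	 * in place and returns the remainder: MOD keeps the return value,
	 * DIV keeps the quotient left in the variable. A zero register
	 * divisor aborts the program with a return value of 0; the immediate
	 * variants do no runtime check and expect a zero divisor to have
	 * been rejected at program load time.
	 */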
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = DST;
		DST = do_div(tmp, SRC);
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		tmp = DST;
		DST = do_div(tmp, IMM);
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		do_div(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		do_div(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
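	/* BPF_END converts the low 16, 32 or 64 bits of DST (width selected
	 * by the immediate) between host byte order and big or little
	 * endian; the narrowing casts zero-extend the result back into the
	 * 64-bit register.
	 */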
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

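	/* For BPF_CALL, insn->imm holds the helper address as an offset
	 * relative to __bpf_call_base (see above); the program loader is
	 * expected to have rewritten the immediate so that the addition
	 * below yields the real function pointer.
	 */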
	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
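	/* BPF_XADD is an atomic add to memory; it maps onto atomic_add()/
	 * atomic64_add(), and the old value is not fetched back into a
	 * register.
	 */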
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
		 * appear in programs where ctx == skb. All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6;
		 * bpf_convert_filter() saves it in BPF_R6, and the
		 * internal BPF verifier will check that BPF_R6 ==
		 * ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;
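	/* The LD_IND_* variants reuse the load_word/load_half/load_byte
	 * tails above with off = IMM + SRC. bpf_load_pointer() copies
	 * non-linear skb data into 'tmp' as needed and handles negative
	 * offsets (SKF_NET_OFF/SKF_LL_OFF) through
	 * bpf_internal_load_pointer_neg_helper() at the top of this file.
	 */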

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}

void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
}

/**
 *	bpf_prog_select_runtime - select execution runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *
 * Try to JIT the internal BPF program; if no JIT is available, fall back
 * to the interpreter. The program is then executed via the BPF_PROG_RUN()
 * macro.
 */
void bpf_prog_select_runtime(struct bpf_prog *fp)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	/* Probe if internal BPF can be JITed */
	bpf_int_jit_compile(fp);
	/* Lock whole bpf_prog as read-only */
	bpf_prog_lock_ro(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
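/* Typical lifecycle of a program built on the helpers in this file (a rough
 * sketch only; error handling omitted, field/helper names as declared in
 * linux/filter.h for this kernel version):
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(len), 0);
 *	memcpy(fp->insnsi, insns, len * sizeof(*insns));
 *	fp->len = len;
 *	bpf_prog_select_runtime(fp);
 *	ret = BPF_PROG_RUN(fp, ctx);
 *	...
 *	bpf_prog_free(fp);
 */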
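/* Freeing is deferred to a workqueue so that bpf_jit_free() can run in
 * process context: undoing the read-only mapping and releasing the image
 * may sleep, while bpf_prog_free() itself may be called from atomic
 * context.
 */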
static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_work_struct *ws;

	ws = container_of(work, struct bpf_work_struct, work);
	bpf_jit_free(ws->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_work_struct *ws = fp->work;

	INIT_WORK(&ws->work, bpf_prog_free_deferred);
	ws->prog = fp;
	schedule_work(&ws->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);