/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *      Jay Schulist <jschlst@samba.org>
 *      Alexei Starovoitov <ast@plumgrid.com>
 *      Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0  regs[BPF_REG_0]
#define BPF_R1  regs[BPF_REG_1]
#define BPF_R2  regs[BPF_REG_2]
#define BPF_R3  regs[BPF_REG_3]
#define BPF_R4  regs[BPF_REG_4]
#define BPF_R5  regs[BPF_REG_5]
#define BPF_R6  regs[BPF_REG_6]
#define BPF_R7  regs[BPF_REG_7]
#define BPF_R8  regs[BPF_REG_8]
#define BPF_R9  regs[BPF_REG_9]
#define BPF_R10 regs[BPF_REG_10]

/* Named registers */
#define DST     regs[insn->dst_reg]
#define SRC     regs[insn->src_reg]
#define FP      regs[BPF_REG_FP]
#define ARG1    regs[BPF_REG_ARG1]
#define CTX     regs[BPF_REG_CTX]
#define IMM     insn->imm

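/* For example, for an instruction with insn->code ==
 * BPF_ALU64 | BPF_ADD | BPF_X, insn->dst_reg == BPF_REG_1 and
 * insn->src_reg == BPF_REG_2, the DST and SRC macros resolve to
 * regs[BPF_REG_1] and regs[BPF_REG_2], so the ALU64_ADD_X handler
 * in __bpf_prog_run() effectively computes regs[1] += regs[2].
 */
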
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
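/* Classic BPF can load skb bytes from "negative" magic offsets:
 * k = SKF_NET_OFF + x refers to byte x of the network header and
 * k = SKF_LL_OFF + x to byte x of the link layer (MAC) header.
 * E.g. k == SKF_LL_OFF resolves to skb_mac_header(skb)[0], and the
 * pointer is only returned when the whole access of 'size' bytes
 * fits between skb->head and skb_tail_pointer(skb).
 */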
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
        u8 *ptr = NULL;

        if (k >= SKF_NET_OFF)
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        else if (k >= SKF_LL_OFF)
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;

        return NULL;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
                          gfp_extra_flags;
        struct bpf_prog_aux *aux;
        struct bpf_prog *fp;

        size = round_up(size, PAGE_SIZE);
        fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
        if (fp == NULL)
                return NULL;

        kmemcheck_annotate_bitfield(fp, meta);

        aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
        if (aux == NULL) {
                vfree(fp);
                return NULL;
        }

        fp->pages = size / PAGE_SIZE;
        fp->aux = aux;
        fp->aux->prog = fp;

        return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
                                  gfp_t gfp_extra_flags)
{
        gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
                          gfp_extra_flags;
        struct bpf_prog *fp;

        BUG_ON(fp_old == NULL);

        size = round_up(size, PAGE_SIZE);
        if (size <= fp_old->pages * PAGE_SIZE)
                return fp_old;

        fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
        if (fp != NULL) {
                kmemcheck_annotate_bitfield(fp, meta);

                memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
                fp->pages = size / PAGE_SIZE;
                fp->aux->prog = fp;

                /* We keep fp->aux from fp_old around in the new
                 * reallocated structure.
                 */
                fp_old->aux = NULL;
                __bpf_prog_free(fp_old);
        }

        return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_realloc);

void __bpf_prog_free(struct bpf_prog *fp)
{
        kfree(fp->aux);
        vfree(fp);
}
EXPORT_SYMBOL_GPL(__bpf_prog_free);

#ifdef CONFIG_BPF_JIT
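/* Layout of the allocated JIT image (sketch):
 *
 *   +--------------------------+ <- hdr, page aligned
 *   | struct bpf_binary_header |
 *   +--------------------------+
 *   | random-sized hole        |  filled with illegal insns
 *   +--------------------------+ <- *image_ptr = &hdr->image[start]
 *   | JITed program            |
 *   +--------------------------+
 *   | remaining fill           |  filled with illegal insns
 *   +--------------------------+
 *
 * The random, alignment-masked start offset makes the address of the
 * emitted code harder to predict.
 */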
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
                     unsigned int alignment,
                     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
        struct bpf_binary_header *hdr;
        unsigned int size, hole, start;

        /* Most BPF filters are really small, but if some of them
         * fill a page, allow at least 128 extra bytes to insert a
         * random section of illegal instructions.
         */
        size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
        hdr = module_alloc(size);
        if (hdr == NULL)
                return NULL;

        /* Fill space with illegal/arch-dep instructions. */
        bpf_fill_ill_insns(hdr, size);

        hdr->pages = size / PAGE_SIZE;
        hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
                     PAGE_SIZE - sizeof(*hdr));
        start = (prandom_u32() % hole) & ~(alignment - 1);

        /* Leave a random number of instructions before BPF code. */
        *image_ptr = &hdr->image[start];

        return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
        module_memfree(hdr);
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
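/* BPF_CALL instructions encode the callee as a 32-bit offset relative
 * to __bpf_call_base: the loader/verifier rewrites insn->imm of each
 * call to helper_address - __bpf_call_base, and the JMP_CALL handler
 * below undoes that by computing (__bpf_call_base + insn->imm)(...).
 */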

/**
 *      __bpf_prog_run - run eBPF program on a given context
 *      @ctx: is the data we are operating on
 *      @insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
        u64 stack[MAX_BPF_STACK / sizeof(u64)];
        u64 regs[MAX_BPF_REG], tmp;
        static const void *jumptable[256] = {
                [0 ... 255] = &&default_label,
                /* Now overwrite non-defaults ... */
                /* 32 bit ALU operations */
                [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
                [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
                [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
                [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
                [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
                [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
                [BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
                [BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
                [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
                [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
                [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
                [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
                [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
                [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
                [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
                [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
                [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
                [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
                [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
                [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
                [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
                [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
                [BPF_ALU | BPF_NEG] = &&ALU_NEG,
                [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
                [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
                /* 64 bit ALU operations */
                [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
                [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
                [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
                [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
                [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
                [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
                [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
                [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
                [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
                [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
                [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
                [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
                [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
                [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
                [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
                [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
                [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
                [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
                [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
                [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
                [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
                [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
                [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
                [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
                [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
                /* Call instruction */
                [BPF_JMP | BPF_CALL] = &&JMP_CALL,
                [BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
                /* Jumps */
                [BPF_JMP | BPF_JA] = &&JMP_JA,
                [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
                [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
                [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
                [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
                [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
                [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
                [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
                [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
                [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
                [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
                [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
                [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
                [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
                [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
                /* Program return */
                [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
                /* Store instructions */
                [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
                [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
                [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
                [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
                [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
                [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
                [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
                [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
                [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
                [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
                /* Load instructions */
                [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
                [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
                [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
                [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
                [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
                [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
                [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
                [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
                [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
                [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
                [BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
        };
        u32 tail_call_cnt = 0;
        void *ptr;
        int off;

#define CONT     ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })
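
/* CONT and CONT_JMP currently expand to the same direct-threaded
 * dispatch: advance insn and re-enter the jump table. CONT_JMP is kept
 * as a separate macro, presumably so the taken-branch path could later
 * be tuned or instrumented independently of straight-line continuation.
 */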

        FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
        ARG1 = (u64) (unsigned long) ctx;

select_insn:
        goto *jumptable[insn->code];

        /* ALU */
#define ALU(OPCODE, OP)                 \
        ALU64_##OPCODE##_X:             \
                DST = DST OP SRC;       \
                CONT;                   \
        ALU_##OPCODE##_X:               \
                DST = (u32) DST OP (u32) SRC;   \
                CONT;                   \
        ALU64_##OPCODE##_K:             \
                DST = DST OP IMM;               \
                CONT;                   \
        ALU_##OPCODE##_K:               \
                DST = (u32) DST OP (u32) IMM;   \
                CONT;

        ALU(ADD,  +)
        ALU(SUB,  -)
        ALU(AND,  &)
        ALU(OR,   |)
        ALU(LSH, <<)
        ALU(RSH, >>)
        ALU(XOR,  ^)
        ALU(MUL,  *)
#undef ALU
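        /* Each ALU() use above emits four handlers; ALU(ADD, +), for
         * example, expands to:
         *
         *   ALU64_ADD_X: DST = DST + SRC;               CONT;
         *   ALU_ADD_X:   DST = (u32) DST + (u32) SRC;   CONT;
         *   ALU64_ADD_K: DST = DST + IMM;               CONT;
         *   ALU_ADD_K:   DST = (u32) DST + (u32) IMM;   CONT;
         */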
        ALU_NEG:
                DST = (u32) -DST;
                CONT;
        ALU64_NEG:
                DST = -DST;
                CONT;
        ALU_MOV_X:
                DST = (u32) SRC;
                CONT;
        ALU_MOV_K:
                DST = (u32) IMM;
                CONT;
        ALU64_MOV_X:
                DST = SRC;
                CONT;
        ALU64_MOV_K:
                DST = IMM;
                CONT;
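        /* BPF_LD | BPF_IMM | BPF_DW occupies two instruction slots:
         * insn[0].imm carries the lower and insn[1].imm the upper 32
         * bits of the 64-bit immediate. The extra insn++ here plus the
         * one in CONT skips over both slots.
         */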
        LD_IMM_DW:
                DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
                insn++;
                CONT;
        ALU64_ARSH_X:
                (*(s64 *) &DST) >>= SRC;
                CONT;
        ALU64_ARSH_K:
                (*(s64 *) &DST) >>= IMM;
                CONT;
        ALU64_MOD_X:
                if (unlikely(SRC == 0))
                        return 0;
                div64_u64_rem(DST, SRC, &tmp);
                DST = tmp;
                CONT;
        ALU_MOD_X:
                if (unlikely(SRC == 0))
                        return 0;
                tmp = (u32) DST;
                DST = do_div(tmp, (u32) SRC);
                CONT;
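        /* The BPF_K division and modulo handlers below omit the runtime
         * zero check on purpose: the verifier rejects programs with a
         * constant zero divisor at load time, so only the BPF_X forms
         * need to guard against SRC == 0.
         */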
        ALU64_MOD_K:
                div64_u64_rem(DST, IMM, &tmp);
                DST = tmp;
                CONT;
        ALU_MOD_K:
                tmp = (u32) DST;
                DST = do_div(tmp, (u32) IMM);
                CONT;
        ALU64_DIV_X:
                if (unlikely(SRC == 0))
                        return 0;
                DST = div64_u64(DST, SRC);
                CONT;
        ALU_DIV_X:
                if (unlikely(SRC == 0))
                        return 0;
                tmp = (u32) DST;
                do_div(tmp, (u32) SRC);
                DST = (u32) tmp;
                CONT;
        ALU64_DIV_K:
                DST = div64_u64(DST, IMM);
                CONT;
        ALU_DIV_K:
                tmp = (u32) DST;
                do_div(tmp, (u32) IMM);
                DST = (u32) tmp;
                CONT;
        ALU_END_TO_BE:
                switch (IMM) {
                case 16:
                        DST = (__force u16) cpu_to_be16(DST);
                        break;
                case 32:
                        DST = (__force u32) cpu_to_be32(DST);
                        break;
                case 64:
                        DST = (__force u64) cpu_to_be64(DST);
                        break;
                }
                CONT;
        ALU_END_TO_LE:
                switch (IMM) {
                case 16:
                        DST = (__force u16) cpu_to_le16(DST);
                        break;
                case 32:
                        DST = (__force u32) cpu_to_le32(DST);
                        break;
                case 64:
                        DST = (__force u64) cpu_to_le64(DST);
                        break;
                }
                CONT;

        /* CALL */
        JMP_CALL:
                /* Function call scratches BPF_R1-BPF_R5 registers,
                 * preserves BPF_R6-BPF_R9, and stores return value
                 * into BPF_R0.
                 */
                BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
                                                       BPF_R4, BPF_R5);
                CONT;

        JMP_TAIL_CALL: {
                struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
                struct bpf_array *array = container_of(map, struct bpf_array, map);
                struct bpf_prog *prog;
                u64 index = BPF_R3;

                if (unlikely(index >= array->map.max_entries))
                        goto out;

                if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
                        goto out;

                tail_call_cnt++;

                prog = READ_ONCE(array->ptrs[index]);
                if (unlikely(!prog))
                        goto out;

                /* ARG1 at this point is guaranteed to point to CTX from
                 * the verifier side due to the fact that the tail call is
                 * handled like a helper, that is, bpf_tail_call_proto,
                 * where arg1_type is ARG_PTR_TO_CTX.
                 */
                insn = prog->insnsi;
                goto select_insn;
out:
                CONT;
        }
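        /* Note that a tail call never returns to the caller: execution
         * simply continues at the first insn of the target program with
         * the same register file and the same 512 byte stack, and
         * tail_call_cnt bounds the length of any tail call chain.
         */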
        /* JMP */
        JMP_JA:
                insn += insn->off;
                CONT;
        JMP_JEQ_X:
                if (DST == SRC) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JEQ_K:
                if (DST == IMM) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JNE_X:
                if (DST != SRC) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JNE_K:
                if (DST != IMM) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JGT_X:
                if (DST > SRC) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JGT_K:
                if (DST > IMM) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JGE_X:
                if (DST >= SRC) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JGE_K:
                if (DST >= IMM) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JSGT_X:
                if (((s64) DST) > ((s64) SRC)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JSGT_K:
                if (((s64) DST) > ((s64) IMM)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JSGE_X:
                if (((s64) DST) >= ((s64) SRC)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JSGE_K:
                if (((s64) DST) >= ((s64) IMM)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JSET_X:
                if (DST & SRC) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_JSET_K:
                if (DST & IMM) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
        JMP_EXIT:
                return BPF_R0;

        /* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)                                              \
        STX_MEM_##SIZEOP:                                               \
                *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
                CONT;                                                   \
        ST_MEM_##SIZEOP:                                                \
                *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
                CONT;                                                   \
        LDX_MEM_##SIZEOP:                                               \
                DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
                CONT;

        LDST(B,   u8)
        LDST(H,  u16)
        LDST(W,  u32)
        LDST(DW, u64)
#undef LDST
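        /* BPF_XADD is an atomic add to memory without a fetch: src_reg
         * is added to *(dst_reg + off), but the old value is not loaded
         * back into a register.
         */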
        STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
                atomic_add((u32) SRC, (atomic_t *)(unsigned long)
                           (DST + insn->off));
                CONT;
        STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
                atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
                             (DST + insn->off));
                CONT;
        LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
                off = IMM;
load_word:
                /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
                 * appear in programs where ctx == skb. All programs
                 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6:
                 * bpf_convert_filter() saves it in BPF_R6, and the
                 * internal BPF verifier checks that BPF_R6 == ctx.
                 *
                 * BPF_ABS and BPF_IND are wrappers of function calls,
                 * so they scratch BPF_R1-BPF_R5 registers, preserve
                 * BPF_R6-BPF_R9, and store return value into BPF_R0.
                 *
                 * Implicit input:
                 *   ctx == skb == BPF_R6 == CTX
                 *
                 * Explicit input:
                 *   SRC == any register
                 *   IMM == 32-bit immediate
                 *
                 * Output:
                 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
                 */

                ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
                if (likely(ptr != NULL)) {
                        BPF_R0 = get_unaligned_be32(ptr);
                        CONT;
                }

                return 0;
        LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
                off = IMM;
load_half:
                ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
                if (likely(ptr != NULL)) {
                        BPF_R0 = get_unaligned_be16(ptr);
                        CONT;
                }

                return 0;
        LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
                off = IMM;
load_byte:
                ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
                if (likely(ptr != NULL)) {
                        BPF_R0 = *(u8 *)ptr;
                        CONT;
                }

                return 0;
        LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
                off = IMM + SRC;
                goto load_word;
        LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
                off = IMM + SRC;
                goto load_half;
        LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
                off = IMM + SRC;
                goto load_byte;

        default_label:
                /* If we ever reach this, we have a bug somewhere. */
                WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
                return 0;
}
STACK_FRAME_NON_STANDARD(__bpf_prog_run); /* jump table */

bool bpf_prog_array_compatible(struct bpf_array *array,
                               const struct bpf_prog *fp)
{
        if (!array->owner_prog_type) {
                /* There's no owner yet where we could check for
                 * compatibility.
                 */
                array->owner_prog_type = fp->type;
                array->owner_jited = fp->jited;

                return true;
        }

        return array->owner_prog_type == fp->type &&
               array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
        struct bpf_prog_aux *aux = fp->aux;
        int i;

        for (i = 0; i < aux->used_map_cnt; i++) {
                struct bpf_map *map = aux->used_maps[i];
                struct bpf_array *array;

                if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
                        continue;

                array = container_of(map, struct bpf_array, map);
                if (!bpf_prog_array_compatible(array, fp))
                        return -EINVAL;
        }

        return 0;
}

/**
 *      bpf_prog_select_runtime - select exec runtime for BPF program
 *      @fp: bpf_prog populated with internal BPF program
 *
 * Try to JIT the eBPF program; if no JIT is available, use the
 * interpreter. The BPF program will be executed via the BPF_PROG_RUN()
 * macro.
 */
int bpf_prog_select_runtime(struct bpf_prog *fp)
{
        fp->bpf_func = (void *) __bpf_prog_run;

        bpf_int_jit_compile(fp);
        bpf_prog_lock_ro(fp);

        /* The tail call compatibility check can only be done at
         * this late stage as we need to determine whether we deal
         * with JITed or non-JITed program concatenations, and not
         * all eBPF JITs might immediately support all features.
         */
        return bpf_check_tail_call(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
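
/* Typical call sequence (sketch only, verification and error handling
 * elided):
 *
 *   fp = bpf_prog_alloc(bpf_prog_size(insn_cnt), 0);
 *   memcpy(fp->insnsi, insns, insn_cnt * sizeof(*insns));
 *   fp->len = insn_cnt;
 *   err = bpf_prog_select_runtime(fp);
 *   ...
 *   ret = BPF_PROG_RUN(fp, ctx);
 */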

static void bpf_prog_free_deferred(struct work_struct *work)
{
        struct bpf_prog_aux *aux;

        aux = container_of(work, struct bpf_prog_aux, work);
        bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
        struct bpf_prog_aux *aux = fp->aux;

        INIT_WORK(&aux->work, bpf_prog_free_deferred);
        schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space, with state separate from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
        prandom_init_once(&bpf_user_rnd_state);
}

u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
        /* Should someone ever have the rather unwise idea to use some
         * of the registers passed into this function, then note that
         * this function is called from native eBPF and classic-to-eBPF
         * transformations. Register assignments from both sides are
         * different, f.e. classic always sets fn(ctx, A, X) here.
         */
        struct rnd_state *state;
        u32 res;

        state = &get_cpu_var(bpf_user_rnd_state);
        res = prandom_u32_state(state);
        put_cpu_var(state);

        return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
        return NULL;
}

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
        .func           = NULL,
        .gpl_only       = false,
        .ret_type       = RET_VOID,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};
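/* .func is NULL on purpose: tail calls are not dispatched through
 * __bpf_call_base like ordinary helpers. The interpreter handles them
 * inline via JMP_TAIL_CALL above, and JITs emit the jump directly.
 */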

/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
                         int len)
{
        return -EFAULT;
}