/* kernel/bpf/core.c */
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

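/* Allocate a bpf_prog together with its aux data. The image itself is
 * vmalloc()'ed as a multiple of PAGE_SIZE and zeroed, while struct
 * bpf_prog_aux lives in a separate kmalloc()'ed object. Callers
 * typically pass bpf_prog_size(insn_cnt) as the size, e.g.:
 *
 *	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
 */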
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	kmemcheck_annotate_bitfield(fp, meta);

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

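/* Grow the program image to @size. The additional pages are charged
 * against the loading user's memlock accounting via __bpf_prog_charge()
 * first; on success the old image is copied over, fp_old->aux is
 * inherited by the new program and the old allocation is freed.
 */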
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		kmemcheck_annotate_bitfield(fp, meta);

		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}

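/* Compute a stable SHA-1 tag over the instruction image. Map fds in
 * BPF_LD_IMM64 pseudo instructions are masked out first, since fd
 * numbers vary between loads of the same program. The scratch buffer
 * is laid out like a raw SHA-1 message: the instructions, a 0x80 end
 * marker, zero padding and the message length in bits in the last
 * eight bytes of the final 64 byte block.
 */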
int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fds for the digest calculation
	 * since they are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}

static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       /* Call and Exit are both special jumps with no
		* target inside the BPF instruction image.
		*/
	       BPF_OP(insn->code) != BPF_CALL &&
	       BPF_OP(insn->code) != BPF_EXIT;
}

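/* When @delta instructions are inserted at @pos, branches that cross
 * the patch site need their relative offset fixed up. E.g. a forward
 * jump at index 1 targeting index 3 (off = 1) with two insns inserted
 * at pos = 2 now has its target at index 5, so off becomes 3.
 */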
static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 i, insn_cnt = prog->len;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!bpf_is_jmp_and_has_target(insn))
			continue;

		/* Adjust offset of jmps if we cross boundaries. */
		if (i < pos && i + insn->off + 1 > pos)
			insn->off += delta;
		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
			insn->off -= delta;
	}
}

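/* Replace the single instruction at @off with the @len instructions in
 * @patch; used e.g. by the verifier for ctx access rewrites and by
 * constant blinding below. The passed in @prog may be freed when a
 * reallocation was necessary, so callers must switch to the returned
 * program, along the lines of:
 *
 *	tmp = bpf_patch_insn_single(prog, i, insn_buff, cnt);
 *	if (!tmp)
 *		return -ENOMEM;
 *	prog = tmp;
 */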
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * the last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	bpf_adj_branches(prog_adj, off, insn_delta);

	return prog_adj;
}

#ifdef CONFIG_BPF_JIT
static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end = addr + hdr->pages * PAGE_SIZE;
}

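/* JITed programs show up in kallsyms as "bpf_prog_<tag>", where the
 * tag is the hex encoded SHA-1 prefix computed by bpf_prog_calc_tag(),
 * e.g. a symbol of the form "bpf_prog_8937c3d3f8f5ad5a".
 */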
static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
	*sym = 0;
}

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

int bpf_jit_kallsyms __read_mostly;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(aux->prog, sym);

		*value = symbol_start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

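/* Layout of a JIT image allocation: the struct bpf_binary_header, a
 * hole pre-filled with illegal instructions, and then the actual
 * program image at *image_ptr. The random start offset within the
 * hole makes the image start less predictable as a hardening measure.
 */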
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_harden __read_mostly;

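/* Rewrite an instruction carrying a user controlled 32 bit immediate
 * so that the constant never appears verbatim in the JIT image. The
 * immediate is stored XORed with a random value and decoded through
 * the auxiliary register BPF_REG_AX at run time, e.g.:
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234)
 *
 * becomes:
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x1234 ^ rnd)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * This defeats JIT spraying attacks that encode payloads inside
 * immediates.
 */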
static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		kmemcheck_annotate_bitfield(fp, meta);

		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled())
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

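/* The interpreter below dispatches via a table of computed gotos (one
 * label per opcode) instead of a switch statement, which avoids the
 * bounds check and extra branch a switch would add per instruction.
 * The hot path per executed instruction is essentially just:
 *
 *	insn++;
 *	goto *jumptable[insn->code];
 */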
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

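	/* A tail call jumps into another BPF program without returning:
	 * the registers keep their contents and the same stack frame is
	 * reused. tail_call_cnt bounds the length of tail call chains
	 * via MAX_TAIL_CALL_CNT so runtime stays finite.
	 */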
	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u64 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
		 * appear in programs where ctx == skb
		 * (see may_access_skb() in the verifier). All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
		 * bpf_convert_filter() saves it in BPF_R6, internal BPF
		 * verifier will check that BPF_R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}
STACK_FRAME_NON_STANDARD(__bpf_prog_run); /* jump table */

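/* Programs stored in a prog array map must all agree on program type
 * and on whether they are JITed, since a tail call transfers control
 * without any per-program dispatch logic. The first program inserted
 * becomes the owner and fixes both properties for the array.
 */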
bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *	@err: pointer to error variable
 *
 * Try to JIT the eBPF program; if no JIT is available, use the
 * interpreter. The BPF program will be executed via the BPF_PROG_RUN()
 * macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	fp = bpf_int_jit_compile(fp);
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine if we deal
	 * with JITed or non JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);

EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);