selftests/bpf: Add unit tests for bpf_arena_alloc/free_pages
author Alexei Starovoitov <ast@kernel.org>
Fri, 8 Mar 2024 01:08:10 +0000 (17:08 -0800)
committer Andrii Nakryiko <andrii@kernel.org>
Mon, 11 Mar 2024 22:43:43 +0000 (15:43 -0700)
Add unit tests for the bpf_arena_alloc/free_pages() functionality, along with
bpf_arena_common.h, which provides a set of common helpers and macros used in
this test and in the following patches.

Also modify test_loader, which previously could not run bpf_prog_type_syscall
programs.
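
The pattern exercised by the tests is roughly the following (a minimal sketch
that relies on the arena map definition and the kfunc declarations provided by
bpf_arena_common.h below; the actual tests in verifier_arena.c additionally
guard the dereferences with __BPF_FEATURE_ARENA_CAST):

	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>
	#include "bpf_arena_common.h"

	struct {
		__uint(type, BPF_MAP_TYPE_ARENA);
		__uint(map_flags, BPF_F_MMAPABLE);
		__uint(max_entries, 2); /* arena of two pages */
	} arena SEC(".maps");

	SEC("syscall")
	int alloc_and_free(void *ctx)
	{
		/* allocate one page anywhere in the arena */
		int __arena *page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);

		if (!page)
			return 1;
		*page = 1; /* arena pointers are dereferenced directly */
		bpf_arena_free_pages(&arena, (void __arena *)page, 1);
		return 0;
	}

	char _license[] SEC("license") = "GPL";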

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240308010812.89848-13-alexei.starovoitov@gmail.com
tools/testing/selftests/bpf/DENYLIST.aarch64
tools/testing/selftests/bpf/DENYLIST.s390x
tools/testing/selftests/bpf/bpf_arena_common.h [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/verifier.c
tools/testing/selftests/bpf/progs/verifier_arena.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_loader.c

diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 0445ac38bc07de1337d78a5ce63f6875724403c5..f9101651747b553cc5601be90252475488fc7b6d 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -10,3 +10,4 @@ fill_link_info/kprobe_multi_link_info            # bpf_program__attach_kprobe_mu
 fill_link_info/kretprobe_multi_link_info         # bpf_program__attach_kprobe_multi_opts unexpected error: -95
 fill_link_info/kprobe_multi_invalid_ubuff        # bpf_program__attach_kprobe_multi_opts unexpected error: -95
 missed/kprobe_recursion                          # missed_kprobe_recursion__attach unexpected error: -95 (errno 95)
+verifier_arena                                   # JIT does not support arena
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index cb810a98e78f48477a0210d3707407bc42fd6380..aa8a620f3318d282df2519e516a63d12e1856686 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -4,3 +4,4 @@ exceptions                               # JIT does not support calling kfunc bpf_throw                                (excepti
 get_stack_raw_tp                         # user_stack corrupted user stack                                             (no backchain userspace)
 stacktrace_build_id                      # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2                   (?)
 verifier_iterating_callbacks
+verifier_arena                           # JIT does not support arena
diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h
new file mode 100644
index 0000000..bcf195c
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_arena_common.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#pragma once
+
+#ifndef WRITE_ONCE
+#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
+#endif
+
+#ifndef NUMA_NO_NODE
+#define        NUMA_NO_NODE    (-1)
+#endif
+
+#ifndef arena_container_of
+#define arena_container_of(ptr, type, member)                  \
+       ({                                                      \
+               void __arena *__mptr = (void __arena *)(ptr);   \
+               ((type *)(__mptr - offsetof(type, member)));    \
+       })
+#endif
+
+#ifdef __BPF__ /* when compiled as bpf program */
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE __PAGE_SIZE
+/*
+ * for older kernels try sizeof(struct genradix_node)
+ * or flexible:
+ * static inline long __bpf_page_size(void) {
+ *   return bpf_core_enum_value(enum page_size_enum___l, __PAGE_SIZE___l) ?: sizeof(struct genradix_node);
+ * }
+ * but generated code is not great.
+ */
+#endif
+
+#if defined(__BPF_FEATURE_ARENA_CAST) && !defined(BPF_ARENA_FORCE_ASM)
+#define __arena __attribute__((address_space(1)))
+#define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */
+#define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */
+#else
+#define __arena
+#define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1)
+#define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0)
+#endif
+
+void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
+                                   int node_id, __u64 flags) __ksym __weak;
+void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
+
+#else /* when compiled as user space code */
+
+#define __arena
+#define __arg_arena
+#define cast_kern(ptr) /* nop for user space */
+#define cast_user(ptr) /* nop for user space */
+__weak char arena[1];
+
+#ifndef offsetof
+#define offsetof(type, member)  ((unsigned long)&((type *)0)->member)
+#endif
+
+static inline void __arena* bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt,
+                                                 int node_id, __u64 flags)
+{
+       return NULL;
+}
+static inline void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt)
+{
+}
+
+#endif
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 9c6072a197456929fda693d6fea4de527ff49adb..985273832f891c291a308d03d35d1469204f9708 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -4,6 +4,7 @@
 
 #include "cap_helpers.h"
 #include "verifier_and.skel.h"
+#include "verifier_arena.skel.h"
 #include "verifier_array_access.skel.h"
 #include "verifier_basic_stack.skel.h"
 #include "verifier_bitfield_write.skel.h"
@@ -118,6 +119,7 @@ static void run_tests_aux(const char *skel_name,
 #define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes, NULL)
 
 void test_verifier_and(void)                  { RUN(verifier_and); }
+void test_verifier_arena(void)                { RUN(verifier_arena); }
 void test_verifier_basic_stack(void)          { RUN(verifier_basic_stack); }
 void test_verifier_bitfield_write(void)       { RUN(verifier_bitfield_write); }
 void test_verifier_bounds(void)               { RUN(verifier_bounds); }
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c
new file mode 100644
index 0000000..5540b05
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_arena.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+#include "bpf_arena_common.h"
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARENA);
+       __uint(map_flags, BPF_F_MMAPABLE);
+       __uint(max_entries, 2); /* arena of two pages close to 32-bit boundary*/
+       __ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
+} arena SEC(".maps");
+
+SEC("syscall")
+__success __retval(0)
+int basic_alloc1(void *ctx)
+{
+#if defined(__BPF_FEATURE_ARENA_CAST)
+       volatile int __arena *page1, *page2, *no_page, *page3;
+
+       page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+       if (!page1)
+               return 1;
+       *page1 = 1;
+       page2 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+       if (!page2)
+               return 2;
+       *page2 = 2;
+       no_page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+       if (no_page)
+               return 3;
+       if (*page1 != 1)
+               return 4;
+       if (*page2 != 2)
+               return 5;
+       bpf_arena_free_pages(&arena, (void __arena *)page2, 1);
+       if (*page1 != 1)
+               return 6;
+       if (*page2 != 0) /* use-after-free should return 0 */
+               return 7;
+       page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+       if (!page3)
+               return 8;
+       *page3 = 3;
+       if (page2 != page3)
+               return 9;
+       if (*page1 != 1)
+               return 10;
+#endif
+       return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int basic_alloc2(void *ctx)
+{
+#if defined(__BPF_FEATURE_ARENA_CAST)
+       volatile char __arena *page1, *page2, *page3, *page4;
+
+       page1 = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);
+       if (!page1)
+               return 1;
+       page2 = page1 + __PAGE_SIZE;
+       page3 = page1 + __PAGE_SIZE * 2;
+       page4 = page1 - __PAGE_SIZE;
+       *page1 = 1;
+       *page2 = 2;
+       *page3 = 3;
+       *page4 = 4;
+       if (*page1 != 1)
+               return 1;
+       if (*page2 != 2)
+               return 2;
+       if (*page3 != 0)
+               return 3;
+       if (*page4 != 0)
+               return 4;
+       bpf_arena_free_pages(&arena, (void __arena *)page1, 2);
+       if (*page1 != 0)
+               return 5;
+       if (*page2 != 0)
+               return 6;
+       if (*page3 != 0)
+               return 7;
+       if (*page4 != 0)
+               return 8;
+#endif
+       return 0;
+}
+
+struct bpf_arena___l {
+        struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+SEC("syscall")
+__success __retval(0) __log_level(2)
+int basic_alloc3(void *ctx)
+{
+       struct bpf_arena___l *ar = (struct bpf_arena___l *)&arena;
+       volatile char __arena *pages;
+
+       pages = bpf_arena_alloc_pages(&ar->map, NULL, ar->map.max_entries, NUMA_NO_NODE, 0);
+       if (!pages)
+               return 1;
+       return 0;
+}
+
+SEC("iter.s/bpf_map")
+__success __log_level(2)
+int iter_maps1(struct bpf_iter__bpf_map *ctx)
+{
+       struct bpf_map *map = ctx->map;
+
+       if (!map)
+               return 0;
+       bpf_arena_alloc_pages(map, NULL, map->max_entries, 0, 0);
+       return 0;
+}
+
+SEC("iter.s/bpf_map")
+__failure __msg("expected pointer to STRUCT bpf_map")
+int iter_maps2(struct bpf_iter__bpf_map *ctx)
+{
+       struct seq_file *seq = ctx->meta->seq;
+
+       bpf_arena_alloc_pages((void *)seq, NULL, 1, 0, 0);
+       return 0;
+}
+
+SEC("iter.s/bpf_map")
+__failure __msg("untrusted_ptr_bpf_map")
+int iter_maps3(struct bpf_iter__bpf_map *ctx)
+{
+       struct bpf_map *map = ctx->map;
+
+       if (!map)
+               return 0;
+       bpf_arena_alloc_pages(map->inner_map_meta, NULL, map->max_entries, 0, 0);
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
index ba57601c2a4d5b98fdf5513812efab6f40c389c5..524c38e9cde48feb8977bb26555d29df3656c85a 100644
--- a/tools/testing/selftests/bpf/test_loader.c
+++ b/tools/testing/selftests/bpf/test_loader.c
@@ -501,7 +501,7 @@ static bool is_unpriv_capable_map(struct bpf_map *map)
        }
 }
 
-static int do_prog_test_run(int fd_prog, int *retval)
+static int do_prog_test_run(int fd_prog, int *retval, bool empty_opts)
 {
        __u8 tmp_out[TEST_DATA_LEN << 2] = {};
        __u8 tmp_in[TEST_DATA_LEN] = {};
@@ -514,6 +514,10 @@ static int do_prog_test_run(int fd_prog, int *retval)
                .repeat = 1,
        );
 
+       if (empty_opts) {
+               memset(&topts, 0, sizeof(struct bpf_test_run_opts));
+               topts.sz = sizeof(struct bpf_test_run_opts);
+       }
        err = bpf_prog_test_run_opts(fd_prog, &topts);
        saved_errno = errno;
 
@@ -649,7 +653,8 @@ void run_subtest(struct test_loader *tester,
                        }
                }
 
-               do_prog_test_run(bpf_program__fd(tprog), &retval);
+               do_prog_test_run(bpf_program__fd(tprog), &retval,
+                                bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL ? true : false);
                if (retval != subspec->retval && subspec->retval != POINTER_VALUE) {
                        PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
                        goto tobj_cleanup;