selftests/bpf: attach struct_ops maps before test prog runs
authorEduard Zingerman <eddyz87@gmail.com>
Thu, 29 Aug 2024 21:08:27 +0000 (14:08 -0700)
committerAlexei Starovoitov <ast@kernel.org>
Fri, 30 Aug 2024 01:15:45 +0000 (18:15 -0700)
In test_loader based tests, call bpf_map__attach_struct_ops()
before bpf_prog_test_run_opts() in order to trigger
bpf_struct_ops->reg() callbacks on the kernel side.
This allows using the __retval macro for struct_ops tests.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20240829210833.388152-6-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/test_loader.c

index 4223cffc090e525a33dbf13deb24fd65336c204d..3e9b009580d4e435c689509808b604d54cf7f184 100644 (file)
@@ -890,11 +890,13 @@ void run_subtest(struct test_loader *tester,
 {
        struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
        struct bpf_program *tprog = NULL, *tprog_iter;
+       struct bpf_link *link, *links[32] = {};
        struct test_spec *spec_iter;
        struct cap_state caps = {};
        struct bpf_object *tobj;
        struct bpf_map *map;
        int retval, err, i;
+       int links_cnt = 0;
        bool should_load;
 
        if (!test__start_subtest(subspec->name))
@@ -999,6 +1001,26 @@ void run_subtest(struct test_loader *tester,
                if (restore_capabilities(&caps))
                        goto tobj_cleanup;
 
+               /* Do bpf_map__attach_struct_ops() for each struct_ops map.
+                * This should trigger bpf_struct_ops->reg callback on kernel side.
+                */
+               bpf_object__for_each_map(map, tobj) {
+                       if (!bpf_map__autocreate(map) ||
+                           bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
+                               continue;
+                       if (links_cnt >= ARRAY_SIZE(links)) {
+                               PRINT_FAIL("too many struct_ops maps");
+                               goto tobj_cleanup;
+                       }
+                       link = bpf_map__attach_struct_ops(map);
+                       if (!link) {
+                               PRINT_FAIL("bpf_map__attach_struct_ops failed for map %s: err=%d\n",
+                                          bpf_map__name(map), err);
+                               goto tobj_cleanup;
+                       }
+                       links[links_cnt++] = link;
+               }
+
                if (tester->pre_execution_cb) {
                        err = tester->pre_execution_cb(tobj);
                        if (err) {
@@ -1013,9 +1035,14 @@ void run_subtest(struct test_loader *tester,
                        PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
                        goto tobj_cleanup;
                }
+               /* redo bpf_map__attach_struct_ops for each test */
+               while (links_cnt > 0)
+                       bpf_link__destroy(links[--links_cnt]);
        }
 
 tobj_cleanup:
+       while (links_cnt > 0)
+               bpf_link__destroy(links[--links_cnt]);
        bpf_object__close(tobj);
 subtest_cleanup:
        test__end_subtest();