veristat: Load struct_ops programs only once
author Eduard Zingerman <eddyz87@gmail.com>
Wed, 15 Jan 2025 22:38:35 +0000 (14:38 -0800)
committer Andrii Nakryiko <andrii@kernel.org>
Thu, 16 Jan 2025 23:33:58 +0000 (15:33 -0800)
libbpf automatically adjusts autoload for struct_ops programs,
see libbpf.c:bpf_object_adjust_struct_ops_autoload.

For example, if there is a map:

    SEC(".struct_ops.link")
    struct sched_ext_ops ops = {
        .enqueue = foo,
        .tick = bar,
    };

Both 'foo' and 'bar' would be loaded if 'ops' autocreate is true;
both would be skipped if 'ops' autocreate is false.
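
For instance, toggling the map's autocreate flag through the libbpf API
affects both programs at once (a minimal sketch; the object file name
'sched_ext.bpf.o' and the lack of error handling are illustrative only):

    struct bpf_object *obj;
    struct bpf_map *ops_map;

    obj = bpf_object__open("sched_ext.bpf.o");
    ops_map = bpf_object__find_map_by_name(obj, "ops");
    /* With autocreate disabled libbpf skips loading both 'foo' and
     * 'bar'; with autocreate enabled it loads both.
     */
    bpf_map__set_autocreate(ops_map, false);
    bpf_object__load(obj);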

This means that when veristat processes an object file containing 'ops',
it would load 4 programs in total: both 'foo' and 'bar' are loaded for
each of the two 'process_prog' calls.

The adjustment occurs at object load time, and libbpf remembers the
association between 'ops' and 'foo'/'bar' at object open time.
The only way to persuade libbpf to load just one of the two is to
adjust the map's initial value, so that only one program is referenced.
This patch does exactly that, significantly reducing the time needed to
process object files with a large number of struct_ops programs.
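
Conceptually, once unrelated slots are masked and only 'foo' is being
verified, the map behaves as if it had been declared without the other
program (illustrative sketch reusing the example above):

    SEC(".struct_ops.link")
    struct sched_ext_ops ops = {
        .enqueue = foo,
        /* the '.tick = bar' slot is zeroed, so 'bar' keeps
         * autoload == false
         */
    };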

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20250115223835.919989-1-eddyz87@gmail.com
tools/testing/selftests/bpf/veristat.c

index c72111dfb35d7080ff22823a2185c926b2c1de7a..06af5029885b9df06118d767d07bbc1a1ea47e5a 100644
@@ -1062,6 +1062,41 @@ static int guess_prog_type_by_ctx_name(const char *ctx_name,
        return -ESRCH;
 }
 
+/* Make sure only target program is referenced from struct_ops map,
+ * otherwise libbpf would automatically set autocreate for all
+ * referenced programs.
+ * See libbpf.c:bpf_object_adjust_struct_ops_autoload.
+ */
+static void mask_unrelated_struct_ops_progs(struct bpf_object *obj,
+                                           struct bpf_map *map,
+                                           struct bpf_program *prog)
+{
+       struct btf *btf = bpf_object__btf(obj);
+       const struct btf_type *t, *mt;
+       struct btf_member *m;
+       int i, moff;
+       size_t data_sz, ptr_sz = sizeof(void *);
+       void *data;
+
+       t = btf__type_by_id(btf, bpf_map__btf_value_type_id(map));
+       if (!btf_is_struct(t))
+               return;
+
+       data = bpf_map__initial_value(map, &data_sz);
+       for (i = 0; i < btf_vlen(t); i++) {
+               m = &btf_members(t)[i];
+               mt = btf__type_by_id(btf, m->type);
+               if (!btf_is_ptr(mt))
+                       continue;
+               moff = m->offset / 8;
+               if (moff + ptr_sz > data_sz)
+                       continue;
+               if (memcmp(data + moff, &prog, ptr_sz) == 0)
+                       continue;
+               memset(data + moff, 0, ptr_sz);
+       }
+}
+
 static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const char *filename)
 {
        struct bpf_map *map;
@@ -1077,6 +1112,9 @@ static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const ch
                case BPF_MAP_TYPE_INODE_STORAGE:
                case BPF_MAP_TYPE_CGROUP_STORAGE:
                        break;
+               case BPF_MAP_TYPE_STRUCT_OPS:
+                       mask_unrelated_struct_ops_progs(obj, map, prog);
+                       break;
                default:
                        if (bpf_map__max_entries(map) == 0)
                                bpf_map__set_max_entries(map, 1);