From 47f4f657acd5d04c78c5c5ac7022cba9ce3b4a7d Mon Sep 17 00:00:00 2001
From: Kui-Feng Lee
Date: Fri, 19 Jan 2024 14:49:56 -0800
Subject: [PATCH] bpf: make struct_ops_map support btfs other than btf_vmlinux.

Once new struct_ops can be registered from modules, btf_vmlinux is no
longer the only btf that struct_ops_map would face. st_map should
remember what btf it should use to get type information.

Signed-off-by: Kui-Feng Lee
Link: https://lore.kernel.org/r/20240119225005.668602-6-thinker.li@gmail.com
Signed-off-by: Martin KaFai Lau
---
 kernel/bpf/bpf_struct_ops.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 9774f7824e8b..5ddcca4c4fba 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -46,6 +46,8 @@ struct bpf_struct_ops_map {
 	 * "links[]".
 	 */
 	void *image;
+	/* The owner moduler's btf. */
+	struct btf *btf;
 	/* uvalue->data stores the kernel struct
 	 * (e.g. tcp_congestion_ops) that is more useful
 	 * to userspace than the kvalue.  For example,
@@ -314,7 +316,7 @@ static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
 	}
 }
 
-static int check_zero_holes(const struct btf_type *t, void *data)
+static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
 {
 	const struct btf_member *member;
 	u32 i, moff, msize, prev_mend = 0;
@@ -326,8 +328,8 @@ static int check_zero_holes(const struct btf_type *t, void *data)
 		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
 			return -EINVAL;
 
-		mtype = btf_type_by_id(btf_vmlinux, member->type);
-		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
+		mtype = btf_type_by_id(btf, member->type);
+		mtype = btf_resolve_size(btf, mtype, &msize);
 		if (IS_ERR(mtype))
 			return PTR_ERR(mtype);
 		prev_mend = moff + msize;
@@ -401,12 +403,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 	if (*(u32 *)key != 0)
 		return -E2BIG;
 
-	err = check_zero_holes(st_ops_desc->value_type, value);
+	err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
 	if (err)
 		return err;
 
 	uvalue = value;
-	err = check_zero_holes(t, uvalue->data);
+	err = check_zero_holes(st_map->btf, t, uvalue->data);
 	if (err)
 		return err;
 
@@ -442,7 +444,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 		u32 moff;
 
 		moff = __btf_member_bit_offset(t, member) / 8;
-		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
+		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
 		if (ptype == module_type) {
 			if (*(void **)(udata + moff))
 				goto reset_unlock;
@@ -467,8 +469,8 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 		if (!ptype || !btf_type_is_func_proto(ptype)) {
 			u32 msize;
 
-			mtype = btf_type_by_id(btf_vmlinux, member->type);
-			mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
+			mtype = btf_type_by_id(st_map->btf, member->type);
+			mtype = btf_resolve_size(st_map->btf, mtype, &msize);
 			if (IS_ERR(mtype)) {
 				err = PTR_ERR(mtype);
 				goto reset_unlock;
@@ -607,6 +609,7 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
 static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
 					     struct seq_file *m)
 {
+	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
 	void *value;
 	int err;
 
@@ -616,7 +619,8 @@ static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
 
 	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
 	if (!err) {
-		btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
+		btf_type_seq_show(st_map->btf,
+				  map->btf_vmlinux_value_type_id,
 				  value, m);
 		seq_puts(m, "\n");
 	}
@@ -726,6 +730,8 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
 		return ERR_PTR(-ENOMEM);
 	}
 
+	st_map->btf = btf_vmlinux;
+
 	mutex_init(&st_map->lock);
 	bpf_map_init_from_attr(map, attr);
 
-- 
2.25.1
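
The pattern the patch applies can be illustrated outside the kernel tree: helpers stop consulting a single global type handle (btf_vmlinux in the kernel) and instead receive whichever btf the map itself caches, so a module-supplied BTF can be plugged in later. The following is a minimal standalone userspace C sketch of that idea only; fake_btf, fake_struct_ops_map, and show_type are hypothetical stand-ins, not kernel APIs.

#include <stdio.h>

struct fake_btf {                /* stand-in for struct btf */
	const char *name;
};

static struct fake_btf vmlinux_btf = { .name = "vmlinux" };

struct fake_struct_ops_map {     /* stand-in for struct bpf_struct_ops_map */
	struct fake_btf *btf;    /* the owner's btf, like st_map->btf */
};

/* Before the change, a helper like this would implicitly use the global
 * vmlinux_btf; after it, the caller passes the btf cached in the map.
 */
static void show_type(const struct fake_btf *btf, int type_id)
{
	printf("resolving type %d against %s BTF\n", type_id, btf->name);
}

int main(void)
{
	/* Mirrors bpf_struct_ops_map_alloc() defaulting st_map->btf. */
	struct fake_struct_ops_map map = { .btf = &vmlinux_btf };

	/* Mirrors check_zero_holes(st_map->btf, ...) after the patch. */
	show_type(map.btf, 42);
	return 0;
}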