// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/poison.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/kasan.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it should
 * return bpf_map_lookup_elem_proto from its own verifier_ops->get_func_proto()
 * callback, so that the verifier can properly check the arguments.
 *
 * Different map implementations rely on RCU in their lookup/update/delete map
 * methods, so eBPF programs must run under the RCU read lock whenever they are
 * allowed to access maps; hence the rcu_read_lock_held() checks in all three
 * functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
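
/* Minimal sketch (not part of this file) of how a hypothetical subsystem
 * could expose the map helpers from its verifier_ops->get_func_proto()
 * callback, as described in the comment above. The function name and its
 * callers are assumptions made for illustration:
 *
 *	static const struct bpf_func_proto *
 *	my_subsys_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return NULL;	// helper not available to this prog type
 *		}
 *	}
 */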

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
};
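
/* Illustrative sketch (an assumption, not part of this file): push/pop/peek
 * back BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK, which have no key. From the
 * BPF program side the usage looks roughly like:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_QUEUE);
 *		__uint(max_entries, 32);
 *		__uint(value_size, sizeof(__u64));
 *	} q SEC(".maps");
 *
 *	__u64 v = 42;
 *
 *	bpf_map_push_elem(&q, &v, BPF_ANY);	// enqueue
 *	if (!bpf_map_peek_elem(&q, &v))		// inspect head, don't remove
 *		bpf_map_pop_elem(&q, &v);	// dequeue
 */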

BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
}

const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
	.func		= bpf_map_lookup_percpu_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func		= bpf_ktime_get_coarse_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_tai_ns)
{
	/* NMI safe access to clock tai */
	return ktime_get_tai_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
	.func		= bpf_ktime_get_tai_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
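
/* Illustrative sketch (not part of this file): unpacking the return value on
 * the BPF program side. The upper 32 bits carry the tgid (the user-visible
 * process ID) and the lower 32 bits the pid (the user-visible thread ID):
 *
 *	__u64 pid_tgid = bpf_get_current_pid_tgid();
 *	__u32 tgid = pid_tgid >> 32;
 *	__u32 tid  = (__u32)pid_tgid;
 */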

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	/* Verifier guarantees that size > 0 */
	strscpy_pad(buf, task->comm, size);
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	preempt_disable();
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
	preempt_enable();
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id	= BPF_PTR_POISON,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id	= BPF_PTR_POISON,
};
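
/* Illustrative sketch (an assumption, not part of this file): a BPF program
 * embedding struct bpf_spin_lock in a map value to serialize updates; the map
 * "m" and "key" are assumed to exist:
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		__u64 cnt;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&m, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->cnt++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 */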

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->record->spin_lock_off;
	else
		lock = dst + map->record->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
#endif /* CONFIG_CGROUPS */

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
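
/* Illustrative sketch (not part of this file): calling bpf_strtol from a BPF
 * program, e.g. on text read from a sysctl or user buffer. On success the
 * helper returns the number of characters consumed, including leading
 * whitespace and the sign:
 *
 *	char buf[] = "  -123";
 *	long val;
 *
 *	int n = bpf_strtol(buf, sizeof(buf) - 1, 0, &val);
 *	// n == 6, val == -123; flags carry the base (0 = auto, or 8/10/16)
 */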

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

static const struct bpf_func_proto bpf_strncmp_proto = {
	.func		= bpf_strncmp,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
};

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.might_sleep	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
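
/* Illustrative sketch (an assumption, not part of this file): because of
 * .might_sleep, this helper is only usable from sleepable programs, e.g. a
 * sleepable LSM hook; the hook and field choice below are illustrative:
 *
 *	SEC("lsm.s/bprm_committed_creds")	// ".s" marks it sleepable
 *	int BPF_PROG(check_exec, struct linux_binprm *bprm)
 *	{
 *		char arg0[64] = {};
 *
 *		// may fault and sleep; on error arg0 is zeroed
 *		bpf_copy_from_user(arg0, sizeof(arg0), (const void *)bprm->p);
 *		return 0;
 *	}
 */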

BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{
	int ret;

	/* flags is not used yet */
	if (unlikely(flags))
		return -EINVAL;

	if (unlikely(!size))
		return 0;

	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
	if (ret == size)
		return 0;

	memset(dst, 0, size);
	/* Return -EFAULT for partial read */
	return ret < 0 ? ret : -EFAULT;
}

const struct bpf_func_proto bpf_copy_from_user_task_proto = {
	.func		= bpf_copy_from_user_task,
	.gpl_only	= true,
	.might_sleep	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_BTF_ID,
	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg5_type	= ARG_ANYTHING
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};
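
/* Illustrative sketch (not part of this file): both helpers take a pointer to
 * a kernel per-CPU variable declared as a BTF ksym on the BPF side, e.g. a
 * tracing program peeking at the scheduler's runqueues:
 *
 *	extern const struct rq runqueues __ksym;	// per-CPU kernel var
 *
 *	const struct rq *rq  = bpf_this_cpu_ptr(&runqueues);
 *	const struct rq *rq0 = bpf_per_cpu_ptr(&runqueues, 0);
 *	if (rq0)	// NULL when the cpu index is out of range
 *		...
 */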

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				 size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BIN_ARGS	512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3
struct bpf_bprintf_buffers {
	char bin_args[MAX_BPRINTF_BIN_ARGS];
	char buf[MAX_BPRINTF_BUF];
};

static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
{
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);

	return 0;
}

void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
{
	if (!data->bin_args && !data->buf)
		return;
	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
		return;
	this_cpu_dec(bpf_bprintf_nest_level);
	preempt_enable();
}

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when data->get_bin_args is false
 * - Arguments preparation: in addition to the above verification, it writes in
 *   data->bin_args a binary representation of arguments usable by bstr_printf
 *   where pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 num_args, struct bpf_bprintf_data *data)
{
	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	struct bpf_bprintf_buffers *buffers = NULL;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (get_buffers && try_get_buffers(&buffers))
		return -EBUSY;

	if (data->get_bin_args) {
		if (num_args)
			tmp_buf = buffers->bin_args;
		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
		data->bin_args = (u32 *)tmp_buf;
	}

	if (data->get_buf)
		data->buf = buffers->buf;

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf)  {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings; ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup(data);
	return err;
}

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args	= true,
	};
	int err, num_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
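
/* Illustrative sketch (not part of this file): on the BPF side the variadic
 * arguments arrive as an array of u64 values plus its byte size, which is
 * what the BPF_SNPRINTF convenience macro in libbpf's bpf_helpers.h wraps:
 *
 *	char out[64];
 *	__u64 args[] = { (__u64)pid, (__u64)(long)comm };	// names assumed
 *
 *	bpf_snprintf(out, sizeof(out), "pid %d comm %s", args, sizeof(args));
 */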

/* BPF map elements can contain 'struct bpf_timer'.
 * Such a map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of the map element allocation
 * and it is zero initialized.
 * That space is used to keep 'struct bpf_timer_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits the hrtimer, and
 * remembers the 'struct bpf_map *' pointer it is part of.
 * bpf_timer_set_callback() increments the prog refcnt and assigns the bpf
 * callback_fn.
 * bpf_timer_start() arms the timer.
 * If the user space reference to a map goes to zero at this point, the
 * ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing the progs' refcnts.
 * bpf_timer_cancel() cancels the timer and decrements the prog's refcnt.
 * Inner maps can contain bpf timers as well; ops->map_release_uref frees
 * the timers when an inner map is replaced or deleted by user space.
 */
struct bpf_hrtimer {
	struct hrtimer timer;
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
};

/* the actual struct hidden inside uapi struct bpf_timer */
struct bpf_timer_kern {
	struct bpf_hrtimer *timer;
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->map;
	void *value = t->value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clockid_t clockid = flags & (MAX_CLOCKS - 1);
	struct bpf_hrtimer *t;
	int ret = 0;

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));

	if (in_nmi())
		return -EOPNOTSUPP;

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		ret = -EPERM;
		goto out;
	}
	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	t->value = (void *)timer - map->record->timer_off;
	t->map = map;
	t->prog = NULL;
	rcu_assign_pointer(t->callback_fn, NULL);
	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
	t->timer.function = bpf_timer_cb;
	timer->timer = t;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func		= bpf_timer_init,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&t->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = t->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		t->prog = prog;
	}
	rcu_assign_pointer(t->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func		= bpf_timer_set_callback,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_PTR_TO_FUNC,
};

BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;
	enum hrtimer_mode mode;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->prog) {
		ret = -EINVAL;
		goto out;
	}

	if (flags & BPF_F_TIMER_ABS)
		mode = HRTIMER_MODE_ABS_SOFT;
	else
		mode = HRTIMER_MODE_REL_SOFT;

	if (flags & BPF_F_TIMER_CPU_PIN)
		mode |= HRTIMER_MODE_PINNED;

	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func		= bpf_timer_start,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

static void drop_prog_refcnt(struct bpf_hrtimer *t)
{
	struct bpf_prog *prog = t->prog;

	if (prog) {
		bpf_prog_put(prog);
		t->prog = NULL;
		rcu_assign_pointer(t->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
{
	struct bpf_hrtimer *t;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (this_cpu_read(hrtimer_running) == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock
		 * since it waits for callback_fn to finish
		 */
		ret = -EDEADLK;
		goto out;
	}
	drop_prog_refcnt(t);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func		= bpf_timer_cancel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_TIMER,
};
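
/* Illustrative sketch (an assumption, not part of this file): the expected
 * BPF-side usage of the timer helpers, with the timer embedded in a map value
 * of an assumed map "m":
 *
 *	struct elem { struct bpf_timer t; };
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;	// verifier requires a zero return
 *	}
 *
 *	struct elem *val = bpf_map_lookup_elem(&m, &key);
 *	if (val) {
 *		bpf_timer_init(&val->t, &m, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&val->t, timer_cb);
 *		bpf_timer_start(&val->t, 1000000, 0);	// fires in 1ms
 *	}
 */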

/* This function is called by map_delete/update_elem for an individual element
 * and by ops->map_release_uref when the user space reference to a map reaches
 * zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_timer_kern *timer = val;
	struct bpf_hrtimer *t;

	/* Performance optimization: read timer->timer without lock first. */
	if (!READ_ONCE(timer->timer))
		return;

	__bpf_spin_lock_irqsave(&timer->lock);
	/* re-read it under lock */
	t = timer->timer;
	if (!t)
		goto out;
	drop_prog_refcnt(t);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	timer->timer = NULL;
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	if (!t)
		return;
	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
	 * right after for both preallocated and non-preallocated maps.
	 * The timer->timer = NULL was already done and no code path can
	 * see address 't' anymore.
	 *
	 * Check that bpf_map_delete/update_elem() wasn't called from timer
	 * callback_fn. In such case don't call hrtimer_cancel() (since it will
	 * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
	 * return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
	 * since timer->timer = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 */
	if (this_cpu_read(hrtimer_running) != t)
		hrtimer_cancel(&t->timer);
	kfree(t);
}

BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
{
	unsigned long *kptr = map_value;

	return xchg(kptr, (unsigned long)ptr);
}

/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote the type that the verifier will determine.
 */
static const struct bpf_func_proto bpf_kptr_xchg_proto = {
	.func		= bpf_kptr_xchg,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_BTF_ID_OR_NULL,
	.ret_btf_id	= BPF_PTR_POISON,
	.arg1_type	= ARG_PTR_TO_KPTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
	.arg2_btf_id	= BPF_PTR_POISON,
};
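
/* Illustrative sketch (an assumption, not part of this file): atomically
 * moving a referenced kptr in and out of a map value field annotated with
 * __kptr; "acquired" is an assumed pointer obtained from an acquire kfunc:
 *
 *	struct map_value { struct task_struct __kptr *task; };
 *
 *	struct task_struct *old;
 *
 *	old = bpf_kptr_xchg(&v->task, acquired);	// store, get previous
 *	if (old)
 *		bpf_task_release(old);			// release old owner
 */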
1423 | ||
263ae152 JK |
1424 | /* Since the upper 8 bits of dynptr->size is reserved, the |
1425 | * maximum supported size is 2^24 - 1. | |
1426 | */ | |
1427 | #define DYNPTR_MAX_SIZE ((1UL << 24) - 1) | |
1428 | #define DYNPTR_TYPE_SHIFT 28 | |
13bbbfbe JK |
1429 | #define DYNPTR_SIZE_MASK 0xFFFFFF |
1430 | #define DYNPTR_RDONLY_BIT BIT(31) | |
1431 | ||
540ccf96 | 1432 | static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr) |
13bbbfbe JK |
1433 | { |
1434 | return ptr->size & DYNPTR_RDONLY_BIT; | |
1435 | } | |
263ae152 | 1436 | |
b5964b96 JK |
1437 | void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr) |
1438 | { | |
1439 | ptr->size |= DYNPTR_RDONLY_BIT; | |
1440 | } | |
1441 | ||
263ae152 JK |
1442 | static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type) |
1443 | { | |
1444 | ptr->size |= type << DYNPTR_TYPE_SHIFT; | |
1445 | } | |
1446 | ||
b5964b96 JK |
1447 | static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr) |
1448 | { | |
1449 | return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT; | |
1450 | } | |
1451 | ||
26662d73 | 1452 | u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr) |
13bbbfbe JK |
1453 | { |
1454 | return ptr->size & DYNPTR_SIZE_MASK; | |
1455 | } | |
1456 | ||
987d0242 JK |
1457 | static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size) |
1458 | { | |
1459 | u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK; | |
1460 | ||
1461 | ptr->size = new_size | metadata; | |
1462 | } | |
1463 | ||
bc34dee6 | 1464 | int bpf_dynptr_check_size(u32 size) |
263ae152 JK |
1465 | { |
1466 | return size > DYNPTR_MAX_SIZE ? -E2BIG : 0; | |
1467 | } | |
1468 | ||
bc34dee6 JK |
1469 | void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, |
1470 | enum bpf_dynptr_type type, u32 offset, u32 size) | |
263ae152 JK |
1471 | { |
1472 | ptr->data = data; | |
1473 | ptr->offset = offset; | |
1474 | ptr->size = size; | |
1475 | bpf_dynptr_set_type(ptr, type); | |
1476 | } | |
1477 | ||
bc34dee6 | 1478 | void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) |
263ae152 JK |
1479 | { |
1480 | memset(ptr, 0, sizeof(*ptr)); | |
1481 | } | |
1482 | ||
27060531 | 1483 | static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len) |
13bbbfbe | 1484 | { |
26662d73 | 1485 | u32 size = __bpf_dynptr_size(ptr); |
13bbbfbe JK |
1486 | |
1487 | if (len > size || offset > size - len) | |
1488 | return -E2BIG; | |
1489 | ||
1490 | return 0; | |
1491 | } | |
1492 | ||
263ae152 JK |
1493 | BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr) |
1494 | { | |
1495 | int err; | |
1496 | ||
00f14641 RS |
1497 | BTF_TYPE_EMIT(struct bpf_dynptr); |
1498 | ||
263ae152 JK |
1499 | err = bpf_dynptr_check_size(size); |
1500 | if (err) | |
1501 | goto error; | |
1502 | ||
1503 | /* flags is currently unsupported */ | |
1504 | if (flags) { | |
1505 | err = -EINVAL; | |
1506 | goto error; | |
1507 | } | |
1508 | ||
1509 | bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size); | |
1510 | ||
1511 | return 0; | |
1512 | ||
1513 | error: | |
1514 | bpf_dynptr_set_null(ptr); | |
1515 | return err; | |
1516 | } | |
1517 | ||
dc368e1c | 1518 | static const struct bpf_func_proto bpf_dynptr_from_mem_proto = { |
263ae152 JK |
1519 | .func = bpf_dynptr_from_mem, |
1520 | .gpl_only = false, | |
1521 | .ret_type = RET_INTEGER, | |
1522 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
1523 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
1524 | .arg3_type = ARG_ANYTHING, | |
1525 | .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT, | |
1526 | }; | |
1527 | ||
27060531 | 1528 | BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src, |
f8d3da4e | 1529 | u32, offset, u64, flags) |
13bbbfbe | 1530 | { |
b5964b96 | 1531 | enum bpf_dynptr_type type; |
13bbbfbe JK |
1532 | int err; |
1533 | ||
f8d3da4e | 1534 | if (!src->data || flags) |
13bbbfbe JK |
1535 | return -EINVAL; |
1536 | ||
1537 | err = bpf_dynptr_check_off_len(src, offset, len); | |
1538 | if (err) | |
1539 | return err; | |
1540 | ||
b5964b96 | 1541 | type = bpf_dynptr_get_type(src); |
13bbbfbe | 1542 | |
b5964b96 JK |
1543 | switch (type) { |
1544 | case BPF_DYNPTR_TYPE_LOCAL: | |
1545 | case BPF_DYNPTR_TYPE_RINGBUF: | |
1546 | /* Source and destination may possibly overlap, hence use memmove to | |
1547 | * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr | |
1548 | * pointing to overlapping PTR_TO_MAP_VALUE regions. | |
1549 | */ | |
1550 | memmove(dst, src->data + src->offset + offset, len); | |
1551 | return 0; | |
1552 | case BPF_DYNPTR_TYPE_SKB: | |
1553 | return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len); | |
05421aec JK |
1554 | case BPF_DYNPTR_TYPE_XDP: |
1555 | return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len); | |
b5964b96 JK |
1556 | default: |
1557 | WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type); | |
1558 | return -EFAULT; | |
1559 | } | |
13bbbfbe JK |
1560 | } |
1561 | ||
dc368e1c | 1562 | static const struct bpf_func_proto bpf_dynptr_read_proto = { |
13bbbfbe JK |
1563 | .func = bpf_dynptr_read, |
1564 | .gpl_only = false, | |
1565 | .ret_type = RET_INTEGER, | |
1566 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, | |
1567 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, | |
27060531 | 1568 | .arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, |
13bbbfbe | 1569 | .arg4_type = ARG_ANYTHING, |
f8d3da4e | 1570 | .arg5_type = ARG_ANYTHING, |
13bbbfbe JK |
1571 | }; |
1572 | ||
27060531 | 1573 | BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src, |
f8d3da4e | 1574 | u32, len, u64, flags) |
13bbbfbe | 1575 | { |
b5964b96 | 1576 | enum bpf_dynptr_type type; |
13bbbfbe JK |
1577 | int err; |
1578 | ||
540ccf96 | 1579 | if (!dst->data || __bpf_dynptr_is_rdonly(dst)) |
13bbbfbe JK |
1580 | return -EINVAL; |
1581 | ||
1582 | err = bpf_dynptr_check_off_len(dst, offset, len); | |
1583 | if (err) | |
1584 | return err; | |
1585 | ||
b5964b96 | 1586 | type = bpf_dynptr_get_type(dst); |
13bbbfbe | 1587 | |
b5964b96 JK |
1588 | switch (type) { |
1589 | case BPF_DYNPTR_TYPE_LOCAL: | |
1590 | case BPF_DYNPTR_TYPE_RINGBUF: | |
1591 | if (flags) | |
1592 | return -EINVAL; | |
1593 | /* Source and destination may overlap, hence use memmove to | |
1594 | * copy the data. E.g. bpf_dynptr_from_mem may create two dynptrs | |
1595 | * pointing to overlapping PTR_TO_MAP_VALUE regions. | |
1596 | */ | |
1597 | memmove(dst->data + dst->offset + offset, src, len); | |
1598 | return 0; | |
1599 | case BPF_DYNPTR_TYPE_SKB: | |
1600 | return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len, | |
1601 | flags); | |
05421aec JK |
1602 | case BPF_DYNPTR_TYPE_XDP: |
1603 | if (flags) | |
1604 | return -EINVAL; | |
1605 | return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len); | |
b5964b96 JK |
1606 | default: |
1607 | WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type); | |
1608 | return -EFAULT; | |
1609 | } | |
13bbbfbe JK |
1610 | } |
1611 | ||
dc368e1c | 1612 | static const struct bpf_func_proto bpf_dynptr_write_proto = { |
13bbbfbe JK |
1613 | .func = bpf_dynptr_write, |
1614 | .gpl_only = false, | |
1615 | .ret_type = RET_INTEGER, | |
27060531 | 1616 | .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, |
13bbbfbe JK |
1617 | .arg2_type = ARG_ANYTHING, |
1618 | .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, | |
1619 | .arg4_type = ARG_CONST_SIZE_OR_ZERO, | |
f8d3da4e | 1620 | .arg5_type = ARG_ANYTHING, |
13bbbfbe JK |
1621 | }; |
1622 | ||
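/* Illustrative BPF-side usage of bpf_dynptr_read()/bpf_dynptr_write() (a
 * sketch, not part of this file): both helpers bounds-check every access
 * against the dynptr's size via bpf_dynptr_check_off_len() above:
 *
 *	__u64 val;
 *
 *	if (bpf_dynptr_read(&val, sizeof(val), &ptr, 8, 0))
 *		return 0;
 *	val++;
 *	if (bpf_dynptr_write(&ptr, 8, &val, sizeof(val), 0))
 *		return 0;
 */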
27060531 | 1623 | BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len) |
34d4ef57 | 1624 | { |
b5964b96 | 1625 | enum bpf_dynptr_type type; |
34d4ef57 JK |
1626 | int err; |
1627 | ||
1628 | if (!ptr->data) | |
1629 | return 0; | |
1630 | ||
1631 | err = bpf_dynptr_check_off_len(ptr, offset, len); | |
1632 | if (err) | |
1633 | return 0; | |
1634 | ||
540ccf96 | 1635 | if (__bpf_dynptr_is_rdonly(ptr)) |
34d4ef57 JK |
1636 | return 0; |
1637 | ||
b5964b96 JK |
1638 | type = bpf_dynptr_get_type(ptr); |
1639 | ||
1640 | switch (type) { | |
1641 | case BPF_DYNPTR_TYPE_LOCAL: | |
1642 | case BPF_DYNPTR_TYPE_RINGBUF: | |
1643 | return (unsigned long)(ptr->data + ptr->offset + offset); | |
1644 | case BPF_DYNPTR_TYPE_SKB: | |
05421aec JK |
1645 | case BPF_DYNPTR_TYPE_XDP: |
1646 | /* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */ | |
b5964b96 JK |
1647 | return 0; |
1648 | default: | |
1649 | WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type); | |
1650 | return 0; | |
1651 | } | |
34d4ef57 JK |
1652 | } |
1653 | ||
dc368e1c | 1654 | static const struct bpf_func_proto bpf_dynptr_data_proto = { |
34d4ef57 JK |
1655 | .func = bpf_dynptr_data, |
1656 | .gpl_only = false, | |
1657 | .ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL, | |
27060531 | 1658 | .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, |
34d4ef57 JK |
1659 | .arg2_type = ARG_ANYTHING, |
1660 | .arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO, | |
1661 | }; | |
1662 | ||
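/* Illustrative BPF-side usage of bpf_dynptr_data() (a sketch, not part of
 * this file): only local and ringbuf dynptrs can hand out a direct data
 * pointer; skb and xdp dynptrs must use bpf_dynptr_slice[_rdwr]() instead:
 *
 *	__u8 *p = bpf_dynptr_data(&ptr, 0, 16);
 *
 *	if (!p)
 *		return 0;
 *	p[0] = 0xaa;
 *
 * The returned pointer is valid for exactly the 16 requested bytes.
 */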
f470378c | 1663 | const struct bpf_func_proto bpf_get_current_task_proto __weak; |
a396eda5 | 1664 | const struct bpf_func_proto bpf_get_current_task_btf_proto __weak; |
f470378c JF |
1665 | const struct bpf_func_proto bpf_probe_read_user_proto __weak; |
1666 | const struct bpf_func_proto bpf_probe_read_user_str_proto __weak; | |
1667 | const struct bpf_func_proto bpf_probe_read_kernel_proto __weak; | |
1668 | const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak; | |
dd6e10fb | 1669 | const struct bpf_func_proto bpf_task_pt_regs_proto __weak; |
f470378c | 1670 | |
6890896b SF |
1671 | const struct bpf_func_proto * |
1672 | bpf_base_func_proto(enum bpf_func_id func_id) | |
1673 | { | |
1674 | switch (func_id) { | |
1675 | case BPF_FUNC_map_lookup_elem: | |
1676 | return &bpf_map_lookup_elem_proto; | |
1677 | case BPF_FUNC_map_update_elem: | |
1678 | return &bpf_map_update_elem_proto; | |
1679 | case BPF_FUNC_map_delete_elem: | |
1680 | return &bpf_map_delete_elem_proto; | |
1681 | case BPF_FUNC_map_push_elem: | |
1682 | return &bpf_map_push_elem_proto; | |
1683 | case BPF_FUNC_map_pop_elem: | |
1684 | return &bpf_map_pop_elem_proto; | |
1685 | case BPF_FUNC_map_peek_elem: | |
1686 | return &bpf_map_peek_elem_proto; | |
07343110 FZ |
1687 | case BPF_FUNC_map_lookup_percpu_elem: |
1688 | return &bpf_map_lookup_percpu_elem_proto; | |
6890896b SF |
1689 | case BPF_FUNC_get_prandom_u32: |
1690 | return &bpf_get_prandom_u32_proto; | |
1691 | case BPF_FUNC_get_smp_processor_id: | |
1692 | return &bpf_get_raw_smp_processor_id_proto; | |
1693 | case BPF_FUNC_get_numa_node_id: | |
1694 | return &bpf_get_numa_node_id_proto; | |
1695 | case BPF_FUNC_tail_call: | |
1696 | return &bpf_tail_call_proto; | |
1697 | case BPF_FUNC_ktime_get_ns: | |
1698 | return &bpf_ktime_get_ns_proto; | |
71d19214 MŻ |
1699 | case BPF_FUNC_ktime_get_boot_ns: |
1700 | return &bpf_ktime_get_boot_ns_proto; | |
c8996c98 JDB |
1701 | case BPF_FUNC_ktime_get_tai_ns: |
1702 | return &bpf_ktime_get_tai_ns_proto; | |
457f4436 AN |
1703 | case BPF_FUNC_ringbuf_output: |
1704 | return &bpf_ringbuf_output_proto; | |
1705 | case BPF_FUNC_ringbuf_reserve: | |
1706 | return &bpf_ringbuf_reserve_proto; | |
1707 | case BPF_FUNC_ringbuf_submit: | |
1708 | return &bpf_ringbuf_submit_proto; | |
1709 | case BPF_FUNC_ringbuf_discard: | |
1710 | return &bpf_ringbuf_discard_proto; | |
1711 | case BPF_FUNC_ringbuf_query: | |
1712 | return &bpf_ringbuf_query_proto; | |
c5fb1993 HT |
1713 | case BPF_FUNC_strncmp: |
1714 | return &bpf_strncmp_proto; | |
8a67f2de SF |
1715 | case BPF_FUNC_strtol: |
1716 | return &bpf_strtol_proto; | |
1717 | case BPF_FUNC_strtoul: | |
1718 | return &bpf_strtoul_proto; | |
6890896b SF |
1719 | default: |
1720 | break; | |
1721 | } | |
1722 | ||
2c78ee89 | 1723 | if (!bpf_capable()) |
6890896b SF |
1724 | return NULL; |
1725 | ||
1726 | switch (func_id) { | |
1727 | case BPF_FUNC_spin_lock: | |
1728 | return &bpf_spin_lock_proto; | |
1729 | case BPF_FUNC_spin_unlock: | |
1730 | return &bpf_spin_unlock_proto; | |
6890896b SF |
1731 | case BPF_FUNC_jiffies64: |
1732 | return &bpf_jiffies64_proto; | |
b7906b70 | 1733 | case BPF_FUNC_per_cpu_ptr: |
eaa6bcb7 | 1734 | return &bpf_per_cpu_ptr_proto; |
b7906b70 | 1735 | case BPF_FUNC_this_cpu_ptr: |
63d9b80d | 1736 | return &bpf_this_cpu_ptr_proto; |
b00628b1 AS |
1737 | case BPF_FUNC_timer_init: |
1738 | return &bpf_timer_init_proto; | |
1739 | case BPF_FUNC_timer_set_callback: | |
1740 | return &bpf_timer_set_callback_proto; | |
1741 | case BPF_FUNC_timer_start: | |
1742 | return &bpf_timer_start_proto; | |
1743 | case BPF_FUNC_timer_cancel: | |
1744 | return &bpf_timer_cancel_proto; | |
c0a5a21c KKD |
1745 | case BPF_FUNC_kptr_xchg: |
1746 | return &bpf_kptr_xchg_proto; | |
5679ff2f KKD |
1747 | case BPF_FUNC_for_each_map_elem: |
1748 | return &bpf_for_each_map_elem_proto; | |
1749 | case BPF_FUNC_loop: | |
1750 | return &bpf_loop_proto; | |
20571567 DV |
1751 | case BPF_FUNC_user_ringbuf_drain: |
1752 | return &bpf_user_ringbuf_drain_proto; | |
8addbfc7 KKD |
1753 | case BPF_FUNC_ringbuf_reserve_dynptr: |
1754 | return &bpf_ringbuf_reserve_dynptr_proto; | |
1755 | case BPF_FUNC_ringbuf_submit_dynptr: | |
1756 | return &bpf_ringbuf_submit_dynptr_proto; | |
1757 | case BPF_FUNC_ringbuf_discard_dynptr: | |
1758 | return &bpf_ringbuf_discard_dynptr_proto; | |
1759 | case BPF_FUNC_dynptr_from_mem: | |
1760 | return &bpf_dynptr_from_mem_proto; | |
1761 | case BPF_FUNC_dynptr_read: | |
1762 | return &bpf_dynptr_read_proto; | |
1763 | case BPF_FUNC_dynptr_write: | |
1764 | return &bpf_dynptr_write_proto; | |
1765 | case BPF_FUNC_dynptr_data: | |
1766 | return &bpf_dynptr_data_proto; | |
c4bcfb38 YS |
1767 | #ifdef CONFIG_CGROUPS |
1768 | case BPF_FUNC_cgrp_storage_get: | |
1769 | return &bpf_cgrp_storage_get_proto; | |
1770 | case BPF_FUNC_cgrp_storage_delete: | |
1771 | return &bpf_cgrp_storage_delete_proto; | |
c501bf55 TH |
1772 | case BPF_FUNC_get_current_cgroup_id: |
1773 | return &bpf_get_current_cgroup_id_proto; | |
1774 | case BPF_FUNC_get_current_ancestor_cgroup_id: | |
1775 | return &bpf_get_current_ancestor_cgroup_id_proto; | |
c4bcfb38 | 1776 | #endif |
f470378c JF |
1777 | default: |
1778 | break; | |
1779 | } | |
1780 | ||
1781 | if (!perfmon_capable()) | |
1782 | return NULL; | |
1783 | ||
1784 | switch (func_id) { | |
61ca36c8 TK |
1785 | case BPF_FUNC_trace_printk: |
1786 | return bpf_get_trace_printk_proto(); | |
f470378c JF |
1787 | case BPF_FUNC_get_current_task: |
1788 | return &bpf_get_current_task_proto; | |
a396eda5 DX |
1789 | case BPF_FUNC_get_current_task_btf: |
1790 | return &bpf_get_current_task_btf_proto; | |
f470378c JF |
1791 | case BPF_FUNC_probe_read_user: |
1792 | return &bpf_probe_read_user_proto; | |
1793 | case BPF_FUNC_probe_read_kernel: | |
71330842 | 1794 | return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? |
ff40e510 | 1795 | NULL : &bpf_probe_read_kernel_proto; |
f470378c JF |
1796 | case BPF_FUNC_probe_read_user_str: |
1797 | return &bpf_probe_read_user_str_proto; | |
1798 | case BPF_FUNC_probe_read_kernel_str: | |
71330842 | 1799 | return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? |
ff40e510 | 1800 | NULL : &bpf_probe_read_kernel_str_proto; |
61ca36c8 TK |
1801 | case BPF_FUNC_snprintf_btf: |
1802 | return &bpf_snprintf_btf_proto; | |
7b15523a FR |
1803 | case BPF_FUNC_snprintf: |
1804 | return &bpf_snprintf_proto; | |
dd6e10fb DX |
1805 | case BPF_FUNC_task_pt_regs: |
1806 | return &bpf_task_pt_regs_proto; | |
10aceb62 DM |
1807 | case BPF_FUNC_trace_vprintk: |
1808 | return bpf_get_trace_vprintk_proto(); | |
6890896b SF |
1809 | default: |
1810 | return NULL; | |
1811 | } | |
1812 | } | |
13379059 | 1813 | |
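/* Illustrative pattern (a sketch, not part of this file; "example_" names
 * are hypothetical): a subsystem's verifier_ops->get_func_proto() callback
 * typically resolves its own helpers first and then falls back to the
 * capability-gated base set above:
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_perf_event_output:
 *			return &example_event_output_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */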
f0c5941f KKD |
1814 | void bpf_list_head_free(const struct btf_field *field, void *list_head, |
1815 | struct bpf_spin_lock *spin_lock) | |
1816 | { | |
1817 | struct list_head *head = list_head, *orig_head = list_head; | |
1818 | ||
1819 | BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head)); | |
1820 | BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head)); | |
1821 | ||
1822 | /* Do the actual list draining outside the lock so the lock is not held | |
1823 | * for too long, and to prevent deadlocks if tracing programs end up | |
1824 | * executing on entry/exit of functions called inside the critical | |
1825 | * section and then do map ops that call bpf_list_head_free for | |
1826 | * the same map value again. | |
1827 | */ | |
1828 | __bpf_spin_lock_irqsave(spin_lock); | |
1829 | if (!head->next || list_empty(head)) | |
1830 | goto unlock; | |
1831 | head = head->next; | |
1832 | unlock: | |
1833 | INIT_LIST_HEAD(orig_head); | |
1834 | __bpf_spin_unlock_irqrestore(spin_lock); | |
1835 | ||
1836 | while (head != orig_head) { | |
1837 | void *obj = head; | |
1838 | ||
30465003 | 1839 | obj -= field->graph_root.node_offset; |
f0c5941f | 1840 | head = head->next; |
958cf2e2 KKD |
1841 | /* The contained type can also have resources, including a |
1842 | * bpf_list_head which needs to be freed. | |
1843 | */ | |
958cf2e2 | 1844 | migrate_disable(); |
1512217c | 1845 | __bpf_obj_drop_impl(obj, field->graph_root.value_rec); |
958cf2e2 | 1846 | migrate_enable(); |
f0c5941f KKD |
1847 | } |
1848 | } | |
1849 | ||
9c395c1b DM |
1850 | /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are |
1851 | * 'rb_node *', so the field name of the rb_node within the containing | |
1852 | * struct is not needed. | |
1853 | * | |
1854 | * Since bpf_rb_tree's node type has a corresponding struct btf_field with | |
1855 | * graph_root.node_offset, it's not necessary to know the field name | |
1856 | * or type of the node struct. | |
1857 | */ | |
1858 | #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \ | |
1859 | for (pos = rb_first_postorder(root); \ | |
1860 | pos && ({ n = rb_next_postorder(pos); 1; }); \ | |
1861 | pos = n) | |
1862 | ||
1863 | void bpf_rb_root_free(const struct btf_field *field, void *rb_root, | |
1864 | struct bpf_spin_lock *spin_lock) | |
1865 | { | |
1866 | struct rb_root_cached orig_root, *root = rb_root; | |
1867 | struct rb_node *pos, *n; | |
1868 | void *obj; | |
1869 | ||
1870 | BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root)); | |
1871 | BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root)); | |
1872 | ||
1873 | __bpf_spin_lock_irqsave(spin_lock); | |
1874 | orig_root = *root; | |
1875 | *root = RB_ROOT_CACHED; | |
1876 | __bpf_spin_unlock_irqrestore(spin_lock); | |
1877 | ||
1878 | bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) { | |
1879 | obj = pos; | |
1880 | obj -= field->graph_root.node_offset; | |
1881 | ||
9c395c1b DM |
1882 | |
1883 | migrate_disable(); | |
1512217c | 1884 | __bpf_obj_drop_impl(obj, field->graph_root.value_rec); |
9c395c1b DM |
1885 | migrate_enable(); |
1886 | } | |
1887 | } | |
1888 | ||
958cf2e2 KKD |
1889 | __diag_push(); |
1890 | __diag_ignore_all("-Wmissing-prototypes", | |
1891 | "Global functions as their definitions will be in vmlinux BTF"); | |
1892 | ||
400031e0 | 1893 | __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) |
958cf2e2 KKD |
1894 | { |
1895 | struct btf_struct_meta *meta = meta__ign; | |
1896 | u64 size = local_type_id__k; | |
1897 | void *p; | |
1898 | ||
958cf2e2 KKD |
1899 | p = bpf_mem_alloc(&bpf_global_ma, size); |
1900 | if (!p) | |
1901 | return NULL; | |
1902 | if (meta) | |
cd2a8079 | 1903 | bpf_obj_init(meta->record, p); |
958cf2e2 KKD |
1904 | return p; |
1905 | } | |
1906 | ||
36d8bdf7 YS |
1907 | __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign) |
1908 | { | |
1909 | u64 size = local_type_id__k; | |
1910 | ||
1911 | /* The verifier has ensured that meta__ign must be NULL */ | |
1912 | return bpf_mem_alloc(&bpf_global_percpu_ma, size); | |
1913 | } | |
1914 | ||
1512217c | 1915 | /* Must be called under migrate_disable(), as required by bpf_mem_free */ |
c8e18754 DM |
1916 | void __bpf_obj_drop_impl(void *p, const struct btf_record *rec) |
1917 | { | |
1512217c DM |
1918 | if (rec && rec->refcount_off >= 0 && |
1919 | !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) { | |
1920 | /* Object is refcounted and refcount_dec didn't drop the | |
1921 | * refcount to 0. Return without freeing the object. | |
1922 | */ | |
1923 | return; | |
1924 | } | |
1925 | ||
c8e18754 DM |
1926 | if (rec) |
1927 | bpf_obj_free_fields(rec, p); | |
7e26cd12 DM |
1928 | |
1929 | if (rec && rec->refcount_off >= 0) | |
1930 | bpf_mem_free_rcu(&bpf_global_ma, p); | |
1931 | else | |
1932 | bpf_mem_free(&bpf_global_ma, p); | |
c8e18754 DM |
1933 | } |
1934 | ||
400031e0 | 1935 | __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) |
ac9f0605 KKD |
1936 | { |
1937 | struct btf_struct_meta *meta = meta__ign; | |
1938 | void *p = p__alloc; | |
1939 | ||
c8e18754 | 1940 | __bpf_obj_drop_impl(p, meta ? meta->record : NULL); |
ac9f0605 KKD |
1941 | } |
1942 | ||
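/* Illustrative BPF-side usage (a sketch, not part of this file): with the
 * bpf_obj_new()/bpf_obj_drop() convenience macros from the selftests'
 * bpf_experimental.h, the __k/__ign suffixed arguments above are supplied
 * by the verifier:
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_list_node node;
 *	};
 *
 *	struct node_data *n = bpf_obj_new(typeof(*n));
 *	if (!n)
 *		return 0;
 *	n->key = 5;
 *	bpf_obj_drop(n);
 */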
36d8bdf7 YS |
1943 | __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign) |
1944 | { | |
1945 | /* The verifier has ensured that meta__ign must be NULL */ | |
1946 | bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc); | |
1947 | } | |
1948 | ||
7c50b1cb DM |
1949 | __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign) |
1950 | { | |
1951 | struct btf_struct_meta *meta = meta__ign; | |
1952 | struct bpf_refcount *ref; | |
1953 | ||
1954 | /* Could just cast directly to refcount_t *, but need some code using | |
1955 | * bpf_refcount type so that it is emitted in vmlinux BTF | |
1956 | */ | |
4ab07209 | 1957 | ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off); |
7793fc3b DM |
1958 | if (!refcount_inc_not_zero((refcount_t *)ref)) |
1959 | return NULL; | |
7c50b1cb | 1960 | |
7793fc3b DM |
1961 | /* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null |
1962 | * in verifier.c | |
1963 | */ | |
7c50b1cb DM |
1964 | return (void *)p__refcounted_kptr; |
1965 | } | |
1966 | ||
0a1f7bfe DM |
1967 | static int __bpf_list_add(struct bpf_list_node_kern *node, |
1968 | struct bpf_list_head *head, | |
d2dcc67d | 1969 | bool tail, struct btf_record *rec, u64 off) |
8cab76ec | 1970 | { |
0a1f7bfe | 1971 | struct list_head *n = &node->list_head, *h = (void *)head; |
8cab76ec | 1972 | |
3e81740a DM |
1973 | /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't |
1974 | * called on its fields, so init here | |
1975 | */ | |
8cab76ec KKD |
1976 | if (unlikely(!h->next)) |
1977 | INIT_LIST_HEAD(h); | |
c3c510ce DM |
1978 | |
1979 | /* node->owner != NULL implies !list_empty(n), no need to separately | |
1980 | * check the latter | |
1981 | */ | |
1982 | if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { | |
d2dcc67d | 1983 | /* Only called from BPF prog, no need to migrate_disable */ |
cc0d76ca | 1984 | __bpf_obj_drop_impl((void *)n - off, rec); |
d2dcc67d DM |
1985 | return -EINVAL; |
1986 | } | |
1987 | ||
8cab76ec | 1988 | tail ? list_add_tail(n, h) : list_add(n, h); |
c3c510ce | 1989 | WRITE_ONCE(node->owner, head); |
d2dcc67d DM |
1990 | |
1991 | return 0; | |
8cab76ec KKD |
1992 | } |
1993 | ||
d2dcc67d DM |
1994 | __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head, |
1995 | struct bpf_list_node *node, | |
1996 | void *meta__ign, u64 off) | |
8cab76ec | 1997 | { |
0a1f7bfe | 1998 | struct bpf_list_node_kern *n = (void *)node; |
d2dcc67d DM |
1999 | struct btf_struct_meta *meta = meta__ign; |
2000 | ||
0a1f7bfe | 2001 | return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); |
8cab76ec KKD |
2002 | } |
2003 | ||
d2dcc67d DM |
2004 | __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head, |
2005 | struct bpf_list_node *node, | |
2006 | void *meta__ign, u64 off) | |
8cab76ec | 2007 | { |
0a1f7bfe | 2008 | struct bpf_list_node_kern *n = (void *)node; |
d2dcc67d DM |
2009 | struct btf_struct_meta *meta = meta__ign; |
2010 | ||
0a1f7bfe | 2011 | return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off); |
8cab76ec KKD |
2012 | } |
2013 | ||
2014 | static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail) | |
2015 | { | |
2016 | struct list_head *n, *h = (void *)head; | |
c3c510ce | 2017 | struct bpf_list_node_kern *node; |
8cab76ec | 2018 | |
3e81740a DM |
2019 | /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't |
2020 | * called on its fields, so init here | |
2021 | */ | |
8cab76ec KKD |
2022 | if (unlikely(!h->next)) |
2023 | INIT_LIST_HEAD(h); | |
2024 | if (list_empty(h)) | |
2025 | return NULL; | |
c3c510ce | 2026 | |
8cab76ec | 2027 | n = tail ? h->prev : h->next; |
c3c510ce DM |
2028 | node = container_of(n, struct bpf_list_node_kern, list_head); |
2029 | if (WARN_ON_ONCE(READ_ONCE(node->owner) != head)) | |
2030 | return NULL; | |
2031 | ||
8cab76ec | 2032 | list_del_init(n); |
c3c510ce | 2033 | WRITE_ONCE(node->owner, NULL); |
8cab76ec KKD |
2034 | return (struct bpf_list_node *)n; |
2035 | } | |
2036 | ||
400031e0 | 2037 | __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) |
8cab76ec KKD |
2038 | { |
2039 | return __bpf_list_del(head, false); | |
2040 | } | |
2041 | ||
400031e0 | 2042 | __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) |
8cab76ec KKD |
2043 | { |
2044 | return __bpf_list_del(head, true); | |
2045 | } | |
2046 | ||
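/* Illustrative BPF-side usage (a sketch, not part of this file; 'elem' and
 * friends are hypothetical): list heads are declared with the selftests'
 * __contains() decl tag and manipulated under the associated bpf_spin_lock,
 * with container_of() as used in the BPF selftests:
 *
 *	struct elem {
 *		long val;
 *		struct bpf_list_node node;
 *	};
 *	struct bpf_spin_lock lock;
 *	struct bpf_list_head head __contains(elem, node);
 *
 *	bpf_spin_lock(&lock);
 *	bpf_list_push_front(&head, &e->node);
 *	n = bpf_list_pop_back(&head);
 *	bpf_spin_unlock(&lock);
 *	if (n)
 *		bpf_obj_drop(container_of(n, struct elem, node));
 */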
bd1279ae DM |
2047 | __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, |
2048 | struct bpf_rb_node *node) | |
2049 | { | |
c3c510ce | 2050 | struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node; |
bd1279ae | 2051 | struct rb_root_cached *r = (struct rb_root_cached *)root; |
c3c510ce | 2052 | struct rb_node *n = &node_internal->rb_node; |
bd1279ae | 2053 | |
c3c510ce DM |
2054 | /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or |
2055 | * n is owned by some other tree. No need to check RB_EMPTY_NODE(n) | |
2056 | */ | |
2057 | if (READ_ONCE(node_internal->owner) != root) | |
404ad75a DM |
2058 | return NULL; |
2059 | ||
bd1279ae DM |
2060 | rb_erase_cached(n, r); |
2061 | RB_CLEAR_NODE(n); | |
c3c510ce | 2062 | WRITE_ONCE(node_internal->owner, NULL); |
bd1279ae DM |
2063 | return (struct bpf_rb_node *)n; |
2064 | } | |
2065 | ||
2066 | /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF | |
2067 | * program | |
2068 | */ | |
0a1f7bfe DM |
2069 | static int __bpf_rbtree_add(struct bpf_rb_root *root, |
2070 | struct bpf_rb_node_kern *node, | |
d2dcc67d | 2071 | void *less, struct btf_record *rec, u64 off) |
bd1279ae DM |
2072 | { |
2073 | struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node; | |
0a1f7bfe | 2074 | struct rb_node *parent = NULL, *n = &node->rb_node; |
bd1279ae | 2075 | bpf_callback_t cb = (bpf_callback_t)less; |
bd1279ae DM |
2076 | bool leftmost = true; |
2077 | ||
c3c510ce DM |
2078 | /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately |
2079 | * check the latter | |
2080 | */ | |
2081 | if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { | |
d2dcc67d | 2082 | /* Only called from BPF prog, no need to migrate_disable */ |
cc0d76ca | 2083 | __bpf_obj_drop_impl((void *)n - off, rec); |
d2dcc67d DM |
2084 | return -EINVAL; |
2085 | } | |
2086 | ||
bd1279ae DM |
2087 | while (*link) { |
2088 | parent = *link; | |
2089 | if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) { | |
2090 | link = &parent->rb_left; | |
2091 | } else { | |
2092 | link = &parent->rb_right; | |
2093 | leftmost = false; | |
2094 | } | |
2095 | } | |
2096 | ||
d2dcc67d DM |
2097 | rb_link_node(n, parent, link); |
2098 | rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost); | |
c3c510ce | 2099 | WRITE_ONCE(node->owner, root); |
d2dcc67d | 2100 | return 0; |
bd1279ae DM |
2101 | } |
2102 | ||
d2dcc67d DM |
2103 | __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, |
2104 | bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), | |
2105 | void *meta__ign, u64 off) | |
bd1279ae | 2106 | { |
d2dcc67d | 2107 | struct btf_struct_meta *meta = meta__ign; |
0a1f7bfe | 2108 | struct bpf_rb_node_kern *n = (void *)node; |
d2dcc67d | 2109 | |
0a1f7bfe | 2110 | return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off); |
bd1279ae DM |
2111 | } |
2112 | ||
2113 | __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) | |
2114 | { | |
2115 | struct rb_root_cached *r = (struct rb_root_cached *)root; | |
2116 | ||
2117 | return (struct bpf_rb_node *)rb_first_cached(r); | |
2118 | } | |
2119 | ||
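/* Illustrative BPF-side usage (a sketch, not part of this file; names are
 * hypothetical): the 'less' callback orders nodes, and add/remove/first
 * must run under the bpf_spin_lock associated with the tree:
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	bpf_spin_lock(&lock);
 *	bpf_rbtree_add(&root, &n->node, less);
 *	first = bpf_rbtree_first(&root);
 *	bpf_spin_unlock(&lock);
 */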
90660309 DV |
2120 | /** |
2121 | * bpf_task_acquire - Acquire a reference to a task. A task acquired by this | |
2122 | * kfunc that is not stored in a map as a kptr must be released by calling | |
2123 | * bpf_task_release(). | |
2124 | * @p: The task on which a reference is being acquired. | |
2125 | */ | |
400031e0 | 2126 | __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p) |
90660309 | 2127 | { |
d02c48fa DV |
2128 | if (refcount_inc_not_zero(&p->rcu_users)) |
2129 | return p; | |
2130 | return NULL; | |
90660309 DV |
2131 | } |
2132 | ||
90660309 | 2133 | /** |
25c5e92d | 2134 | * bpf_task_release - Release the reference acquired on a task. |
90660309 DV |
2135 | * @p: The task on which a reference is being released. |
2136 | */ | |
400031e0 | 2137 | __bpf_kfunc void bpf_task_release(struct task_struct *p) |
90660309 | 2138 | { |
d02c48fa | 2139 | put_task_struct_rcu_user(p); |
90660309 DV |
2140 | } |
2141 | ||
fda01efc DV |
2142 | #ifdef CONFIG_CGROUPS |
2143 | /** | |
2144 | * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by | |
2145 | * this kfunc that is not stored in a map as a kptr must be released by | |
2146 | * calling bpf_cgroup_release(). | |
2147 | * @cgrp: The cgroup on which a reference is being acquired. | |
2148 | */ | |
400031e0 | 2149 | __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) |
fda01efc | 2150 | { |
1d712839 | 2151 | return cgroup_tryget(cgrp) ? cgrp : NULL; |
fda01efc DV |
2152 | } |
2153 | ||
fda01efc | 2154 | /** |
36aa10ff | 2155 | * bpf_cgroup_release - Release the reference acquired on a cgroup. |
fda01efc DV |
2156 | * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to |
2157 | * not be freed until the current grace period has ended, even if its refcount | |
2158 | * drops to 0. | |
2159 | * @cgrp: The cgroup on which a reference is being released. | |
2160 | */ | |
400031e0 | 2161 | __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp) |
fda01efc | 2162 | { |
fda01efc DV |
2163 | cgroup_put(cgrp); |
2164 | } | |
5ca78670 DV |
2165 | |
2166 | /** | |
2167 | * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor | |
2168 | * array. A cgroup returned by this kfunc that is not subsequently stored in a | |
2169 | * map must be released by calling bpf_cgroup_release(). | |
2170 | * @cgrp: The cgroup for which we're performing a lookup. | |
2171 | * @level: The level of ancestor to look up. | |
2172 | */ | |
400031e0 | 2173 | __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) |
5ca78670 DV |
2174 | { |
2175 | struct cgroup *ancestor; | |
2176 | ||
2177 | if (level > cgrp->level || level < 0) | |
2178 | return NULL; | |
2179 | ||
20c09d92 | 2180 | /* cgrp's refcnt could be 0 here, but ancestors can still be accessed */ |
5ca78670 | 2181 | ancestor = cgrp->ancestors[level]; |
20c09d92 AS |
2182 | if (!cgroup_tryget(ancestor)) |
2183 | return NULL; | |
5ca78670 DV |
2184 | return ancestor; |
2185 | } | |
332ea1f6 TH |
2186 | |
2187 | /** | |
2188 | * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this | |
2189 | * kfunc that is not subsequently stored in a map must be released by calling | |
2190 | * bpf_cgroup_release(). | |
30a2d832 | 2191 | * @cgid: cgroup id. |
332ea1f6 TH |
2192 | */ |
2193 | __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid) | |
2194 | { | |
2195 | struct cgroup *cgrp; | |
2196 | ||
2197 | cgrp = cgroup_get_from_id(cgid); | |
2198 | if (IS_ERR(cgrp)) | |
2199 | return NULL; | |
2200 | return cgrp; | |
2201 | } | |
b5ad4cdc FZ |
2202 | |
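/* Illustrative BPF-side usage (a sketch, not part of this file): lookups
 * return an acquired cgroup or NULL, so the result must be checked and
 * eventually released:
 *
 *	struct cgroup *cgrp = bpf_cgroup_from_id(cgid);
 *
 *	if (!cgrp)
 *		return 0;
 *	bpf_printk("cgroup level %d", cgrp->level);
 *	bpf_cgroup_release(cgrp);
 */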
2203 | /** | |
2204 | * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc to test | |
2205 | * a task's membership in a cgroup's ancestry. | |
2206 | * @task: the task to be tested | |
2207 | * @ancestor: possible ancestor of @task's cgroup | |
2208 | * | |
2209 | * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor. | |
2210 | * It follows all the same rules as cgroup_is_descendant, and only applies | |
2211 | * to the default hierarchy. | |
2212 | */ | |
2213 | __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task, | |
2214 | struct cgroup *ancestor) | |
2215 | { | |
29a7e00f YS |
2216 | long ret; |
2217 | ||
2218 | rcu_read_lock(); | |
2219 | ret = task_under_cgroup_hierarchy(task, ancestor); | |
2220 | rcu_read_unlock(); | |
2221 | return ret; | |
b5ad4cdc | 2222 | } |
fda01efc DV |
2223 | #endif /* CONFIG_CGROUPS */ |
2224 | ||
3f0e6f2b DV |
2225 | /** |
2226 | * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up | |
2227 | * in the root pid namespace idr. If a task is returned, it must either be | |
2228 | * stored in a map, or released with bpf_task_release(). | |
2229 | * @pid: The pid of the task being looked up. | |
2230 | */ | |
400031e0 | 2231 | __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) |
3f0e6f2b DV |
2232 | { |
2233 | struct task_struct *p; | |
2234 | ||
2235 | rcu_read_lock(); | |
2236 | p = find_task_by_pid_ns(pid, &init_pid_ns); | |
2237 | if (p) | |
d02c48fa | 2238 | p = bpf_task_acquire(p); |
3f0e6f2b DV |
2239 | rcu_read_unlock(); |
2240 | ||
2241 | return p; | |
2242 | } | |
2243 | ||
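/* Illustrative BPF-side usage (a sketch, not part of this file): the
 * returned task is acquired and must be released when done:
 *
 *	struct task_struct *t = bpf_task_from_pid(pid);
 *
 *	if (!t)
 *		return 0;
 *	bpf_printk("comm: %s", t->comm);
 *	bpf_task_release(t);
 */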
66e3a13e | 2244 | /** |
7ce60b11 DV |
2245 | * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data. |
2246 | * @ptr: The dynptr whose data slice to retrieve | |
2247 | * @offset: Offset into the dynptr | |
3bda08b6 DR |
2248 | * @buffer__opt: User-provided buffer to copy contents into. May be NULL |
2249 | * @buffer__szk: Size (in bytes) of the buffer if present. This is the | |
2250 | * length of the requested slice. This must be a constant. | |
66e3a13e JK |
2251 | * |
2252 | * For non-skb and non-xdp type dynptrs, there is no difference between | |
2253 | * bpf_dynptr_slice and bpf_dynptr_data. | |
2254 | * | |
3bda08b6 DR |
2255 | * If buffer__opt is NULL, the call will fail if buffer__opt was needed. |
2256 | * | |
66e3a13e JK |
2257 | * If the intention is to write to the data slice, please use |
2258 | * bpf_dynptr_slice_rdwr. | |
2259 | * | |
2260 | * The user must check that the returned pointer is not null before using it. | |
2261 | * | |
2262 | * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice | |
2263 | * does not change the underlying packet data pointers, so a call to | |
2264 | * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in | |
2265 | * the bpf program. | |
2266 | * | |
7ce60b11 | 2267 | * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a read-only |
66e3a13e JK |
2268 | * data slice (can be either direct pointer to the data or a pointer to the user |
2269 | * provided buffer, with its contents containing the data, if unable to obtain | |
2270 | * direct pointer) | |
2271 | */ | |
2272 | __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset, | |
3bda08b6 | 2273 | void *buffer__opt, u32 buffer__szk) |
66e3a13e JK |
2274 | { |
2275 | enum bpf_dynptr_type type; | |
2276 | u32 len = buffer__szk; | |
2277 | int err; | |
2278 | ||
2279 | if (!ptr->data) | |
c45eac53 | 2280 | return NULL; |
66e3a13e JK |
2281 | |
2282 | err = bpf_dynptr_check_off_len(ptr, offset, len); | |
2283 | if (err) | |
c45eac53 | 2284 | return NULL; |
66e3a13e JK |
2285 | |
2286 | type = bpf_dynptr_get_type(ptr); | |
2287 | ||
2288 | switch (type) { | |
2289 | case BPF_DYNPTR_TYPE_LOCAL: | |
2290 | case BPF_DYNPTR_TYPE_RINGBUF: | |
2291 | return ptr->data + ptr->offset + offset; | |
2292 | case BPF_DYNPTR_TYPE_SKB: | |
6f5a630d AS |
2293 | if (buffer__opt) |
2294 | return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt); | |
2295 | else | |
2296 | return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len); | |
66e3a13e JK |
2297 | case BPF_DYNPTR_TYPE_XDP: |
2298 | { | |
2299 | void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len); | |
5426700e | 2300 | if (!IS_ERR_OR_NULL(xdp_ptr)) |
66e3a13e JK |
2301 | return xdp_ptr; |
2302 | ||
3bda08b6 DR |
2303 | if (!buffer__opt) |
2304 | return NULL; | |
2305 | bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false); | |
2306 | return buffer__opt; | |
66e3a13e JK |
2307 | } |
2308 | default: | |
2309 | WARN_ONCE(true, "unknown dynptr type %d\n", type); | |
c45eac53 | 2310 | return NULL; |
66e3a13e JK |
2311 | } |
2312 | } | |
2313 | ||
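/* Illustrative BPF-side usage (a sketch, not part of this file;
 * bpf_dynptr_from_skb() is the skb dynptr constructor kfunc declared in
 * the selftests' bpf_kfuncs.h): parsing a header through an skb dynptr:
 *
 *	struct bpf_dynptr ptr;
 *	struct ethhdr buf, *eth;
 *
 *	if (bpf_dynptr_from_skb(skb, 0, &ptr))
 *		return TC_ACT_SHOT;
 *	eth = bpf_dynptr_slice(&ptr, 0, &buf, sizeof(buf));
 *	if (!eth)
 *		return TC_ACT_SHOT;
 *
 * eth then points either directly into the skb head or into buf.
 */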
2314 | /** | |
7ce60b11 DV |
2315 | * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data. |
2316 | * @ptr: The dynptr whose data slice to retrieve | |
2317 | * @offset: Offset into the dynptr | |
3bda08b6 DR |
2318 | * @buffer__opt: User-provided buffer to copy contents into. May be NULL |
2319 | * @buffer__szk: Size (in bytes) of the buffer if present. This is the | |
2320 | * length of the requested slice. This must be a constant. | |
66e3a13e JK |
2321 | * |
2322 | * For non-skb and non-xdp type dynptrs, there is no difference between | |
2323 | * bpf_dynptr_slice and bpf_dynptr_data. | |
2324 | * | |
3bda08b6 DR |
2325 | * If buffer__opt is NULL, the call will fail if buffer__opt was needed. |
2326 | * | |
66e3a13e JK |
2327 | * The returned pointer is writable and may point either directly to the dynptr | |
2328 | * data at the requested offset or to the buffer if a direct data pointer | |
2329 | * cannot be obtained (example: the requested slice is in the paged area of an | |
2330 | * skb packet). In the case where the returned pointer is to the buffer, the | |
2331 | * user is responsible for persisting writes by calling bpf_dynptr_write(). | |
2332 | * This usually looks like the following pattern: | |
2333 | * | |
2334 | * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer)); | |
2335 | * if (!eth) | |
2336 | * return TC_ACT_SHOT; | |
2337 | * | |
2338 | * // mutate eth header // | |
2339 | * | |
2340 | * if (eth == buffer) | |
2341 | * bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0); | |
2342 | * | |
2343 | * Please note that, as in the example above, the user must check that the | |
2344 | * returned pointer is not null before using it. | |
2345 | * | |
2346 | * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr | |
2347 | * does not change the underlying packet data pointers, so a call to | |
2348 | * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in | |
2349 | * the bpf program. | |
2350 | * | |
7ce60b11 | 2351 | * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a |
66e3a13e JK |
2352 | * data slice (can be either direct pointer to the data or a pointer to the user |
2353 | * provided buffer, with its contents containing the data, if unable to obtain | |
2354 | * direct pointer) | |
2355 | */ | |
2356 | __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset, | |
3bda08b6 | 2357 | void *buffer__opt, u32 buffer__szk) |
66e3a13e | 2358 | { |
540ccf96 | 2359 | if (!ptr->data || __bpf_dynptr_is_rdonly(ptr)) |
c45eac53 | 2360 | return NULL; |
66e3a13e JK |
2361 | |
2362 | /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice. | |
2363 | * | |
2364 | * For skb-type dynptrs, it is safe to write into the returned pointer | |
2365 | * if the bpf program allows skb data writes. There are two possibilities | |
2366 | * when calling bpf_dynptr_slice_rdwr: | |
2367 | * | |
2368 | * 1) The requested slice is in the head of the skb. In this case, the | |
2369 | * returned pointer is directly to skb data, and if the skb is cloned, the | |
2370 | * verifier will have uncloned it (see bpf_unclone_prologue()) already. | |
2371 | * The pointer can be directly written into. | |
2372 | * | |
2373 | * 2) Some portion of the requested slice is in the paged buffer area. | |
2374 | * In this case, the requested data will be copied out into the buffer | |
2375 | * and the returned pointer will be a pointer to the buffer. The skb | |
2376 | * will not be pulled. To persist the write, the user will need to call | |
2377 | * bpf_dynptr_write(), which will pull the skb and commit the write. | |
2378 | * | |
2379 | * Similarly for xdp programs, if the requested slice is not across xdp | |
2380 | * fragments, then a direct pointer will be returned, otherwise the data | |
2381 | * will be copied out into the buffer and the user will need to call | |
2382 | * bpf_dynptr_write() to commit changes. | |
2383 | */ | |
3bda08b6 | 2384 | return bpf_dynptr_slice(ptr, offset, buffer__opt, buffer__szk); |
66e3a13e JK |
2385 | } |
2386 | ||
987d0242 JK |
2387 | __bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 end) |
2388 | { | |
2389 | u32 size; | |
2390 | ||
2391 | if (!ptr->data || start > end) | |
2392 | return -EINVAL; | |
2393 | ||
26662d73 | 2394 | size = __bpf_dynptr_size(ptr); |
987d0242 JK |
2395 | |
2396 | if (start > size || end > size) | |
2397 | return -ERANGE; | |
2398 | ||
2399 | ptr->offset += start; | |
2400 | bpf_dynptr_set_size(ptr, end - start); | |
2401 | ||
2402 | return 0; | |
2403 | } | |
2404 | ||
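/* Illustrative BPF-side usage (a sketch, not part of this file): trimming
 * a dynptr's view to a sub-range, e.g. skipping an Ethernet header:
 *
 *	if (bpf_dynptr_adjust(&ptr, sizeof(struct ethhdr), bpf_dynptr_size(&ptr)))
 *		return 0;
 *
 * ptr then covers only the bytes after the header; the underlying data is
 * untouched.
 */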
540ccf96 JK |
2405 | __bpf_kfunc bool bpf_dynptr_is_null(struct bpf_dynptr_kern *ptr) |
2406 | { | |
2407 | return !ptr->data; | |
2408 | } | |
2409 | ||
2410 | __bpf_kfunc bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr) | |
2411 | { | |
2412 | if (!ptr->data) | |
2413 | return false; | |
2414 | ||
2415 | return __bpf_dynptr_is_rdonly(ptr); | |
2416 | } | |
2417 | ||
26662d73 JK |
2418 | __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr_kern *ptr) |
2419 | { | |
2420 | if (!ptr->data) | |
2421 | return -EINVAL; | |
2422 | ||
2423 | return __bpf_dynptr_size(ptr); | |
2424 | } | |
2425 | ||
361f129f JK |
2426 | __bpf_kfunc int bpf_dynptr_clone(struct bpf_dynptr_kern *ptr, |
2427 | struct bpf_dynptr_kern *clone__uninit) | |
2428 | { | |
2429 | if (!ptr->data) { | |
2430 | bpf_dynptr_set_null(clone__uninit); | |
2431 | return -EINVAL; | |
2432 | } | |
2433 | ||
2434 | *clone__uninit = *ptr; | |
2435 | ||
2436 | return 0; | |
2437 | } | |
2438 | ||
400031e0 | 2439 | __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj) |
fd264ca0 YS |
2440 | { |
2441 | return obj; | |
2442 | } | |
2443 | ||
400031e0 | 2444 | __bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k) |
a35b9af4 YS |
2445 | { |
2446 | return obj__ign; | |
2447 | } | |
2448 | ||
400031e0 | 2449 | __bpf_kfunc void bpf_rcu_read_lock(void) |
9bb00b28 YS |
2450 | { |
2451 | rcu_read_lock(); | |
2452 | } | |
2453 | ||
400031e0 | 2454 | __bpf_kfunc void bpf_rcu_read_unlock(void) |
9bb00b28 YS |
2455 | { |
2456 | rcu_read_unlock(); | |
2457 | } | |
2458 | ||
f18b03fa KKD |
2459 | struct bpf_throw_ctx { |
2460 | struct bpf_prog_aux *aux; | |
2461 | u64 sp; | |
2462 | u64 bp; | |
2463 | int cnt; | |
2464 | }; | |
2465 | ||
2466 | static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp) | |
2467 | { | |
2468 | struct bpf_throw_ctx *ctx = cookie; | |
2469 | struct bpf_prog *prog; | |
2470 | ||
2471 | if (!is_bpf_text_address(ip)) | |
2472 | return !ctx->cnt; | |
2473 | prog = bpf_prog_ksym_find(ip); | |
2474 | ctx->cnt++; | |
2475 | if (bpf_is_subprog(prog)) | |
2476 | return true; | |
2477 | ctx->aux = prog->aux; | |
2478 | ctx->sp = sp; | |
2479 | ctx->bp = bp; | |
2480 | return false; | |
2481 | } | |
2482 | ||
2483 | __bpf_kfunc void bpf_throw(u64 cookie) | |
2484 | { | |
2485 | struct bpf_throw_ctx ctx = {}; | |
2486 | ||
2487 | arch_bpf_stack_walk(bpf_stack_walker, &ctx); | |
2488 | WARN_ON_ONCE(!ctx.aux); | |
2489 | if (ctx.aux) | |
2490 | WARN_ON_ONCE(!ctx.aux->exception_boundary); | |
2491 | WARN_ON_ONCE(!ctx.bp); | |
2492 | WARN_ON_ONCE(!ctx.cnt); | |
ec5290a1 KKD |
2493 | /* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning | |
2494 | * stack depths deeper than ctx.sp, since we do not return from bpf_throw | |
2495 | * and thus skip the compiler-generated instrumentation doing the same. | |
2496 | */ | |
7d346063 | 2497 | kasan_unpoison_task_stack_below((void *)(long)ctx.sp); |
f18b03fa | 2498 | ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp); |
fd548e1a | 2499 | WARN(1, "A call to BPF exception callback should never return\n"); |
f18b03fa KKD |
2500 | } |
2501 | ||
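/* Illustrative BPF-side usage (a sketch, not part of this file; names are
 * hypothetical): bpf_throw() never returns, and an optional exception
 * callback, tagged with the selftests' __exception_cb() macro, decides the
 * program's return value:
 *
 *	__noinline int ex_cb(u64 cookie)
 *	{
 *		return (int)cookie;
 *	}
 *
 *	SEC("tc") __exception_cb(ex_cb)
 *	int prog(struct __sk_buff *ctx)
 *	{
 *		if (bad_input(ctx))
 *			bpf_throw(TC_ACT_SHOT);
 *		return TC_ACT_OK;
 *	}
 */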
958cf2e2 KKD |
2502 | __diag_pop(); |
2503 | ||
2504 | BTF_SET8_START(generic_btf_ids) | |
13379059 AS |
2505 | #ifdef CONFIG_KEXEC_CORE |
2506 | BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) | |
2507 | #endif | |
958cf2e2 | 2508 | BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) |
36d8bdf7 | 2509 | BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) |
ac9f0605 | 2510 | BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE) |
36d8bdf7 | 2511 | BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE) |
7793fc3b | 2512 | BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL) |
d2dcc67d DM |
2513 | BTF_ID_FLAGS(func, bpf_list_push_front_impl) |
2514 | BTF_ID_FLAGS(func, bpf_list_push_back_impl) | |
8cab76ec KKD |
2515 | BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) |
2516 | BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) | |
d02c48fa | 2517 | BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) |
90660309 | 2518 | BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) |
404ad75a | 2519 | BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL) |
d2dcc67d | 2520 | BTF_ID_FLAGS(func, bpf_rbtree_add_impl) |
bd1279ae DM |
2521 | BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) |
2522 | ||
fda01efc | 2523 | #ifdef CONFIG_CGROUPS |
1d712839 | 2524 | BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) |
fda01efc | 2525 | BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE) |
20c09d92 | 2526 | BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL) |
332ea1f6 | 2527 | BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL) |
b5ad4cdc | 2528 | BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU) |
fda01efc | 2529 | #endif |
3f0e6f2b | 2530 | BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL) |
f18b03fa | 2531 | BTF_ID_FLAGS(func, bpf_throw) |
958cf2e2 | 2532 | BTF_SET8_END(generic_btf_ids) |
13379059 | 2533 | |
958cf2e2 | 2534 | static const struct btf_kfunc_id_set generic_kfunc_set = { |
13379059 | 2535 | .owner = THIS_MODULE, |
958cf2e2 | 2536 | .set = &generic_btf_ids, |
13379059 AS |
2537 | }; |
2538 | ||
cfe14564 | 2539 | |
90660309 DV |
2540 | BTF_ID_LIST(generic_dtor_ids) |
2541 | BTF_ID(struct, task_struct) | |
2542 | BTF_ID(func, bpf_task_release) | |
fda01efc DV |
2543 | #ifdef CONFIG_CGROUPS |
2544 | BTF_ID(struct, cgroup) | |
2545 | BTF_ID(func, bpf_cgroup_release) | |
2546 | #endif | |
90660309 | 2547 | |
cfe14564 | 2548 | BTF_SET8_START(common_btf_ids) |
fd264ca0 | 2549 | BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx) |
a35b9af4 | 2550 | BTF_ID_FLAGS(func, bpf_rdonly_cast) |
9bb00b28 YS |
2551 | BTF_ID_FLAGS(func, bpf_rcu_read_lock) |
2552 | BTF_ID_FLAGS(func, bpf_rcu_read_unlock) | |
66e3a13e JK |
2553 | BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL) |
2554 | BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL) | |
6018e1f4 AN |
2555 | BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW) |
2556 | BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL) | |
2557 | BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY) | |
4ac45468 DM |
2558 | BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU) |
2559 | BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL) | |
2560 | BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY) | |
9c66dc94 CZ |
2561 | BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) |
2562 | BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL) | |
2563 | BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) | |
dfab99df | 2564 | BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) |
c68a78ff CZ |
2565 | BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) |
2566 | BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) | |
dfab99df | 2567 | BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) |
7251d090 CZ |
2568 | BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL) |
2569 | BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY) | |
987d0242 | 2570 | BTF_ID_FLAGS(func, bpf_dynptr_adjust) |
540ccf96 JK |
2571 | BTF_ID_FLAGS(func, bpf_dynptr_is_null) |
2572 | BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) | |
26662d73 | 2573 | BTF_ID_FLAGS(func, bpf_dynptr_size) |
361f129f | 2574 | BTF_ID_FLAGS(func, bpf_dynptr_clone) |
cfe14564 YS |
2575 | BTF_SET8_END(common_btf_ids) |
2576 | ||
2577 | static const struct btf_kfunc_id_set common_kfunc_set = { | |
2578 | .owner = THIS_MODULE, | |
2579 | .set = &common_btf_ids, | |
2580 | }; | |
2581 | ||
13379059 AS |
2582 | static int __init kfunc_init(void) |
2583 | { | |
2fcc6081 | 2584 | int ret; |
90660309 DV |
2585 | const struct btf_id_dtor_kfunc generic_dtors[] = { |
2586 | { | |
2fcc6081 DV |
2587 | .btf_id = generic_dtor_ids[0], |
2588 | .kfunc_btf_id = generic_dtor_ids[1] | |
90660309 | 2589 | }, |
fda01efc DV |
2590 | #ifdef CONFIG_CGROUPS |
2591 | { | |
2fcc6081 DV |
2592 | .btf_id = generic_dtor_ids[2], |
2593 | .kfunc_btf_id = generic_dtor_ids[3] | |
fda01efc DV |
2594 | }, |
2595 | #endif | |
90660309 | 2596 | }; |
8cab76ec KKD |
2597 | |
2598 | ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set); | |
90660309 DV |
2599 | ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set); |
2600 | ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set); | |
cfe14564 | 2601 | ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors, |
90660309 DV |
2602 | ARRAY_SIZE(generic_dtors), |
2603 | THIS_MODULE); | |
cfe14564 | 2604 | return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set); |
13379059 AS |
2605 | } |
2606 | ||
2607 | late_initcall(kfunc_init); |