// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
7 #include <linux/cgroup.h>
8 #include <linux/rcupdate.h>
9 #include <linux/random.h>
10 #include <linux/smp.h>
11 #include <linux/topology.h>
12 #include <linux/ktime.h>
13 #include <linux/sched.h>
14 #include <linux/uidgid.h>
15 #include <linux/filter.h>
16 #include <linux/ctype.h>
17 #include <linux/jiffies.h>
18 #include <linux/pid_namespace.h>
19 #include <linux/poison.h>
20 #include <linux/proc_ns.h>
21 #include <linux/sched/task.h>
22 #include <linux/security.h>
23 #include <linux/btf_ids.h>
24 #include <linux/bpf_mem_alloc.h>
25 #include <linux/kasan.h>
27 #include "../../lib/kstrtox.h"
/* If kernel subsystem is allowing eBPF programs to call this function,
 * inside its own verifier_ops->get_func_proto() callback it should return
 * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments.
 *
 * Different map implementations will rely on rcu in map methods
 * lookup/update/delete, therefore eBPF programs must run under rcu lock
 * if program is allowed to access maps, so check rcu_read_lock_held() in
 * all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
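/* Illustrative sketch (not part of this file): how a kernel subsystem could
 * expose the three map helpers above from its verifier_ops->get_func_proto()
 * callback, as the comment before bpf_map_lookup_elem() describes. The
 * function name "example_func_proto" is hypothetical.
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */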
86 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
88 return map->ops->map_push_elem(map, value, flags);
91 const struct bpf_func_proto bpf_map_push_elem_proto = {
92 .func = bpf_map_push_elem,
95 .ret_type = RET_INTEGER,
96 .arg1_type = ARG_CONST_MAP_PTR,
97 .arg2_type = ARG_PTR_TO_MAP_VALUE,
98 .arg3_type = ARG_ANYTHING,
101 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
103 return map->ops->map_pop_elem(map, value);
106 const struct bpf_func_proto bpf_map_pop_elem_proto = {
107 .func = bpf_map_pop_elem,
109 .ret_type = RET_INTEGER,
110 .arg1_type = ARG_CONST_MAP_PTR,
111 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
114 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
116 return map->ops->map_peek_elem(map, value);
119 const struct bpf_func_proto bpf_map_peek_elem_proto = {
120 .func = bpf_map_peek_elem,
122 .ret_type = RET_INTEGER,
123 .arg1_type = ARG_CONST_MAP_PTR,
124 .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
127 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
129 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
130 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
133 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
134 .func = bpf_map_lookup_percpu_elem,
137 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
138 .arg1_type = ARG_CONST_MAP_PTR,
139 .arg2_type = ARG_PTR_TO_MAP_KEY,
140 .arg3_type = ARG_ANYTHING,
143 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
144 .func = bpf_user_rnd_u32,
146 .ret_type = RET_INTEGER,
149 BPF_CALL_0(bpf_get_smp_processor_id)
151 return smp_processor_id();
154 const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
155 .func = bpf_get_smp_processor_id,
157 .ret_type = RET_INTEGER,
160 BPF_CALL_0(bpf_get_numa_node_id)
162 return numa_node_id();
165 const struct bpf_func_proto bpf_get_numa_node_id_proto = {
166 .func = bpf_get_numa_node_id,
168 .ret_type = RET_INTEGER,
171 BPF_CALL_0(bpf_ktime_get_ns)
173 /* NMI safe access to clock monotonic */
174 return ktime_get_mono_fast_ns();
177 const struct bpf_func_proto bpf_ktime_get_ns_proto = {
178 .func = bpf_ktime_get_ns,
180 .ret_type = RET_INTEGER,
183 BPF_CALL_0(bpf_ktime_get_boot_ns)
185 /* NMI safe access to clock boottime */
186 return ktime_get_boot_fast_ns();
189 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
190 .func = bpf_ktime_get_boot_ns,
192 .ret_type = RET_INTEGER,
195 BPF_CALL_0(bpf_ktime_get_coarse_ns)
197 return ktime_get_coarse_ns();
200 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
201 .func = bpf_ktime_get_coarse_ns,
203 .ret_type = RET_INTEGER,
206 BPF_CALL_0(bpf_ktime_get_tai_ns)
208 /* NMI safe access to clock tai */
209 return ktime_get_tai_fast_ns();
212 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
213 .func = bpf_ktime_get_tai_ns,
215 .ret_type = RET_INTEGER,
BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
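/* Illustrative sketch (not part of this file): how a BPF program typically
 * splits the packed value returned by bpf_get_current_pid_tgid(); the upper
 * 32 bits carry the tgid (the user-visible process id), the lower 32 bits the
 * pid (the thread id), mirroring the shift above.
 *
 *	u64 pid_tgid = bpf_get_current_pid_tgid();
 *	u32 tgid = pid_tgid >> 32;
 *	u32 pid = (u32)pid_tgid;
 */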
BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	/* Verifier guarantees that size > 0 */
	strscpy_pad(buf, task->comm, size);
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	preempt_disable();
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
	preempt_enable();
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif
323 static DEFINE_PER_CPU(unsigned long, irqsave_flags);
325 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
329 local_irq_save(flags);
330 __bpf_spin_lock(lock);
331 __this_cpu_write(irqsave_flags, flags);
334 notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
336 __bpf_spin_lock_irqsave(lock);
340 const struct bpf_func_proto bpf_spin_lock_proto = {
341 .func = bpf_spin_lock,
343 .ret_type = RET_VOID,
344 .arg1_type = ARG_PTR_TO_SPIN_LOCK,
345 .arg1_btf_id = BPF_PTR_POISON,
348 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
352 flags = __this_cpu_read(irqsave_flags);
353 __bpf_spin_unlock(lock);
354 local_irq_restore(flags);
357 notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
359 __bpf_spin_unlock_irqrestore(lock);
363 const struct bpf_func_proto bpf_spin_unlock_proto = {
364 .func = bpf_spin_unlock,
366 .ret_type = RET_VOID,
367 .arg1_type = ARG_PTR_TO_SPIN_LOCK,
368 .arg1_btf_id = BPF_PTR_POISON,
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->record->spin_lock_off;
	else
		lock = dst + map->record->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}
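/* Illustrative sketch (not part of this file): a BPF program using the
 * bpf_spin_lock()/bpf_spin_unlock() helpers above on a map value that embeds a
 * struct bpf_spin_lock. The map and struct names are hypothetical.
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		u64 counter;
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, u32);
 *		__type(value, struct val);
 *	} counters SEC(".maps");
 *
 *	u32 key = 0;
 *	struct val *v = bpf_map_lookup_elem(&counters, &key);
 *
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->counter++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 */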
387 BPF_CALL_0(bpf_jiffies64)
389 return get_jiffies_64();
392 const struct bpf_func_proto bpf_jiffies64_proto = {
393 .func = bpf_jiffies64,
395 .ret_type = RET_INTEGER,
398 #ifdef CONFIG_CGROUPS
399 BPF_CALL_0(bpf_get_current_cgroup_id)
405 cgrp = task_dfl_cgroup(current);
406 cgrp_id = cgroup_id(cgrp);
412 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
413 .func = bpf_get_current_cgroup_id,
415 .ret_type = RET_INTEGER,
418 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
421 struct cgroup *ancestor;
425 cgrp = task_dfl_cgroup(current);
426 ancestor = cgroup_ancestor(cgrp, ancestor_level);
427 cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
433 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
434 .func = bpf_get_current_ancestor_cgroup_id,
436 .ret_type = RET_INTEGER,
437 .arg1_type = ARG_ANYTHING,
439 #endif /* CONFIG_CGROUPS */
441 #define BPF_STRTOX_BASE_MASK 0x1F
443 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
444 unsigned long long *res, bool *is_negative)
446 unsigned int base = flags & BPF_STRTOX_BASE_MASK;
447 const char *cur_buf = buf;
448 size_t cur_len = buf_len;
449 unsigned int consumed;
453 if (!buf || !buf_len || !res || !is_negative)
456 if (base != 0 && base != 8 && base != 10 && base != 16)
459 if (flags & ~BPF_STRTOX_BASE_MASK)
462 while (cur_buf < buf + buf_len && isspace(*cur_buf))
465 *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
469 consumed = cur_buf - buf;
474 cur_len = min(cur_len, sizeof(str) - 1);
475 memcpy(str, cur_buf, cur_len);
479 cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
480 val_len = _parse_integer(cur_buf, base, res);
482 if (val_len & KSTRTOX_OVERFLOW)
489 consumed += cur_buf - str;
494 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
497 unsigned long long _res;
501 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
505 if ((long long)-_res > 0)
509 if ((long long)_res < 0)
516 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
522 err = __bpf_strtoll(buf, buf_len, flags, &_res);
525 if (_res != (long)_res)
const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
541 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
542 unsigned long *, res)
544 unsigned long long _res;
548 err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
553 if (_res != (unsigned long)_res)
const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
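/* Illustrative sketch (not part of this file): a BPF program (for example a
 * cgroup/sysctl program) parsing a decimal number with the bpf_strtol() helper
 * whose proto is defined above. "buf" and "buf_len" stand for a read-only
 * buffer already available to the program and are hypothetical.
 *
 *	long val;
 *	int n = bpf_strtol(buf, buf_len, 10, &val);
 *
 *	if (n < 0)
 *		return 0;	// not a valid number
 *	// n bytes were consumed and val holds the parsed value
 */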
569 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
571 return strncmp(s1, s2, s1_sz);
574 static const struct bpf_func_proto bpf_strncmp_proto = {
577 .ret_type = RET_INTEGER,
578 .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
579 .arg2_type = ARG_CONST_SIZE,
580 .arg3_type = ARG_PTR_TO_CONST_STR,
583 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
584 struct bpf_pidns_info *, nsdata, u32, size)
586 struct task_struct *task = current;
587 struct pid_namespace *pidns;
590 if (unlikely(size != sizeof(struct bpf_pidns_info)))
593 if (unlikely((u64)(dev_t)dev != dev))
599 pidns = task_active_pid_ns(task);
600 if (unlikely(!pidns)) {
605 if (!ns_match(&pidns->ns, (dev_t)dev, ino))
608 nsdata->pid = task_pid_nr_ns(task, pidns);
609 nsdata->tgid = task_tgid_nr_ns(task, pidns);
612 memset((void *)nsdata, 0, (size_t) size);
616 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
617 .func = bpf_get_ns_current_pid_tgid,
619 .ret_type = RET_INTEGER,
620 .arg1_type = ARG_ANYTHING,
621 .arg2_type = ARG_ANYTHING,
622 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
623 .arg4_type = ARG_CONST_SIZE,
626 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
627 .func = bpf_get_raw_cpu_id,
629 .ret_type = RET_INTEGER,
632 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
633 u64, flags, void *, data, u64, size)
635 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
638 return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
641 const struct bpf_func_proto bpf_event_output_data_proto = {
642 .func = bpf_event_output_data,
644 .ret_type = RET_INTEGER,
645 .arg1_type = ARG_PTR_TO_CTX,
646 .arg2_type = ARG_CONST_MAP_PTR,
647 .arg3_type = ARG_ANYTHING,
648 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
649 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
652 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
653 const void __user *, user_ptr)
655 int ret = copy_from_user(dst, user_ptr, size);
658 memset(dst, 0, size);
665 const struct bpf_func_proto bpf_copy_from_user_proto = {
666 .func = bpf_copy_from_user,
669 .ret_type = RET_INTEGER,
670 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
671 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
672 .arg3_type = ARG_ANYTHING,
675 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
676 const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
680 /* flags is not used yet */
687 ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
691 memset(dst, 0, size);
692 /* Return -EFAULT for partial read */
693 return ret < 0 ? ret : -EFAULT;
696 const struct bpf_func_proto bpf_copy_from_user_task_proto = {
697 .func = bpf_copy_from_user_task,
700 .ret_type = RET_INTEGER,
701 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
702 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
703 .arg3_type = ARG_ANYTHING,
704 .arg4_type = ARG_PTR_TO_BTF_ID,
705 .arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
706 .arg5_type = ARG_ANYTHING
709 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
711 if (cpu >= nr_cpu_ids)
712 return (unsigned long)NULL;
714 return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
717 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
718 .func = bpf_per_cpu_ptr,
720 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
721 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
722 .arg2_type = ARG_ANYTHING,
725 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
727 return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
730 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
731 .func = bpf_this_cpu_ptr,
733 .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
734 .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
737 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
740 void __user *user_ptr = (__force void __user *)unsafe_ptr;
746 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
747 if ((unsigned long)unsafe_ptr < TASK_SIZE)
748 return strncpy_from_user_nofault(buf, user_ptr, bufsz);
752 return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
754 return strncpy_from_user_nofault(buf, user_ptr, bufsz);
760 /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
763 #define MAX_BPRINTF_BIN_ARGS 512
765 /* Support executing three nested bprintf helper calls on a given CPU */
766 #define MAX_BPRINTF_NEST_LEVEL 3
767 struct bpf_bprintf_buffers {
768 char bin_args[MAX_BPRINTF_BIN_ARGS];
769 char buf[MAX_BPRINTF_BUF];
772 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
773 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
775 static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
780 nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
781 if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
782 this_cpu_dec(bpf_bprintf_nest_level);
786 *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);
791 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
793 if (!data->bin_args && !data->buf)
795 if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
797 this_cpu_dec(bpf_bprintf_nest_level);
/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when data->get_bin_args is false
 * - Arguments preparation: in addition to the above verification, it writes in
 *   data->bin_args a binary representation of arguments usable by bstr_printf
 *   where pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
815 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
816 u32 num_args, struct bpf_bprintf_data *data)
818 bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
819 char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
820 struct bpf_bprintf_buffers *buffers = NULL;
821 size_t sizeof_cur_arg, sizeof_cur_ip;
822 int err, i, num_spec = 0;
824 char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
826 fmt_end = strnchr(fmt, fmt_size, 0);
829 fmt_size = fmt_end - fmt;
831 if (get_buffers && try_get_buffers(&buffers))
834 if (data->get_bin_args) {
836 tmp_buf = buffers->bin_args;
837 tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
838 data->bin_args = (u32 *)tmp_buf;
842 data->buf = buffers->buf;
844 for (i = 0; i < fmt_size; i++) {
845 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
853 if (fmt[i + 1] == '%') {
858 if (num_spec >= num_args) {
863 /* The string is zero-terminated so if fmt[i] != 0, we can
864 * always access fmt[i + 1], in the worst case it will be a 0
868 /* skip optional "[0 +-][num]" width formatting field */
869 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
872 if (fmt[i] >= '1' && fmt[i] <= '9') {
874 while (fmt[i] >= '0' && fmt[i] <= '9')
879 sizeof_cur_arg = sizeof(long);
881 if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
883 fmt_ptype = fmt[i + 1];
888 if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
889 ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
890 fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
892 /* just kernel pointers */
894 cur_arg = raw_args[num_spec];
899 if (fmt[i + 1] == 'B') {
901 err = snprintf(tmp_buf,
902 (tmp_buf_end - tmp_buf),
904 (void *)(long)raw_args[num_spec]);
905 tmp_buf += (err + 1);
913 /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
914 if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
915 (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
924 sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
925 if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
930 unsafe_ptr = (char *)(long)raw_args[num_spec];
931 err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
934 memset(cur_ip, 0, sizeof_cur_ip);
936 /* hack: bstr_printf expects IP addresses to be
937 * pre-formatted as strings, ironically, the easiest way
938 * to do that is to call snprintf.
940 ip_spec[2] = fmt[i - 1];
942 err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
949 } else if (fmt[i] == 's') {
952 if (fmt[i + 1] != 0 &&
953 !isspace(fmt[i + 1]) &&
954 !ispunct(fmt[i + 1])) {
962 if (tmp_buf_end == tmp_buf) {
967 unsafe_ptr = (char *)(long)raw_args[num_spec];
968 err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
970 tmp_buf_end - tmp_buf);
980 } else if (fmt[i] == 'c') {
984 if (tmp_buf_end == tmp_buf) {
989 *tmp_buf = raw_args[num_spec];
996 sizeof_cur_arg = sizeof(int);
999 sizeof_cur_arg = sizeof(long);
1002 if (fmt[i] == 'l') {
1003 sizeof_cur_arg = sizeof(long long);
1007 if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
1008 fmt[i] != 'x' && fmt[i] != 'X') {
1014 cur_arg = raw_args[num_spec];
1017 tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
1018 if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
1023 if (sizeof_cur_arg == 8) {
1024 *(u32 *)tmp_buf = *(u32 *)&cur_arg;
1025 *(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
1027 *(u32 *)tmp_buf = (u32)(long)cur_arg;
1029 tmp_buf += sizeof_cur_arg;
1037 bpf_bprintf_cleanup(data);
1041 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
1042 const void *, args, u32, data_len)
1044 struct bpf_bprintf_data data = {
1045 .get_bin_args = true,
1049 if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
1050 (data_len && !args))
1052 num_args = data_len / 8;
1054 /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
1055 * can safely give an unbounded size.
1057 err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
1061 err = bstr_printf(str, str_size, fmt, data.bin_args);
1063 bpf_bprintf_cleanup(&data);
const struct bpf_func_proto bpf_snprintf_proto = {
	.func		= bpf_snprintf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_CONST_STR,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
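/* Illustrative sketch (not part of this file): bpf_snprintf() as used from a
 * BPF program. The format string must be a constant (ARG_PTR_TO_CONST_STR) and
 * the variadic values are passed as an array of u64 whose byte size is given
 * as the last argument, matching the data_len checks above. "pid" and "ret"
 * are hypothetical program variables.
 *
 *	static const char fmt[] = "pid %d exited with %d";
 *	char out[64];
 *	u64 args[] = { pid, ret };
 *
 *	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 */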
1079 /* BPF map elements can contain 'struct bpf_timer'.
1080 * Such map owns all of its BPF timers.
1081 * 'struct bpf_timer' is allocated as part of map element allocation
1082 * and it's zero initialized.
1083 * That space is used to keep 'struct bpf_timer_kern'.
1084 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
1085 * remembers 'struct bpf_map *' pointer it's part of.
1086 * bpf_timer_set_callback() increments prog refcnt and assign bpf callback_fn.
1087 * bpf_timer_start() arms the timer.
1088 * If user space reference to a map goes to zero at this point
1089 * ops->map_release_uref callback is responsible for cancelling the timers,
1090 * freeing their memory, and decrementing prog's refcnts.
1091 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
1092 * Inner maps can contain bpf timers as well. ops->map_release_uref is
 * freeing the timers when inner map is replaced or deleted by user space.
 */
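/* Illustrative sketch (not part of this file): the lifecycle described above,
 * as seen from a BPF program. The map, value struct and callback names are
 * hypothetical; the map value embeds a struct bpf_timer.
 *
 *	struct elem {
 *		struct bpf_timer t;
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, struct elem);
 *	} timers SEC(".maps");
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;	// callback must return 0
 *	}
 *
 *	int key = 0;
 *	struct elem *val = bpf_map_lookup_elem(&timers, &key);
 *
 *	if (val) {
 *		bpf_timer_init(&val->t, &timers, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&val->t, timer_cb);
 *		bpf_timer_start(&val->t, 1000000000, 0);	// fire in 1s
 *	}
 */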
struct bpf_hrtimer {
	struct hrtimer timer;
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
};
1103 /* the actual struct hidden inside uapi struct bpf_timer */
1104 struct bpf_timer_kern {
1105 struct bpf_hrtimer *timer;
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));
1113 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
1115 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
1117 struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
1118 struct bpf_map *map = t->map;
1119 void *value = t->value;
1120 bpf_callback_t callback_fn;
1124 BTF_TYPE_EMIT(struct bpf_timer);
1125 callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
1129 /* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
1130 * cannot be preempted by another bpf_timer_cb() on the same cpu.
1131 * Remember the timer this callback is servicing to prevent
1132 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
1135 this_cpu_write(hrtimer_running, t);
1136 if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1137 struct bpf_array *array = container_of(map, struct bpf_array, map);
1139 /* compute the key */
1140 idx = ((char *)value - array->value) / array->elem_size;
1142 } else { /* hash or lru */
1143 key = value - round_up(map->key_size, 8);
1146 callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1147 /* The verifier checked that return value is zero. */
1149 this_cpu_write(hrtimer_running, NULL);
1151 return HRTIMER_NORESTART;
1154 BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
1157 clockid_t clockid = flags & (MAX_CLOCKS - 1);
1158 struct bpf_hrtimer *t;
1161 BUILD_BUG_ON(MAX_CLOCKS != 16);
1162 BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
1163 BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));
1168 if (flags >= MAX_CLOCKS ||
1169 /* similar to timerfd except _ALARM variants are not supported */
1170 (clockid != CLOCK_MONOTONIC &&
1171 clockid != CLOCK_REALTIME &&
1172 clockid != CLOCK_BOOTTIME))
1174 __bpf_spin_lock_irqsave(&timer->lock);
1180 /* allocate hrtimer via map_kmalloc to use memcg accounting */
1181 t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
1186 t->value = (void *)timer - map->record->timer_off;
1189 rcu_assign_pointer(t->callback_fn, NULL);
1190 hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
1191 t->timer.function = bpf_timer_cb;
1192 WRITE_ONCE(timer->timer, t);
	/* Guarantee the order between timer->timer and map->usercnt. So
	 * when there are concurrent uref release and bpf timer init, either
	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
	 * timer or atomic64_read() below returns a zero usercnt.
	 */
	smp_mb();
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		WRITE_ONCE(timer->timer, NULL);
		kfree(t);
		ret = -EPERM;
	}
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}
1212 static const struct bpf_func_proto bpf_timer_init_proto = {
1213 .func = bpf_timer_init,
1215 .ret_type = RET_INTEGER,
1216 .arg1_type = ARG_PTR_TO_TIMER,
1217 .arg2_type = ARG_CONST_MAP_PTR,
1218 .arg3_type = ARG_ANYTHING,
1221 BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
1222 struct bpf_prog_aux *, aux)
1224 struct bpf_prog *prev, *prog = aux->prog;
1225 struct bpf_hrtimer *t;
1230 __bpf_spin_lock_irqsave(&timer->lock);
1236 if (!atomic64_read(&t->map->usercnt)) {
1237 /* maps with timers must be either held by user space
1238 * or pinned in bpffs. Otherwise timer might still be
1239 * running even when bpf prog is detached and user space
1240 * is gone, since map_release_uref won't ever be called.
1247 /* Bump prog refcnt once. Every bpf_timer_set_callback()
1248 * can pick different callback_fn-s within the same prog.
1250 prog = bpf_prog_inc_not_zero(prog);
1252 ret = PTR_ERR(prog);
1256 /* Drop prev prog refcnt when swapping with new prog */
1260 rcu_assign_pointer(t->callback_fn, callback_fn);
1262 __bpf_spin_unlock_irqrestore(&timer->lock);
1266 static const struct bpf_func_proto bpf_timer_set_callback_proto = {
1267 .func = bpf_timer_set_callback,
1269 .ret_type = RET_INTEGER,
1270 .arg1_type = ARG_PTR_TO_TIMER,
1271 .arg2_type = ARG_PTR_TO_FUNC,
1274 BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
1276 struct bpf_hrtimer *t;
1278 enum hrtimer_mode mode;
1282 if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
1284 __bpf_spin_lock_irqsave(&timer->lock);
1286 if (!t || !t->prog) {
1291 if (flags & BPF_F_TIMER_ABS)
1292 mode = HRTIMER_MODE_ABS_SOFT;
1294 mode = HRTIMER_MODE_REL_SOFT;
1296 if (flags & BPF_F_TIMER_CPU_PIN)
1297 mode |= HRTIMER_MODE_PINNED;
1299 hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
1301 __bpf_spin_unlock_irqrestore(&timer->lock);
1305 static const struct bpf_func_proto bpf_timer_start_proto = {
1306 .func = bpf_timer_start,
1308 .ret_type = RET_INTEGER,
1309 .arg1_type = ARG_PTR_TO_TIMER,
1310 .arg2_type = ARG_ANYTHING,
1311 .arg3_type = ARG_ANYTHING,
1314 static void drop_prog_refcnt(struct bpf_hrtimer *t)
1316 struct bpf_prog *prog = t->prog;
1321 rcu_assign_pointer(t->callback_fn, NULL);
1325 BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
1327 struct bpf_hrtimer *t;
1332 __bpf_spin_lock_irqsave(&timer->lock);
1338 if (this_cpu_read(hrtimer_running) == t) {
1339 /* If bpf callback_fn is trying to bpf_timer_cancel()
1340 * its own timer the hrtimer_cancel() will deadlock
1341 * since it waits for callback_fn to finish
1346 drop_prog_refcnt(t);
1348 __bpf_spin_unlock_irqrestore(&timer->lock);
1349 /* Cancel the timer and wait for associated callback to finish
1350 * if it was running.
1352 ret = ret ?: hrtimer_cancel(&t->timer);
1356 static const struct bpf_func_proto bpf_timer_cancel_proto = {
1357 .func = bpf_timer_cancel,
1359 .ret_type = RET_INTEGER,
1360 .arg1_type = ARG_PTR_TO_TIMER,
1363 /* This function is called by map_delete/update_elem for individual element and
1364 * by ops->map_release_uref when the user space reference to a map reaches zero.
1366 void bpf_timer_cancel_and_free(void *val)
1368 struct bpf_timer_kern *timer = val;
1369 struct bpf_hrtimer *t;
1371 /* Performance optimization: read timer->timer without lock first. */
1372 if (!READ_ONCE(timer->timer))
1375 __bpf_spin_lock_irqsave(&timer->lock);
1376 /* re-read it under lock */
1380 drop_prog_refcnt(t);
1381 /* The subsequent bpf_timer_start/cancel() helpers won't be able to use
1382 * this timer, since it won't be initialized.
1384 WRITE_ONCE(timer->timer, NULL);
1386 __bpf_spin_unlock_irqrestore(&timer->lock);
1389 /* Cancel the timer and wait for callback to complete if it was running.
1390 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
1391 * right after for both preallocated and non-preallocated maps.
1392 * The timer->timer = NULL was already done and no code path can
1393 * see address 't' anymore.
1395 * Check that bpf_map_delete/update_elem() wasn't called from timer
1396 * callback_fn. In such case don't call hrtimer_cancel() (since it will
1397 * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
1398 * return -1). Though callback_fn is still running on this cpu it's
1399 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
1400 * from 't'. The bpf subprog callback_fn won't be able to access 't',
1401 * since timer->timer = NULL was already done. The timer will be
1402 * effectively cancelled because bpf_timer_cb() will return
1403 * HRTIMER_NORESTART.
1405 if (this_cpu_read(hrtimer_running) != t)
1406 hrtimer_cancel(&t->timer);
BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
{
	unsigned long *kptr = map_value;

	return xchg(kptr, (unsigned long)ptr);
}

/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote type that verifier will determine.
 */
static const struct bpf_func_proto bpf_kptr_xchg_proto = {
	.func		= bpf_kptr_xchg,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_BTF_ID_OR_NULL,
	.ret_btf_id	= BPF_PTR_POISON,
	.arg1_type	= ARG_PTR_TO_KPTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
	.arg2_btf_id	= BPF_PTR_POISON,
};
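/* Illustrative sketch (not part of this file): a BPF program exchanging a
 * referenced kptr into a map value with the bpf_kptr_xchg() helper above. The
 * struct, map and variable names are hypothetical; "acquired" is a pointer the
 * program already owns (e.g. obtained from bpf_task_acquire()).
 *
 *	struct val {
 *		struct task_struct __kptr *task;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&m, &key);
 *	struct task_struct *old;
 *
 *	if (v) {
 *		// store the new reference; the previous one (possibly NULL)
 *		// is handed back to the program and must be released
 *		old = bpf_kptr_xchg(&v->task, acquired);
 *		if (old)
 *			bpf_task_release(old);
 *	}
 */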
/* Since the upper 8 bits of dynptr->size are reserved, the
 * maximum supported size is 2^24 - 1.
 */
1434 #define DYNPTR_MAX_SIZE ((1UL << 24) - 1)
1435 #define DYNPTR_TYPE_SHIFT 28
1436 #define DYNPTR_SIZE_MASK 0xFFFFFF
1437 #define DYNPTR_RDONLY_BIT BIT(31)
1439 static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
1441 return ptr->size & DYNPTR_RDONLY_BIT;
1444 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
1446 ptr->size |= DYNPTR_RDONLY_BIT;
1449 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
1451 ptr->size |= type << DYNPTR_TYPE_SHIFT;
1454 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
1456 return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
1459 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
1461 return ptr->size & DYNPTR_SIZE_MASK;
1464 static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
1466 u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;
1468 ptr->size = new_size | metadata;
1471 int bpf_dynptr_check_size(u32 size)
1473 return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
1476 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
1477 enum bpf_dynptr_type type, u32 offset, u32 size)
1480 ptr->offset = offset;
1482 bpf_dynptr_set_type(ptr, type);
1485 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
1487 memset(ptr, 0, sizeof(*ptr));
1490 static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
1492 u32 size = __bpf_dynptr_size(ptr);
1494 if (len > size || offset > size - len)
1500 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
1504 BTF_TYPE_EMIT(struct bpf_dynptr);
1506 err = bpf_dynptr_check_size(size);
1510 /* flags is currently unsupported */
1516 bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);
1521 bpf_dynptr_set_null(ptr);
1525 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
1526 .func = bpf_dynptr_from_mem,
1528 .ret_type = RET_INTEGER,
1529 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1530 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1531 .arg3_type = ARG_ANYTHING,
1532 .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
1535 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
1536 u32, offset, u64, flags)
1538 enum bpf_dynptr_type type;
1541 if (!src->data || flags)
1544 err = bpf_dynptr_check_off_len(src, offset, len);
1548 type = bpf_dynptr_get_type(src);
1551 case BPF_DYNPTR_TYPE_LOCAL:
1552 case BPF_DYNPTR_TYPE_RINGBUF:
1553 /* Source and destination may possibly overlap, hence use memmove to
1554 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1555 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1557 memmove(dst, src->data + src->offset + offset, len);
1559 case BPF_DYNPTR_TYPE_SKB:
1560 return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
1561 case BPF_DYNPTR_TYPE_XDP:
1562 return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
1564 WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
1569 static const struct bpf_func_proto bpf_dynptr_read_proto = {
1570 .func = bpf_dynptr_read,
1572 .ret_type = RET_INTEGER,
1573 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
1574 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
1575 .arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1576 .arg4_type = ARG_ANYTHING,
1577 .arg5_type = ARG_ANYTHING,
1580 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
1581 u32, len, u64, flags)
1583 enum bpf_dynptr_type type;
1586 if (!dst->data || __bpf_dynptr_is_rdonly(dst))
1589 err = bpf_dynptr_check_off_len(dst, offset, len);
1593 type = bpf_dynptr_get_type(dst);
1596 case BPF_DYNPTR_TYPE_LOCAL:
1597 case BPF_DYNPTR_TYPE_RINGBUF:
1600 /* Source and destination may possibly overlap, hence use memmove to
1601 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1602 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1604 memmove(dst->data + dst->offset + offset, src, len);
1606 case BPF_DYNPTR_TYPE_SKB:
1607 return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
1609 case BPF_DYNPTR_TYPE_XDP:
1612 return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
1614 WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
1619 static const struct bpf_func_proto bpf_dynptr_write_proto = {
1620 .func = bpf_dynptr_write,
1622 .ret_type = RET_INTEGER,
1623 .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1624 .arg2_type = ARG_ANYTHING,
1625 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1626 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
1627 .arg5_type = ARG_ANYTHING,
1630 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
1632 enum bpf_dynptr_type type;
1638 err = bpf_dynptr_check_off_len(ptr, offset, len);
1642 if (__bpf_dynptr_is_rdonly(ptr))
1645 type = bpf_dynptr_get_type(ptr);
1648 case BPF_DYNPTR_TYPE_LOCAL:
1649 case BPF_DYNPTR_TYPE_RINGBUF:
1650 return (unsigned long)(ptr->data + ptr->offset + offset);
1651 case BPF_DYNPTR_TYPE_SKB:
1652 case BPF_DYNPTR_TYPE_XDP:
1653 /* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
1656 WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
static const struct bpf_func_proto bpf_dynptr_data_proto = {
	.func		= bpf_dynptr_data,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_DYNPTR_MEM_OR_NULL,
	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
};
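/* Illustrative sketch (not part of this file): combining the dynptr helpers
 * above from a BPF program. "scratch" stands for memory the program can hand
 * to bpf_dynptr_from_mem() (for example a global variable) and is
 * hypothetical.
 *
 *	struct bpf_dynptr dptr;
 *	char tmp[16];
 *
 *	if (bpf_dynptr_from_mem(scratch, sizeof(scratch), 0, &dptr))
 *		return 0;
 *	// copy 16 bytes out of the dynptr starting at offset 8
 *	bpf_dynptr_read(tmp, sizeof(tmp), &dptr, 8, 0);
 *	// write them back at offset 0
 *	bpf_dynptr_write(&dptr, 0, tmp, sizeof(tmp), 0);
 *	// or obtain a direct pointer into the underlying memory
 *	char *p = bpf_dynptr_data(&dptr, 0, 16);
 *
 *	if (p)
 *		p[0] = 0;
 */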
1670 const struct bpf_func_proto bpf_get_current_task_proto __weak;
1671 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1672 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1673 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1674 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1675 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1676 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1678 const struct bpf_func_proto *
1679 bpf_base_func_proto(enum bpf_func_id func_id)
1682 case BPF_FUNC_map_lookup_elem:
1683 return &bpf_map_lookup_elem_proto;
1684 case BPF_FUNC_map_update_elem:
1685 return &bpf_map_update_elem_proto;
1686 case BPF_FUNC_map_delete_elem:
1687 return &bpf_map_delete_elem_proto;
1688 case BPF_FUNC_map_push_elem:
1689 return &bpf_map_push_elem_proto;
1690 case BPF_FUNC_map_pop_elem:
1691 return &bpf_map_pop_elem_proto;
1692 case BPF_FUNC_map_peek_elem:
1693 return &bpf_map_peek_elem_proto;
1694 case BPF_FUNC_map_lookup_percpu_elem:
1695 return &bpf_map_lookup_percpu_elem_proto;
1696 case BPF_FUNC_get_prandom_u32:
1697 return &bpf_get_prandom_u32_proto;
1698 case BPF_FUNC_get_smp_processor_id:
1699 return &bpf_get_raw_smp_processor_id_proto;
1700 case BPF_FUNC_get_numa_node_id:
1701 return &bpf_get_numa_node_id_proto;
1702 case BPF_FUNC_tail_call:
1703 return &bpf_tail_call_proto;
1704 case BPF_FUNC_ktime_get_ns:
1705 return &bpf_ktime_get_ns_proto;
1706 case BPF_FUNC_ktime_get_boot_ns:
1707 return &bpf_ktime_get_boot_ns_proto;
1708 case BPF_FUNC_ktime_get_tai_ns:
1709 return &bpf_ktime_get_tai_ns_proto;
1710 case BPF_FUNC_ringbuf_output:
1711 return &bpf_ringbuf_output_proto;
1712 case BPF_FUNC_ringbuf_reserve:
1713 return &bpf_ringbuf_reserve_proto;
1714 case BPF_FUNC_ringbuf_submit:
1715 return &bpf_ringbuf_submit_proto;
1716 case BPF_FUNC_ringbuf_discard:
1717 return &bpf_ringbuf_discard_proto;
1718 case BPF_FUNC_ringbuf_query:
1719 return &bpf_ringbuf_query_proto;
1720 case BPF_FUNC_strncmp:
1721 return &bpf_strncmp_proto;
1722 case BPF_FUNC_strtol:
1723 return &bpf_strtol_proto;
1724 case BPF_FUNC_strtoul:
1725 return &bpf_strtoul_proto;
1734 case BPF_FUNC_spin_lock:
1735 return &bpf_spin_lock_proto;
1736 case BPF_FUNC_spin_unlock:
1737 return &bpf_spin_unlock_proto;
1738 case BPF_FUNC_jiffies64:
1739 return &bpf_jiffies64_proto;
1740 case BPF_FUNC_per_cpu_ptr:
1741 return &bpf_per_cpu_ptr_proto;
1742 case BPF_FUNC_this_cpu_ptr:
1743 return &bpf_this_cpu_ptr_proto;
1744 case BPF_FUNC_timer_init:
1745 return &bpf_timer_init_proto;
1746 case BPF_FUNC_timer_set_callback:
1747 return &bpf_timer_set_callback_proto;
1748 case BPF_FUNC_timer_start:
1749 return &bpf_timer_start_proto;
1750 case BPF_FUNC_timer_cancel:
1751 return &bpf_timer_cancel_proto;
1752 case BPF_FUNC_kptr_xchg:
1753 return &bpf_kptr_xchg_proto;
1754 case BPF_FUNC_for_each_map_elem:
1755 return &bpf_for_each_map_elem_proto;
1757 return &bpf_loop_proto;
1758 case BPF_FUNC_user_ringbuf_drain:
1759 return &bpf_user_ringbuf_drain_proto;
1760 case BPF_FUNC_ringbuf_reserve_dynptr:
1761 return &bpf_ringbuf_reserve_dynptr_proto;
1762 case BPF_FUNC_ringbuf_submit_dynptr:
1763 return &bpf_ringbuf_submit_dynptr_proto;
1764 case BPF_FUNC_ringbuf_discard_dynptr:
1765 return &bpf_ringbuf_discard_dynptr_proto;
1766 case BPF_FUNC_dynptr_from_mem:
1767 return &bpf_dynptr_from_mem_proto;
1768 case BPF_FUNC_dynptr_read:
1769 return &bpf_dynptr_read_proto;
1770 case BPF_FUNC_dynptr_write:
1771 return &bpf_dynptr_write_proto;
1772 case BPF_FUNC_dynptr_data:
1773 return &bpf_dynptr_data_proto;
1774 #ifdef CONFIG_CGROUPS
1775 case BPF_FUNC_cgrp_storage_get:
1776 return &bpf_cgrp_storage_get_proto;
1777 case BPF_FUNC_cgrp_storage_delete:
1778 return &bpf_cgrp_storage_delete_proto;
1779 case BPF_FUNC_get_current_cgroup_id:
1780 return &bpf_get_current_cgroup_id_proto;
1781 case BPF_FUNC_get_current_ancestor_cgroup_id:
1782 return &bpf_get_current_ancestor_cgroup_id_proto;
1788 if (!perfmon_capable())
1792 case BPF_FUNC_trace_printk:
1793 return bpf_get_trace_printk_proto();
1794 case BPF_FUNC_get_current_task:
1795 return &bpf_get_current_task_proto;
1796 case BPF_FUNC_get_current_task_btf:
1797 return &bpf_get_current_task_btf_proto;
1798 case BPF_FUNC_probe_read_user:
1799 return &bpf_probe_read_user_proto;
1800 case BPF_FUNC_probe_read_kernel:
1801 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1802 NULL : &bpf_probe_read_kernel_proto;
1803 case BPF_FUNC_probe_read_user_str:
1804 return &bpf_probe_read_user_str_proto;
1805 case BPF_FUNC_probe_read_kernel_str:
1806 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1807 NULL : &bpf_probe_read_kernel_str_proto;
1808 case BPF_FUNC_snprintf_btf:
1809 return &bpf_snprintf_btf_proto;
1810 case BPF_FUNC_snprintf:
1811 return &bpf_snprintf_proto;
1812 case BPF_FUNC_task_pt_regs:
1813 return &bpf_task_pt_regs_proto;
1814 case BPF_FUNC_trace_vprintk:
1815 return bpf_get_trace_vprintk_proto();
1821 void bpf_list_head_free(const struct btf_field *field, void *list_head,
1822 struct bpf_spin_lock *spin_lock)
1824 struct list_head *head = list_head, *orig_head = list_head;
1826 BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
1827 BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
1829 /* Do the actual list draining outside the lock to not hold the lock for
1830 * too long, and also prevent deadlocks if tracing programs end up
1831 * executing on entry/exit of functions called inside the critical
1832 * section, and end up doing map ops that call bpf_list_head_free for
1833 * the same map value again.
1835 __bpf_spin_lock_irqsave(spin_lock);
1836 if (!head->next || list_empty(head))
1840 INIT_LIST_HEAD(orig_head);
1841 __bpf_spin_unlock_irqrestore(spin_lock);
1843 while (head != orig_head) {
1846 obj -= field->graph_root.node_offset;
1848 /* The contained type can also have resources, including a
1849 * bpf_list_head which needs to be freed.
1852 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
1857 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
1858 * 'rb_node *', so field name of rb_node within containing struct is not
1861 * Since bpf_rb_tree's node type has a corresponding struct btf_field with
1862 * graph_root.node_offset, it's not necessary to know field name
1863 * or type of node struct
1865 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
1866 for (pos = rb_first_postorder(root); \
1867 pos && ({ n = rb_next_postorder(pos); 1; }); \
1870 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
1871 struct bpf_spin_lock *spin_lock)
1873 struct rb_root_cached orig_root, *root = rb_root;
1874 struct rb_node *pos, *n;
1877 BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
1878 BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
1880 __bpf_spin_lock_irqsave(spin_lock);
1882 *root = RB_ROOT_CACHED;
1883 __bpf_spin_unlock_irqrestore(spin_lock);
1885 bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
1887 obj -= field->graph_root.node_offset;
1891 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
1896 __bpf_kfunc_start_defs();
1898 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
1900 struct btf_struct_meta *meta = meta__ign;
1901 u64 size = local_type_id__k;
1904 p = bpf_mem_alloc(&bpf_global_ma, size);
1908 bpf_obj_init(meta->record, p);
1912 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
1914 u64 size = local_type_id__k;
1916 /* The verifier has ensured that meta__ign must be NULL */
1917 return bpf_mem_alloc(&bpf_global_percpu_ma, size);
1920 /* Must be called under migrate_disable(), as required by bpf_mem_free */
1921 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
1923 struct bpf_mem_alloc *ma;
1925 if (rec && rec->refcount_off >= 0 &&
1926 !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
1927 /* Object is refcounted and refcount_dec didn't result in 0
1928 * refcount. Return without freeing the object
1934 bpf_obj_free_fields(rec, p);
1937 ma = &bpf_global_percpu_ma;
1939 ma = &bpf_global_ma;
1940 if (rec && rec->refcount_off >= 0)
1941 bpf_mem_free_rcu(ma, p);
1943 bpf_mem_free(ma, p);
1946 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
1948 struct btf_struct_meta *meta = meta__ign;
1951 __bpf_obj_drop_impl(p, meta ? meta->record : NULL, false);
1954 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
1956 /* The verifier has ensured that meta__ign must be NULL */
1957 bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
1960 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
1962 struct btf_struct_meta *meta = meta__ign;
1963 struct bpf_refcount *ref;
1965 /* Could just cast directly to refcount_t *, but need some code using
1966 * bpf_refcount type so that it is emitted in vmlinux BTF
1968 ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
1969 if (!refcount_inc_not_zero((refcount_t *)ref))
1972 /* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null
1975 return (void *)p__refcounted_kptr;
1978 static int __bpf_list_add(struct bpf_list_node_kern *node,
1979 struct bpf_list_head *head,
1980 bool tail, struct btf_record *rec, u64 off)
1982 struct list_head *n = &node->list_head, *h = (void *)head;
1984 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
1985 * called on its fields, so init here
1987 if (unlikely(!h->next))
1990 /* node->owner != NULL implies !list_empty(n), no need to separately
1993 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
1994 /* Only called from BPF prog, no need to migrate_disable */
1995 __bpf_obj_drop_impl((void *)n - off, rec, false);
1999 tail ? list_add_tail(n, h) : list_add(n, h);
2000 WRITE_ONCE(node->owner, head);
2005 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
2006 struct bpf_list_node *node,
2007 void *meta__ign, u64 off)
2009 struct bpf_list_node_kern *n = (void *)node;
2010 struct btf_struct_meta *meta = meta__ign;
2012 return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
2015 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
2016 struct bpf_list_node *node,
2017 void *meta__ign, u64 off)
2019 struct bpf_list_node_kern *n = (void *)node;
2020 struct btf_struct_meta *meta = meta__ign;
2022 return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
2025 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
2027 struct list_head *n, *h = (void *)head;
2028 struct bpf_list_node_kern *node;
2030 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2031 * called on its fields, so init here
2033 if (unlikely(!h->next))
2038 n = tail ? h->prev : h->next;
2039 node = container_of(n, struct bpf_list_node_kern, list_head);
2040 if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
2044 WRITE_ONCE(node->owner, NULL);
2045 return (struct bpf_list_node *)n;
2048 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
2050 return __bpf_list_del(head, false);
2053 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
2055 return __bpf_list_del(head, true);
2058 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
2059 struct bpf_rb_node *node)
2061 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2062 struct rb_root_cached *r = (struct rb_root_cached *)root;
2063 struct rb_node *n = &node_internal->rb_node;
2065 /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
2066 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
2068 if (READ_ONCE(node_internal->owner) != root)
2071 rb_erase_cached(n, r);
2073 WRITE_ONCE(node_internal->owner, NULL);
2074 return (struct bpf_rb_node *)n;
2077 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
2080 static int __bpf_rbtree_add(struct bpf_rb_root *root,
2081 struct bpf_rb_node_kern *node,
2082 void *less, struct btf_record *rec, u64 off)
2084 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
2085 struct rb_node *parent = NULL, *n = &node->rb_node;
2086 bpf_callback_t cb = (bpf_callback_t)less;
2087 bool leftmost = true;
2089 /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
2092 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2093 /* Only called from BPF prog, no need to migrate_disable */
2094 __bpf_obj_drop_impl((void *)n - off, rec, false);
2100 if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
2101 link = &parent->rb_left;
2103 link = &parent->rb_right;
2108 rb_link_node(n, parent, link);
2109 rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
2110 WRITE_ONCE(node->owner, root);
2114 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
2115 bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
2116 void *meta__ign, u64 off)
2118 struct btf_struct_meta *meta = meta__ign;
2119 struct bpf_rb_node_kern *n = (void *)node;
2121 return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
__bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
{
	struct rb_root_cached *r = (struct rb_root_cached *)root;

	return (struct bpf_rb_node *)rb_first_cached(r);
}
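/* Illustrative sketch (not part of this file): using the rbtree kfuncs above
 * from a BPF program, with a bpf_spin_lock protecting the root as the verifier
 * requires. The node layout, the "less" ordering and the private()/__contains()
 * annotations follow the conventions of the kernel's BPF selftests and are
 * hypothetical here.
 *
 *	struct node_data {
 *		struct bpf_rb_node node;
 *		u64 key;
 *	};
 *
 *	private(A) struct bpf_spin_lock glock;
 *	private(A) struct bpf_rb_root groot __contains(node_data, node);
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	struct node_data *n = bpf_obj_new(typeof(*n));
 *
 *	if (!n)
 *		return 0;
 *	n->key = 42;
 *	bpf_spin_lock(&glock);
 *	bpf_rbtree_add(&groot, &n->node, less);
 *	bpf_spin_unlock(&glock);
 */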
/**
 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
 * kfunc which is not stored in a map as a kptr, must be released by calling
 * bpf_task_release().
 * @p: The task on which a reference is being acquired.
 */
__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
{
	if (refcount_inc_not_zero(&p->rcu_users))
		return p;
	return NULL;
}

/**
 * bpf_task_release - Release the reference acquired on a task.
 * @p: The task on which a reference is being released.
 */
__bpf_kfunc void bpf_task_release(struct task_struct *p)
{
	put_task_struct_rcu_user(p);
}
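/* Illustrative sketch (not part of this file): typical use of the two task
 * kfuncs above from a BPF program.
 *
 *	struct task_struct *cur = bpf_get_current_task_btf();
 *	struct task_struct *p = bpf_task_acquire(cur);
 *
 *	if (!p)
 *		return 0;
 *	// p now holds its own reference and could, for example, be stored
 *	// in a map as a kptr; if it is not, it must be released:
 *	bpf_task_release(p);
 */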
2153 #ifdef CONFIG_CGROUPS
2155 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2156 * this kfunc which is not stored in a map as a kptr, must be released by
2157 * calling bpf_cgroup_release().
2158 * @cgrp: The cgroup on which a reference is being acquired.
2160 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
2162 return cgroup_tryget(cgrp) ? cgrp : NULL;
2166 * bpf_cgroup_release - Release the reference acquired on a cgroup.
2167 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
2168 * not be freed until the current grace period has ended, even if its refcount
2170 * @cgrp: The cgroup on which a reference is being released.
2172 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
2178 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2179 * array. A cgroup returned by this kfunc which is not subsequently stored in a
2180 * map, must be released by calling bpf_cgroup_release().
2181 * @cgrp: The cgroup for which we're performing a lookup.
2182 * @level: The level of ancestor to look up.
2184 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
2186 struct cgroup *ancestor;
2188 if (level > cgrp->level || level < 0)
2191 /* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
2192 ancestor = cgrp->ancestors[level];
2193 if (!cgroup_tryget(ancestor))
2199 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2200 * kfunc which is not subsequently stored in a map, must be released by calling
2201 * bpf_cgroup_release().
2204 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
2206 struct cgroup *cgrp;
2208 cgrp = cgroup_get_from_id(cgid);
2215 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
2216 * task's membership of cgroup ancestry.
2217 * @task: the task to be tested
2218 * @ancestor: possible ancestor of @task's cgroup
2220 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
2221 * It follows all the same rules as cgroup_is_descendant, and only applies
2222 * to the default hierarchy.
__bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
				       struct cgroup *ancestor)
{
	long ret;

	rcu_read_lock();
	ret = task_under_cgroup_hierarchy(task, ancestor);
	rcu_read_unlock();
	return ret;
}

#endif /* CONFIG_CGROUPS */
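/* Illustrative sketch (not part of this file): combining the cgroup kfuncs
 * above, e.g. testing whether the current task runs under a cgroup looked up
 * by ID. "cgid" stands for a cgroup ID supplied by user space and is
 * hypothetical.
 *
 *	struct cgroup *cgrp = bpf_cgroup_from_id(cgid);
 *	struct task_struct *task = bpf_get_current_task_btf();
 *	long under;
 *
 *	if (!cgrp)
 *		return 0;
 *	under = bpf_task_under_cgroup(task, cgrp);
 *	bpf_cgroup_release(cgrp);
 */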
2237 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2238 * in the root pid namespace idr. If a task is returned, it must either be
2239 * stored in a map, or released with bpf_task_release().
2240 * @pid: The pid of the task being looked up.
2242 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
2244 struct task_struct *p;
2247 p = find_task_by_pid_ns(pid, &init_pid_ns);
2249 p = bpf_task_acquire(p);
2256 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2257 * @ptr: The dynptr whose data slice to retrieve
2258 * @offset: Offset into the dynptr
2259 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2260 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2261 * length of the requested slice. This must be a constant.
2263 * For non-skb and non-xdp type dynptrs, there is no difference between
2264 * bpf_dynptr_slice and bpf_dynptr_data.
2266 * If buffer__opt is NULL, the call will fail if buffer_opt was needed.
2268 * If the intention is to write to the data slice, please use
2269 * bpf_dynptr_slice_rdwr.
2271 * The user must check that the returned pointer is not null before using it.
2273 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
2274 * does not change the underlying packet data pointers, so a call to
2275 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2278 * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only
2279 * data slice (can be either direct pointer to the data or a pointer to the user
2280 * provided buffer, with its contents containing the data, if unable to obtain
2283 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset,
2284 void *buffer__opt, u32 buffer__szk)
2286 enum bpf_dynptr_type type;
2287 u32 len = buffer__szk;
2293 err = bpf_dynptr_check_off_len(ptr, offset, len);
2297 type = bpf_dynptr_get_type(ptr);
2300 case BPF_DYNPTR_TYPE_LOCAL:
2301 case BPF_DYNPTR_TYPE_RINGBUF:
2302 return ptr->data + ptr->offset + offset;
2303 case BPF_DYNPTR_TYPE_SKB:
2305 return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
2307 return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
2308 case BPF_DYNPTR_TYPE_XDP:
2310 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
2311 if (!IS_ERR_OR_NULL(xdp_ptr))
2316 bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
2320 WARN_ONCE(true, "unknown dynptr type %d\n", type);
/**
 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
 * @ptr: The dynptr whose data slice to retrieve
 * @offset: Offset into the dynptr
 * @buffer__opt: User-provided buffer to copy contents into. May be NULL
 * @buffer__szk: Size (in bytes) of the buffer if present. This is the
 *               length of the requested slice. This must be a constant.
 *
 * For non-skb and non-xdp type dynptrs, there is no difference between
 * bpf_dynptr_slice and bpf_dynptr_data.
 *
 * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
 *
 * The returned pointer is writable and may point either directly to the dynptr
 * data at the requested offset or to the buffer if unable to obtain a direct
 * data pointer (example: the requested slice is to the paged area of an skb
 * packet). In the case where the returned pointer is to the buffer, the user
 * is responsible for persisting writes through calling bpf_dynptr_write(). This
 * usually looks something like this pattern:
 *
 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
 * if (!eth)
 *	return TC_ACT_SHOT;
 *
 * // mutate eth header //
 *
 * if (eth == buffer)
 *	// copy the modified data back to the underlying packet
 *	bpf_dynptr_write(&dynptr, 0, buffer, sizeof(buffer), 0);
 *
 * Please note that, as in the example above, the user must check that the
 * returned pointer is not null before using it.
 *
 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
 * does not change the underlying packet data pointers, so a call to
 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
 * the bpf program.
 *
 * Return: NULL if the call failed (e.g. invalid dynptr), pointer to a
 * data slice (can be either a direct pointer to the data or a pointer to the user
 * provided buffer, with its contents containing the data, if unable to obtain a
 * direct pointer)
 */
__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset,
					void *buffer__opt, u32 buffer__szk)
{
	if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
		return NULL;

	/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
	 *
	 * For skb-type dynptrs, it is safe to write into the returned pointer
	 * if the bpf program allows skb data writes. There are two possibilities
	 * that may occur when calling bpf_dynptr_slice_rdwr:
	 *
	 * 1) The requested slice is in the head of the skb. In this case, the
	 * returned pointer is directly to skb data, and if the skb is cloned, the
	 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
	 * The pointer can be directly written into.
	 *
	 * 2) Some portion of the requested slice is in the paged buffer area.
	 * In this case, the requested data will be copied out into the buffer
	 * and the returned pointer will be a pointer to the buffer. The skb
	 * will not be pulled. To persist the write, the user will need to call
	 * bpf_dynptr_write(), which will pull the skb and commit the write.
	 *
	 * Similarly for xdp programs, if the requested slice is not across xdp
	 * fragments, then a direct pointer will be returned, otherwise the data
	 * will be copied out into the buffer and the user will need to call
	 * bpf_dynptr_write() to commit changes.
	 */
	return bpf_dynptr_slice(ptr, offset, buffer__opt, buffer__szk);
}
__bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 end)
{
	u32 size;

	if (!ptr->data || start > end)
		return -EINVAL;

	size = __bpf_dynptr_size(ptr);

	if (start > size || end > size)
		return -ERANGE;

	ptr->offset += start;
	bpf_dynptr_set_size(ptr, end - start);

	return 0;
}
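/* Example (illustrative sketch, not kernel code): using bpf_dynptr_adjust() to
 * narrow a dynptr to the payload that follows a fixed-size header, then reading
 * the payload with bpf_dynptr_read(). "HDR_SZ" and "payload" are hypothetical;
 * the dynptr "ptr" is assumed to have been initialized earlier (e.g. via
 * bpf_dynptr_from_skb() or bpf_ringbuf_reserve_dynptr()).
 *
 *	__u32 size = bpf_dynptr_size(&ptr);
 *
 *	if (size < HDR_SZ ||
 *	    bpf_dynptr_adjust(&ptr, HDR_SZ, size))
 *		return 0;
 *	// ptr now covers bytes [HDR_SZ, size) of the original view
 *	err = bpf_dynptr_read(payload, sizeof(payload), &ptr, 0, 0);
 */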
__bpf_kfunc bool bpf_dynptr_is_null(struct bpf_dynptr_kern *ptr)
{
	return !ptr->data;
}
__bpf_kfunc bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr)
{
	if (!ptr->data)
		return false;

	return __bpf_dynptr_is_rdonly(ptr);
}
__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
{
	if (!ptr->data)
		return -EINVAL;

	return __bpf_dynptr_size(ptr);
}
__bpf_kfunc int bpf_dynptr_clone(struct bpf_dynptr_kern *ptr,
				 struct bpf_dynptr_kern *clone__uninit)
{
	if (!ptr->data) {
		bpf_dynptr_set_null(clone__uninit);
		return -EINVAL;
	}

	*clone__uninit = *ptr;

	return 0;
}
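/* Example (illustrative sketch, not kernel code): bpf_dynptr_clone() creates a
 * second view of the same underlying data, so one view can be adjusted or
 * consumed without disturbing the other. Offset and size are per-view; the data
 * itself is shared, and invalidating the underlying object (e.g. submitting a
 * ringbuf record) invalidates all clones as well.
 *
 *	struct bpf_dynptr clone;
 *
 *	if (bpf_dynptr_clone(&ptr, &clone))
 *		return 0;
 *	// trim the clone to its first 4 bytes; "ptr" still sees everything
 *	if (bpf_dynptr_adjust(&clone, 0, 4))
 *		return 0;
 */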
__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
{
	return obj;
}
__bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
{
	return obj__ign;
}
__bpf_kfunc void bpf_rcu_read_lock(void)
{
	rcu_read_lock();
}
__bpf_kfunc void bpf_rcu_read_unlock(void)
{
	rcu_read_unlock();
}
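/* Example (illustrative sketch, not kernel code): bpf_rcu_read_lock() and
 * bpf_rcu_read_unlock() bracket access to RCU-protected pointers (fields the
 * verifier tracks as __rcu, such as task->real_parent) in programs where an
 * RCU read-side critical section is not already guaranteed, e.g. sleepable
 * programs. A pointer obtained under the lock must not be used after the
 * matching unlock.
 *
 *	struct task_struct *task = bpf_get_current_task_btf();
 *	struct task_struct *parent;
 *
 *	bpf_rcu_read_lock();
 *	parent = task->real_parent;
 *	if (parent)
 *		bpf_printk("parent pid %d", parent->pid);
 *	bpf_rcu_read_unlock();
 */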
struct bpf_throw_ctx {
	struct bpf_prog_aux *aux;
	u64 sp;
	u64 bp;
	int cnt;
};
static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
{
	struct bpf_throw_ctx *ctx = cookie;
	struct bpf_prog *prog;

	if (!is_bpf_text_address(ip))
		return !ctx->cnt;
	prog = bpf_prog_ksym_find(ip);
	ctx->cnt++;
	if (bpf_is_subprog(prog))
		return true;
	ctx->aux = prog->aux;
	ctx->sp = sp;
	ctx->bp = bp;
	return false;
}
__bpf_kfunc void bpf_throw(u64 cookie)
{
	struct bpf_throw_ctx ctx = {};

	arch_bpf_stack_walk(bpf_stack_walker, &ctx);
	WARN_ON_ONCE(!ctx.aux);
	if (ctx.aux)
		WARN_ON_ONCE(!ctx.aux->exception_boundary);
	WARN_ON_ONCE(!ctx.bp);
	WARN_ON_ONCE(!ctx.cnt);
	/* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
	 * deeper stack depths than ctx.sp as we do not return from bpf_throw,
	 * which skips compiler generated instrumentation to do the same.
	 */
	kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
	ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp);
	WARN(1, "A call to BPF exception callback should never return\n");
}
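/* Example (illustrative sketch, not kernel code): bpf_throw() aborts the BPF
 * program and transfers control to its exception callback, whose return value
 * becomes the program's return value; with no user-supplied callback the
 * default one simply returns the cookie. On the BPF side this is usually
 * reached through the bpf_assert() family of macros in bpf_experimental.h
 * rather than called directly.
 *
 *	SEC("tc")
 *	int drop_short(struct __sk_buff *skb)
 *	{
 *		// abort; with the default exception callback the program
 *		// returns the cookie value (TC_ACT_SHOT here)
 *		if (skb->len < ETH_HLEN)
 *			bpf_throw(TC_ACT_SHOT);
 *		return TC_ACT_OK;
 *	}
 */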
__bpf_kfunc_end_defs();
BTF_SET8_START(generic_btf_ids)
#ifdef CONFIG_KEXEC_CORE
BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
#endif
BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_push_front_impl)
BTF_ID_FLAGS(func, bpf_list_push_back_impl)
BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)

#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
#endif
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_throw)
BTF_SET8_END(generic_btf_ids)
static const struct btf_kfunc_id_set generic_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &generic_btf_ids,
};
BTF_ID_LIST(generic_dtor_ids)
BTF_ID(struct, task_struct)
BTF_ID(func, bpf_task_release)
#ifdef CONFIG_CGROUPS
BTF_ID(struct, cgroup)
BTF_ID(func, bpf_cgroup_release)
#endif
BTF_SET8_START(common_btf_ids)
BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx)
BTF_ID_FLAGS(func, bpf_rdonly_cast)
BTF_ID_FLAGS(func, bpf_rcu_read_lock)
BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
#ifdef CONFIG_CGROUPS
BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
#endif
BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_dynptr_adjust)
BTF_ID_FLAGS(func, bpf_dynptr_is_null)
BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
BTF_ID_FLAGS(func, bpf_dynptr_size)
BTF_ID_FLAGS(func, bpf_dynptr_clone)
BTF_SET8_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &common_btf_ids,
};
static int __init kfunc_init(void)
{
	int ret;
	const struct btf_id_dtor_kfunc generic_dtors[] = {
		{
			.btf_id       = generic_dtor_ids[0],
			.kfunc_btf_id = generic_dtor_ids[1]
		},
#ifdef CONFIG_CGROUPS
		{
			.btf_id       = generic_dtor_ids[2],
			.kfunc_btf_id = generic_dtor_ids[3]
		},
#endif
	};

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
	ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
						 ARRAY_SIZE(generic_dtors),
						 THIS_MODULE);
	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
}
late_initcall(kfunc_init);
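/* Example (illustrative sketch, not part of this file): kfunc_init() above is
 * the registration pattern other subsystems follow to expose their own kfuncs.
 * A hypothetical subsystem "foo" would collect its kfuncs in a BTF id set and
 * register it for the program types allowed to call them:
 *
 *	BTF_SET8_START(foo_kfunc_ids)
 *	BTF_ID_FLAGS(func, bpf_foo_get, KF_ACQUIRE | KF_RET_NULL)
 *	BTF_ID_FLAGS(func, bpf_foo_put, KF_RELEASE)
 *	BTF_SET8_END(foo_kfunc_ids)
 *
 *	static const struct btf_kfunc_id_set foo_kfunc_set = {
 *		.owner = THIS_MODULE,
 *		.set   = &foo_kfunc_ids,
 *	};
 *
 *	static int __init foo_kfunc_init(void)
 *	{
 *		return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
 *						 &foo_kfunc_set);
 *	}
 *	late_initcall(foo_kfunc_init);
 */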