/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>

#include "trace.h"
/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
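/*
 * Illustrative sketch of the return-value convention above, as seen from
 * the BPF program side. The SEC() annotation, the attach point and the
 * pid value are assumptions for illustration only; only the 0/1 return
 * semantics come from the comment above.
 *
 *	SEC("kprobe/do_sys_open")
 *	int filter_open(struct pt_regs *ctx)
 *	{
 *		u32 pid = bpf_get_current_pid_tgid() >> 32;
 *
 *		if (pid != 1234)
 *			return 0;	// event is filtered out
 *		return 1;		// store kprobe event into ring buffer
 *	}
 */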
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc.) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
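/*
 * Illustrative sketch of why the helper is gated behind the warning above:
 * a kprobe program may overwrite memory the traced task is about to use.
 * The attach point and the PT_REGS_PARM1() accessor are assumptions for
 * illustration; any user pointer reachable from the probe would do.
 *
 *	int clobber_user_buf(struct pt_regs *ctx)
 *	{
 *		void *ubuf = (void *)PT_REGS_PARM1(ctx);
 *		char msg[] = "bpf was here";
 *
 *		// Fails with -EPERM from irq or kthread context, see above.
 *		return bpf_probe_write_user(ubuf, msg, sizeof(msg));
 *	}
 */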
/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(1 /* Fake ip will not be printed. */,		\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}
static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers.
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
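/*
 * Illustrative sketch of a format string that passes the checks above:
 * at most three arguments, only the listed specifiers, and a single %s
 * whose target is copied via strncpy_from_unsafe() into a 64 byte buffer.
 * The snippet below is an assumption for illustration only.
 *
 *	char fmt[] = "pid %d opened %s\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, filename_ptr);
 *
 * A second %s, a fourth argument or a specifier such as %f would make
 * bpf_trace_printk() return -EINVAL before anything is printed.
 */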
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}
BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
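/*
 * Illustrative sketch of the difference between the two helpers above:
 * bpf_perf_event_read() returns the counter value in-band, so errors in
 * the [-22..-2] range cannot be told apart from huge counter values,
 * while bpf_perf_event_read_value() reports errors out of band. The map
 * name "cpu_counters" is an assumption for illustration.
 *
 *	struct bpf_perf_event_value v = {};
 *	int err;
 *
 *	err = bpf_perf_event_read_value(&cpu_counters, BPF_F_CURRENT_CPU,
 *					&v, sizeof(v));
 *	if (err)
 *		return 0;
 *	// v.counter, v.enabled and v.running are all valid here.
 */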
static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(sd, 0, 0);
	sd->raw = raw;
	perf_event_output(event, sd, regs);

	return 0;
}
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
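/*
 * Illustrative sketch of the usual calling convention for the helper
 * above: the program streams a fixed-size record to the perf buffer slot
 * of the current CPU. The "events" map (BPF_MAP_TYPE_PERF_EVENT_ARRAY)
 * and the record layout are assumptions for illustration.
 *
 *	struct event {
 *		u32 pid;
 *		u64 ts;
 *	} e = { .pid = pid, .ts = bpf_ktime_get_ns() };
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */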
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}
BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(in_interrupt()))
		return -EINVAL;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
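/*
 * Illustrative sketch of the helper above from the BPF side: with a
 * BPF_MAP_TYPE_CGROUP_ARRAY map (called "cgroup_map" here only for
 * illustration) whose slot 0 holds the cgroup of interest, a program can
 * skip events for tasks outside that hierarchy.
 *
 *	if (bpf_current_task_under_cgroup(&cgroup_map, 0) != 1)
 *		return 0;
 */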
BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in the error case, so that improper users ignoring the
	 * return code altogether don't copy garbage; otherwise the length
	 * of the string is returned, which can be used for
	 * bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func		= bpf_probe_read_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
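/*
 * Illustrative sketch of using the returned length, as the comment above
 * suggests: emit only the bytes that were actually copied instead of the
 * whole buffer. The "events" map and the clamp idiom are assumptions for
 * illustration; the clamp also keeps the verifier happy about the size.
 *
 *	char buf[64];
 *	int len;
 *
 *	len = bpf_probe_read_str(buf, sizeof(buf), unsafe_ptr);
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      buf, len & (sizeof(buf) - 1));
 */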
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
	default:
		return NULL;
	}
}
static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	default:
		return tracing_func_proto(func_id);
	}
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32-bit to make sure the last 8-byte access
	 * (BPF_DW) to the last 4-byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to the perf tracepoint buffer where the first 8 bytes
	 * are hidden from the bpf program and contain a pointer to
	 * 'struct pt_regs'. Fetch it from there and call the same
	 * bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call the raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
	.func		= bpf_perf_prog_read_value_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	const int size_sp = FIELD_SIZEOF(struct bpf_perf_event_data,
					 sample_period);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_sp);
		if (!bpf_ctx_narrow_access_ok(off, size, size_sp))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
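/*
 * Illustrative sketch of the rewrite above, in rough terms. A program
 * load such as
 *
 *	u64 period = ctx->sample_period;
 *
 * is converted into two loads: one that follows the 'data' pointer inside
 * struct bpf_perf_event_data_kern, and one that reads 'period' from the
 * struct perf_sample_data it points to. Any other offset is instead
 * redirected through the 'regs' pointer at the original offset.
 */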
const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};