// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include "bpf_helpers.h"

/* Permit pretty deep stack traces */
#define MAX_STACK_RAWTP 100
struct stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

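/* Perf event array used to stream the collected stack data to user space. */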
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(__u32));
} perfmap SEC(".maps");

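/* Per-cpu scratch space for the stack dump: struct stack_trace_t is far
 * larger than the 512-byte BPF program stack, so a map value is used as
 * the working buffer instead.
 */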
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct stack_trace_t);
} stackdata_map SEC(".maps");

/* Allocate per-cpu space twice what is needed. For the code below:
 *   usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
 *   if (usize < 0)
 *           return 0;
 *   ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
 *
 * if the value_size were only MAX_STACK_RAWTP * sizeof(__u64), the
 * verifier would complain that the access "raw_data + usize" with size
 * "max_len - usize" may be out of bounds.  The maximum of
 * "raw_data + usize" is "raw_data + max_len" and the maximum of
 * "max_len - usize" is "max_len", so the verifier concludes that the
 * maximum buffer access range is "raw_data[0 ... max_len * 2 - 1]" and
 * hence rejects the program.
 *
 * Doubling the to-be-used max buffer size fixes this verifier issue
 * and avoids complicated C-level massaging.  This is an acceptable
 * workaround since the map has only one entry.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64[2 * MAX_STACK_RAWTP]);
} rawdata_map SEC(".maps");

SEC("raw_tracepoint/sys_enter")
int bpf_prog1(void *ctx)
{
	int max_len, max_buildid_len, usize, ksize, total_size;
	struct stack_trace_t *data;
	void *raw_data;
	__u32 key = 0;

	data = bpf_map_lookup_elem(&stackdata_map, &key);
	if (!data)
		return 0;

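	/* Collect the kernel stack, the user stack, and the build-id form of
	 * the user stack into the fixed-size fields of stack_trace_t, then
	 * push the whole struct to user space through the perf buffer.
	 */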
	max_len = MAX_STACK_RAWTP * sizeof(__u64);
	max_buildid_len = MAX_STACK_RAWTP * sizeof(struct bpf_stack_build_id);
	data->pid = bpf_get_current_pid_tgid();
	data->kern_stack_size = bpf_get_stack(ctx, data->kern_stack,
					      max_len, 0);
	data->user_stack_size = bpf_get_stack(ctx, data->user_stack, max_len,
					      BPF_F_USER_STACK);
	data->user_stack_buildid_size = bpf_get_stack(
		ctx, data->user_stack_buildid, max_buildid_len,
		BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
	bpf_perf_event_output(ctx, &perfmap, 0, data, sizeof(*data));

	/* write both kernel and user stacks to the same buffer */
	raw_data = bpf_map_lookup_elem(&rawdata_map, &key);
	if (!raw_data)
		return 0;

	usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
	if (usize < 0)
		return 0;

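	/* Append the kernel stack directly after the user stack.  This is
	 * the access pattern described above rawdata_map that requires the
	 * doubled buffer to satisfy the verifier.
	 */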
	ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
	if (ksize < 0)
		return 0;

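	/* Emit the combined record only when something was captured and the
	 * total size stays within the intended max_len bytes.
	 */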
	total_size = usize + ksize;
	if (total_size > 0 && total_size <= max_len)
		bpf_perf_event_output(ctx, &perfmap, 0, raw_data, total_size);

	return 0;
}

char _license[] SEC("license") = "GPL";