/* tools/perf/tests/bpf.c */
#include <stdio.h>
#include <sys/epoll.h>
#include <util/util.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <bpf/bpf.h>
#include "tests.h"
#include "llvm.h"
#include "debug.h"
#define NR_ITERS       111

#ifdef HAVE_LIBBPF_SUPPORT

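/*
 * Workload for the basic filtering test: call epoll_pwait() NR_ITERS
 * times with an invalid fd so every call fails quickly; the test
 * expects the attached BPF program to let (NR_ITERS + 1) / 2 of the
 * resulting samples through.
 */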
static int epoll_pwait_loop(void)
{
	int i;

	/* Should fail NR_ITERS times */
	for (i = 0; i < NR_ITERS; i++)
		epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
	return 0;
}

#ifdef HAVE_BPF_PROLOGUE

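/*
 * Workload for the BPF prologue test: alternate lseek() calls on two
 * /dev/null descriptors with varying offset/whence arguments, so the
 * prologue-generated argument fetching can be exercised.
 */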
static int llseek_loop(void)
{
	int fds[2], i;

	fds[0] = open("/dev/null", O_RDONLY);
	fds[1] = open("/dev/null", O_RDWR);

	if (fds[0] < 0 || fds[1] < 0)
		return -1;

	for (i = 0; i < NR_ITERS; i++) {
		lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
		lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
	}
	close(fds[0]);
	close(fds[1]);
	return 0;
}

#endif

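/*
 * BPF subtest table: a NULL target_func (the relocation checker) marks
 * an object that is expected to be rejected at load time.
 */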
static struct {
	enum test_llvm__testcase prog_id;
	const char *desc;
	const char *name;
	const char *msg_compile_fail;
	const char *msg_load_fail;
	int (*target_func)(void);
	int expect_result;
} bpf_testcase_table[] = {
	{
		LLVM_TESTCASE_BASE,
		"Test basic BPF filtering",
		"[basic_bpf_test]",
		"fix 'perf test LLVM' first",
		"load bpf object failed",
		&epoll_pwait_loop,
		(NR_ITERS + 1) / 2,
	},
#ifdef HAVE_BPF_PROLOGUE
	{
		LLVM_TESTCASE_BPF_PROLOGUE,
		"Test BPF prologue generation",
		"[bpf_prologue_test]",
		"fix kbuild first",
		"check your vmlinux setting?",
		&llseek_loop,
		(NR_ITERS + 1) / 4,
	},
#endif
	{
		LLVM_TESTCASE_BPF_RELOCATION,
		"Test BPF relocation checker",
		"[bpf_relocation_test]",
		"fix 'perf test LLVM' first",
		"libbpf error when dealing with relocation",
		NULL,
		0,
	},
};

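/*
 * Parse the events selected by the BPF object, open them on the current
 * process, run the workload between enable/disable, then count the
 * PERF_RECORD_SAMPLE events read from the ring buffer and compare the
 * count with the expected value.
 */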
static int do_test(struct bpf_object *obj, int (*func)(void),
		   int expect)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.freq	      = 0,
		.mmap_pages   = 256,
		.default_interval = 1,
	};

	char pid[16];
	char sbuf[STRERR_BUFSIZE];
	struct perf_evlist *evlist;
	int i, ret = TEST_FAIL, err = 0, count = 0;

	struct parse_events_evlist parse_evlist;
	struct parse_events_error parse_error;

	bzero(&parse_error, sizeof(parse_error));
	bzero(&parse_evlist, sizeof(parse_evlist));
	parse_evlist.error = &parse_error;
	INIT_LIST_HEAD(&parse_evlist.list);

	err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj, NULL);
	if (err || list_empty(&parse_evlist.list)) {
		pr_debug("Failed to add events selected by BPF\n");
		return TEST_FAIL;
	}

	snprintf(pid, sizeof(pid), "%d", getpid());
	pid[sizeof(pid) - 1] = '\0';
	opts.target.tid = opts.target.pid = pid;

	/* Unlike perf_evlist__new_default(), do not add default events */
	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("Not enough memory to create evlist\n");
		return TEST_FAIL;
	}

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	perf_evlist__splice_list_tail(evlist, &parse_evlist.list);
	evlist->nr_groups = parse_evlist.nr_groups;

	perf_evlist__config(evlist, &opts, NULL);

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);
	(*func)();
	perf_evlist__disable(evlist);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;

			if (type == PERF_RECORD_SAMPLE)
				count++;
		}
	}

	if (count != expect) {
		pr_debug("BPF filter result incorrect\n");
		goto out_delete_evlist;
	}

	ret = TEST_OK;

out_delete_evlist:
	perf_evlist__delete(evlist);
	return ret;
}

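/* Wrap bpf__prepare_load_buffer(), turning an error pointer into NULL */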
static struct bpf_object *
prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR(obj)) {
		pr_debug("Failed to compile BPF program.\n");
		return NULL;
	}
	return obj;
}

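/*
 * Fetch the object compiled by the 'perf test LLVM' infrastructure for
 * this subtest, then load it and run its workload.
 */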
static int __test__bpf(int idx)
{
	int ret;
	void *obj_buf;
	size_t obj_buf_sz;
	struct bpf_object *obj;

	ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
				       bpf_testcase_table[idx].prog_id,
				       true, NULL);
	if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
		pr_debug("Unable to get BPF object, %s\n",
			 bpf_testcase_table[idx].msg_compile_fail);
		if (idx == 0)
			return TEST_SKIP;
		else
			return TEST_FAIL;
	}

	obj = prepare_bpf(obj_buf, obj_buf_sz,
			  bpf_testcase_table[idx].name);
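	/*
	 * Subtests with a workload must load successfully; the
	 * relocation subtest (target_func == NULL) must not.
	 */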
	if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
		if (!obj)
			pr_debug("Failed to load BPF object: %s\n",
				 bpf_testcase_table[idx].msg_load_fail);
		else
			pr_debug("Loaded BPF object unexpectedly: %s\n",
				 bpf_testcase_table[idx].msg_load_fail);
		ret = TEST_FAIL;
		goto out;
	}

	if (obj)
		ret = do_test(obj,
			      bpf_testcase_table[idx].target_func,
			      bpf_testcase_table[idx].expect_result);
out:
	bpf__clear();
	return ret;
}

int test__bpf_subtest_get_nr(void)
{
	return (int)ARRAY_SIZE(bpf_testcase_table);
}

const char *test__bpf_subtest_get_desc(int i)
{
	if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
		return NULL;
	return bpf_testcase_table[i].desc;
}

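/*
 * Probe for basic BPF support by loading a trivial "return 1" program
 * with bpf_load_program(); callers skip the test when this fails.
 */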
static int check_env(void)
{
	int err;
	unsigned int kver_int;
	char license[] = "GPL";

	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};

	err = fetch_kernel_version(&kver_int, NULL, 0);
	if (err) {
		pr_debug("Unable to get kernel version\n");
		return err;
	}

	err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
			       sizeof(insns) / sizeof(insns[0]),
			       license, kver_int, NULL, 0);
	if (err < 0) {
		pr_err("Missing basic BPF support, skip this test: %s\n",
		       strerror(errno));
		return err;
	}
	close(err);

	return 0;
}

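/* Subtest entry point: requires root and working basic BPF support */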
int test__bpf(int i)
{
	int err;

	if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
		return TEST_FAIL;

	if (geteuid() != 0) {
		pr_debug("Only root can run BPF test\n");
		return TEST_SKIP;
	}

	if (check_env())
		return TEST_SKIP;

	err = __test__bpf(i);
	return err;
}

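/* Stubs used when perf is built without libbpf (HAVE_LIBBPF_SUPPORT unset) */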
#else
int test__bpf_subtest_get_nr(void)
{
	return 0;
}

const char *test__bpf_subtest_get_desc(int i __maybe_unused)
{
	return NULL;
}

int test__bpf(int i __maybe_unused)
{
	pr_debug("Skip BPF test because BPF support is not compiled in\n");
	return TEST_SKIP;
}
#endif