perf tools: Add missing map.h includes
[linux-2.6-block.git] tools/perf/tests/code-reading.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <inttypes.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/param.h>

#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "machine.h"
#include "map.h"
#include "event.h"
#include "thread.h"

#include "tests.h"

#include "sane_ctype.h"

#define BUFSZ 1024
#define READLEN 128

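/*
 * Track the start addresses of kcore maps that have already been tested, so
 * that objdump is only run once per map (see read_object_code()).
 */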
struct state {
	u64 done[1024];
	size_t done_cnt;
};

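/* Convert a single hex digit to its value; callers validate with isxdigit() */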
static unsigned int hex(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return c - 'A' + 10;
}

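/*
 * Parse one whitespace-separated group of hex byte pairs from an objdump
 * output line into *buf, advancing both the line and buffer pointers.
 * Returns the number of bytes stored.
 */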
static size_t read_objdump_chunk(const char **line, unsigned char **buf,
				 size_t *buf_len)
{
	size_t bytes_read = 0;
	unsigned char *chunk_start = *buf;

	/* Read bytes */
	while (*buf_len > 0) {
		char c1, c2;

		/* Get 2 hex digits */
		c1 = *(*line)++;
		if (!isxdigit(c1))
			break;
		c2 = *(*line)++;
		if (!isxdigit(c2))
			break;

		/* Store byte and advance buf */
		**buf = (hex(c1) << 4) | hex(c2);
		(*buf)++;
		(*buf_len)--;
		bytes_read++;

		/* End of chunk? */
		if (isspace(**line))
			break;
	}

	/*
	 * objdump will display raw insns as LE if the code endianness
	 * is LE and bytes_per_chunk > 1. In that case reverse
	 * the chunk we just read.
	 *
	 * See disassemble_bytes() in binutils/objdump.c for details on
	 * how objdump chooses the display endianness.
	 */
	if (bytes_read > 1 && !bigendian()) {
		unsigned char *chunk_end = chunk_start + bytes_read - 1;
		unsigned char tmp;

		while (chunk_start < chunk_end) {
			tmp = *chunk_start;
			*chunk_start = *chunk_end;
			*chunk_end = tmp;
			chunk_start++;
			chunk_end--;
		}
	}

	return bytes_read;
}

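/*
 * Parse the raw instruction bytes from a single line of objdump output
 * (everything between the "address:" prefix and the mnemonic) into buf.
 * Returns the number of bytes read.
 */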
static size_t read_objdump_line(const char *line, unsigned char *buf,
				size_t buf_len)
{
	const char *p;
	size_t ret, bytes_read = 0;

	/* Skip to a colon */
	p = strchr(line, ':');
	if (!p)
		return 0;
	p++;

	/* Skip initial spaces */
	while (*p) {
		if (!isspace(*p))
			break;
		p++;
	}

	do {
		ret = read_objdump_chunk(&p, &buf, &buf_len);
		bytes_read += ret;
		p++;
	} while (ret > 0);

	/* return number of successfully read bytes */
	return bytes_read;
}

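/*
 * Read objdump output line by line, placing the decoded bytes into 'buf' at
 * the offset given by each line's address relative to start_addr.  On return,
 * *len holds the number of bytes that could not be read.
 */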
static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
{
	char *line = NULL;
	size_t line_len, off_last = 0;
	ssize_t ret;
	int err = 0;
	u64 addr, last_addr = start_addr;

	while (off_last < *len) {
		size_t off, read_bytes, written_bytes;
		unsigned char tmp[BUFSZ];

		ret = getline(&line, &line_len, f);
		if (feof(f))
			break;
		if (ret < 0) {
			pr_debug("getline failed\n");
			err = -1;
			break;
		}

		/* read objdump data into temporary buffer */
		read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
		if (!read_bytes)
			continue;

		if (sscanf(line, "%"PRIx64, &addr) != 1)
			continue;
		if (addr < last_addr) {
			pr_debug("addr going backwards, read beyond section?\n");
			break;
		}
		last_addr = addr;

		/* copy it from temporary buffer to 'buf' according
		 * to address on current objdump line */
		off = addr - start_addr;
		if (off >= *len)
			break;
		written_bytes = MIN(read_bytes, *len - off);
		memcpy(buf + off, tmp, written_bytes);
		off_last = off + written_bytes;
	}

	/* len returns number of bytes that could not be read */
	*len -= off_last;

	free(line);

	return err;
}

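/*
 * Dump 'len' bytes of object code at 'addr' by running objdump on 'filename'
 * and parsing its disassembly output into 'buf'.
 */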
static int read_via_objdump(const char *filename, u64 addr, void *buf,
			    size_t len)
{
	char cmd[PATH_MAX * 2];
	const char *fmt;
	FILE *f;
	int ret;

	fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
	ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
		       filename);
	if (ret <= 0 || (size_t)ret >= sizeof(cmd))
		return -1;

	pr_debug("Objdump command is: %s\n", cmd);

	/* Ignore objdump errors */
	strcat(cmd, " 2>/dev/null");

	f = popen(cmd, "r");
	if (!f) {
		pr_debug("popen failed\n");
		return -1;
	}

	ret = read_objdump_output(f, buf, &len, addr);
	if (len) {
		pr_debug("objdump read too few bytes: %zd\n", len);
		if (!ret)
			ret = len;
	}

	pclose(f);

	return ret;
}

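/* Print a buffer as hex bytes, 16 per line, for debugging */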
static void dump_buf(unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		pr_debug("0x%02x ", buf[i]);
		if (i % 16 == 15)
			pr_debug("\n");
	}
	pr_debug("\n");
}

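/*
 * Core of the test: read the object code at 'addr' both through perf's own
 * DSO reading (dso__data_read_offset()) and through objdump, then compare
 * the two buffers.
 */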
static int read_object_code(u64 addr, size_t len, u8 cpumode,
			    struct thread *thread, struct state *state)
{
	struct addr_location al;
	unsigned char buf1[BUFSZ];
	unsigned char buf2[BUFSZ];
	size_t ret_len;
	u64 objdump_addr;
	const char *objdump_name;
	char decomp_name[KMOD_DECOMP_LEN];
	bool decomp = false;
	int ret;

	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

	if (!thread__find_map(thread, cpumode, addr, &al) || !al.map->dso) {
		if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {
			pr_debug("Hypervisor address can not be resolved - skipping\n");
			return 0;
		}

		pr_debug("thread__find_map failed\n");
		return -1;
	}

	pr_debug("File is: %s\n", al.map->dso->long_name);

	if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(al.map->dso)) {
		pr_debug("Unexpected kernel address - skipping\n");
		return 0;
	}

	pr_debug("On file address is: %#"PRIx64"\n", al.addr);

	if (len > BUFSZ)
		len = BUFSZ;

	/* Do not go off the map */
	if (addr + len > al.map->end)
		len = al.map->end - addr;

	/* Read the object code using perf */
	ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine,
					al.addr, buf1, len);
	if (ret_len != len) {
		pr_debug("dso__data_read_offset failed\n");
		return -1;
	}

	/*
	 * Converting addresses for use by objdump requires more information.
	 * map__load() does that. See map__rip_2objdump() for details.
	 */
	if (map__load(al.map))
		return -1;

	/* objdump struggles with kcore - try each map only once */
	if (dso__is_kcore(al.map->dso)) {
		size_t d;

		for (d = 0; d < state->done_cnt; d++) {
			if (state->done[d] == al.map->start) {
				pr_debug("kcore map tested already");
				pr_debug(" - skipping\n");
				return 0;
			}
		}
		if (state->done_cnt >= ARRAY_SIZE(state->done)) {
			pr_debug("Too many kcore maps - skipping\n");
			return 0;
		}
		state->done[state->done_cnt++] = al.map->start;
	}

	objdump_name = al.map->dso->long_name;
	if (dso__needs_decompress(al.map->dso)) {
		if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
						 decomp_name,
						 sizeof(decomp_name)) < 0) {
			pr_debug("decompression failed\n");
			return -1;
		}

		decomp = true;
		objdump_name = decomp_name;
	}

	/* Read the object code using objdump */
	objdump_addr = map__rip_2objdump(al.map, al.addr);
	ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);

	if (decomp)
		unlink(objdump_name);

	if (ret > 0) {
		/*
		 * The kernel maps are inaccurate - assume objdump is right in
		 * that case.
		 */
		if (cpumode == PERF_RECORD_MISC_KERNEL ||
		    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
			len -= ret;
			if (len) {
				pr_debug("Reducing len to %zu\n", len);
			} else if (dso__is_kcore(al.map->dso)) {
				/*
				 * objdump cannot handle very large segments
				 * that may be found in kcore.
				 */
				pr_debug("objdump failed for kcore");
				pr_debug(" - skipping\n");
				return 0;
			} else {
				return -1;
			}
		}
	}
	if (ret < 0) {
		pr_debug("read_via_objdump failed\n");
		return -1;
	}

	/* The results should be identical */
	if (memcmp(buf1, buf2, len)) {
		pr_debug("Bytes read differ from those read by objdump\n");
		pr_debug("buf1 (dso):\n");
		dump_buf(buf1, len);
		pr_debug("buf2 (objdump):\n");
		dump_buf(buf2, len);
		return -1;
	}
	pr_debug("Bytes read match those read by objdump\n");

	return 0;
}

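/* Parse a sample, resolve its thread and verify the code at the sampled IP */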
static int process_sample_event(struct machine *machine,
				struct perf_evlist *evlist,
				union perf_event *event, struct state *state)
{
	struct perf_sample sample;
	struct thread *thread;
	int ret;

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	thread = machine__findnew_thread(machine, sample.pid, sample.tid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		return -1;
	}

	ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state);
	thread__put(thread);
	return ret;
}

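/*
 * Dispatch samples to process_sample_event() and feed other events to
 * machine__process_event() so that the thread and map state stays current.
 */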
static int process_event(struct machine *machine, struct perf_evlist *evlist,
			 union perf_event *event, struct state *state)
{
	if (event->header.type == PERF_RECORD_SAMPLE)
		return process_sample_event(machine, evlist, event, state);

	if (event->header.type == PERF_RECORD_THROTTLE ||
	    event->header.type == PERF_RECORD_UNTHROTTLE)
		return 0;

	if (event->header.type < PERF_RECORD_MAX) {
		int ret;

		ret = machine__process_event(machine, event, NULL);
		if (ret < 0)
			pr_debug("machine__process_event failed, event type %u\n",
				 event->header.type);
		return ret;
	}

	return 0;
}

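/* Drain all mmapped ring buffers and process every event found */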
static int process_events(struct machine *machine, struct perf_evlist *evlist,
			  struct state *state)
{
	union perf_event *event;
	struct perf_mmap *md;
	int i, ret;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(md) < 0)
			continue;

		while ((event = perf_mmap__read_event(md)) != NULL) {
			ret = process_event(machine, evlist, event, state);
			perf_mmap__consume(md);
			if (ret < 0)
				return ret;
		}
		perf_mmap__read_done(md);
	}
	return 0;
}

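/*
 * The functions below are a simple workload (sorting, syscalls and
 * filesystem activity) run while events are being sampled, so the test has
 * a spread of user space and kernel addresses to verify.
 */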
static int comp(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

static void do_sort_something(void)
{
	int buf[40960], i;

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
		buf[i] = ARRAY_SIZE(buf) - i - 1;

	qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
		if (buf[i] != i) {
			pr_debug("qsort failed\n");
			break;
		}
	}
}

static void sort_something(void)
{
	int i;

	for (i = 0; i < 10; i++)
		do_sort_something();
}

static void syscall_something(void)
{
	int pipefd[2];
	int i;

	for (i = 0; i < 1000; i++) {
		if (pipe(pipefd) < 0) {
			pr_debug("pipe failed\n");
			break;
		}
		close(pipefd[1]);
		close(pipefd[0]);
	}
}

static void fs_something(void)
{
	const char *test_file_name = "temp-perf-code-reading-test-file--";
	FILE *f;
	int i;

	for (i = 0; i < 1000; i++) {
		f = fopen(test_file_name, "w+");
		if (f) {
			fclose(f);
			unlink(test_file_name);
		}
	}
}

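/*
 * Pick the sampling event: "cycles" by default, "cycles:u" when the kernel
 * is excluded.  On s390x the cpuid string is parsed for the counter facility
 * authorization bits, and the test falls back to cpu-clock when the cycles
 * counter is not authorized.
 */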
static const char *do_determine_event(bool excl_kernel)
{
	const char *event = excl_kernel ? "cycles:u" : "cycles";

#ifdef __s390x__
	char cpuid[128], model[16], model_c[16], cpum_cf_v[16];
	unsigned int family;
	int ret, cpum_cf_a;

	if (get_cpuid(cpuid, sizeof(cpuid)))
		goto out_clocks;
	ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%x", &family, model_c,
		     model, cpum_cf_v, &cpum_cf_a);
	if (ret != 5)		/* Not available */
		goto out_clocks;
	if (excl_kernel && (cpum_cf_a & 4))
		return event;
	if (!excl_kernel && (cpum_cf_a & 2))
		return event;

	/* Fall through: missing authorization */
out_clocks:
	event = excl_kernel ? "cpu-clock:u" : "cpu-clock";

#endif
	return event;
}

static void do_something(void)
{
	fs_something();

	sort_something();

	syscall_something();
}

enum {
	TEST_CODE_READING_OK,
	TEST_CODE_READING_NO_VMLINUX,
	TEST_CODE_READING_NO_KCORE,
	TEST_CODE_READING_NO_ACCESS,
	TEST_CODE_READING_NO_KERNEL_OBJ,
};

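/*
 * Main test body: create a host machine with kernel maps (optionally forcing
 * kallsyms/kcore), open a sampling event on this process, run the workload,
 * then check every sampled address with read_object_code().
 */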
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages	   = UINT_MAX,
		.user_freq	   = UINT_MAX,
		.user_interval	   = ULLONG_MAX,
		.freq		   = 500,
		.target		   = {
			.uses_mmap = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct thread_map *threads = NULL;
	struct cpu_map *cpus = NULL;
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore, excl_kernel = false;

	pid = getpid();

	machine = machine__new_host();
	machine->env = &perf_env;

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	have_vmlinux = dso__is_vmlinux(map->dso);
	have_kcore = dso__is_kcore(map->dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore)
		return TEST_CODE_READING_NO_KCORE;

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		excl_kernel = true;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine, false);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_put;
	}

	cpus = cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("cpu_map__new failed\n");
		goto out_put;
	}

	while (1) {
		const char *str;

		evlist = perf_evlist__new();
		if (!evlist) {
			pr_debug("perf_evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(evlist, cpus, threads);

		str = do_determine_event(excl_kernel);
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_events(evlist, str, NULL);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_put;
		}

		perf_evlist__config(evlist, &opts, NULL);

		evsel = perf_evlist__first(evlist);

		evsel->attr.comm = 1;
		evsel->attr.disabled = 1;
		evsel->attr.enable_on_exec = 0;

		ret = perf_evlist__open(evlist);
		if (ret < 0) {
			if (!excl_kernel) {
				excl_kernel = true;
				/*
				 * Both cpus and threads are now owned by evlist
				 * and will be freed by the following
				 * perf_evlist__set_maps() call. Getting a
				 * reference to keep them alive.
				 */
				cpu_map__get(cpus);
				thread_map__get(threads);
				perf_evlist__set_maps(evlist, NULL, NULL);
				perf_evlist__delete(evlist);
				evlist = NULL;
				continue;
			}

			if (verbose > 0) {
				char errbuf[512];
				perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
				pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
			}

			goto out_put;
		}
		break;
	}

	ret = perf_evlist__mmap(evlist, UINT_MAX);
	if (ret < 0) {
		pr_debug("perf_evlist__mmap failed\n");
		goto out_put;
	}

	perf_evlist__enable(evlist);

	do_something();

	perf_evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_put;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (excl_kernel)
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:

	if (evlist) {
		perf_evlist__delete(evlist);
	} else {
		cpu_map__put(cpus);
		thread_map__put(threads);
	}
	machine__delete_threads(machine);
	machine__delete(machine);

	return err;
}

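/* Run the test with the default kernel object first, then again with kcore */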
int test__code_reading(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		pr_debug("no vmlinux\n");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		pr_debug("no kcore\n");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		pr_debug("no access\n");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		pr_debug("no kernel obj\n");
		return 0;
	default:
		return -1;
	};
}