1 /* Copyright (c) 2017 Facebook
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
15 #include <linux/types.h>
16 typedef __u16 __sum16;
17 #include <arpa/inet.h>
18 #include <linux/if_ether.h>
19 #include <linux/if_packet.h>
21 #include <linux/ipv6.h>
22 #include <linux/tcp.h>
23 #include <linux/filter.h>
24 #include <linux/perf_event.h>
25 #include <linux/unistd.h>
27 #include <sys/ioctl.h>
29 #include <sys/resource.h>
30 #include <sys/types.h>
33 #include <linux/bpf.h>
34 #include <linux/err.h>
36 #include <bpf/libbpf.h>
37 #include "test_iptunnel_common.h"
39 #include "bpf_endian.h"
/* Global pass/fail tallies; incremented by the CHECK() macro below and
 * reported by the summary printf at the bottom of the file. */
41 static int error_cnt, pass_cnt;
/* Payload length planted in the test packets' length fields; the l4lb
 * stats check later expects MAGIC_BYTES of accounted bytes per packet. */
43 #define MAGIC_BYTES 123
45 /* ipv4 test vector */
51 .eth.h_proto = bpf_htons(ETH_P_IP),
54 .iph.tot_len = bpf_htons(MAGIC_BYTES),
58 /* ipv6 test vector */
64 .eth.h_proto = bpf_htons(ETH_P_IPV6),
66 .iph.payload_len = bpf_htons(MAGIC_BYTES),
/* CHECK(condition, tag, fmt...): statement-expression test helper.
 * Evaluates `condition`; on failure prints "<func>:FAIL:<tag>" plus the
 * caller-supplied format, on success prints a PASS line with the
 * caller's `duration`.  Expands to __ret (nonzero on failure) so call
 * sites can write `if (CHECK(...)) goto cleanup;`.
 * NOTE(review): this listing is truncated — the if/else between the two
 * printf lines (error_cnt/pass_cnt updates, format expansion) and the
 * closing "__ret; })" are missing from view; confirm against the
 * original file before editing. */
70 #define CHECK(condition, tag, format...) ({ \
71 int __ret = !!(condition); \
74 printf("%s:FAIL:%s ", __func__, tag); \
78 printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
/* Look up a map named `name` inside the loaded bpf object and return its
 * fd via bpf_map__fd().  On lookup failure prints "<test>:FAIL:map ...".
 * NOTE(review): truncated listing — the opening brace, the `name`
 * parameter line, the `struct bpf_map *map` declaration and the
 * not-found error-return path are missing from view. */
83 static int bpf_find_map(const char *test, struct bpf_object *obj,
88 map = bpf_object__find_map_by_name(obj, name);
90 printf("%s:FAIL:map '%s' not found\n", test, name);
94 return bpf_map__fd(map);
/* Load test_pkt_access.o as a SCHED_CLS program and run it 100000 times
 * against the canned IPv4 and IPv6 packets via bpf_prog_test_run(),
 * expecting err == 0, errno == 0 and retval == 0 for both.
 * NOTE(review): truncated listing — braces, the err/prog_fd
 * declarations and the post-load error check are missing from view. */
97 static void test_pkt_access(void)
99 const char *file = "./test_pkt_access.o";
100 struct bpf_object *obj;
101 __u32 duration, retval;
104 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
/* IPv4 run: any nonzero err/errno/retval counts as failure. */
110 err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
111 NULL, NULL, &retval, &duration);
112 CHECK(err || errno || retval, "ipv4",
113 "err %d errno %d retval %d duration %d\n",
114 err, errno, retval, duration);
/* Same check for the IPv6 test vector. */
116 err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
117 NULL, NULL, &retval, &duration);
118 CHECK(err || errno || retval, "ipv6",
119 "err %d errno %d retval %d duration %d\n",
120 err, errno, retval, duration);
121 bpf_object__close(obj);
/* Load test_xdp.o as an XDP program, seed its "vip2tnl" map with IPv4
 * and IPv6 tunnel entries, then run one packet of each family and check
 * that the program returns XDP_TX and rewrote the packet into an
 * encapsulated one (IPIP for v4, IPv6-in-IPv6 for v6) of the expected
 * output size (74 / 114 bytes).
 * NOTE(review): truncated listing — braces, the `buf` output buffer
 * declaration and the post-load / post-find_map error checks are
 * missing from view. */
124 static void test_xdp(void)
126 struct vip key4 = {.protocol = 6, .family = AF_INET};
127 struct vip key6 = {.protocol = 6, .family = AF_INET6};
128 struct iptnl_info value4 = {.family = AF_INET};
129 struct iptnl_info value6 = {.family = AF_INET6};
130 const char *file = "./test_xdp.o";
131 struct bpf_object *obj;
/* Aliases into the output buffer, pointing just past the Ethernet
 * header, used to inspect the rewritten outer IP header. */
133 struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
134 struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
135 __u32 duration, retval, size;
136 int err, prog_fd, map_fd;
138 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
144 map_fd = bpf_find_map(__func__, obj, "vip2tnl");
147 bpf_map_update_elem(map_fd, &key4, &value4, 0);
148 bpf_map_update_elem(map_fd, &key6, &value6, 0);
150 err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
151 buf, &size, &retval, &duration);
153 CHECK(err || errno || retval != XDP_TX || size != 74 ||
154 iph->protocol != IPPROTO_IPIP, "ipv4",
155 "err %d errno %d retval %d size %d\n",
156 err, errno, retval, size);
158 err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
159 buf, &size, &retval, &duration);
160 CHECK(err || errno || retval != XDP_TX || size != 114 ||
161 iph6->nexthdr != IPPROTO_IPV6, "ipv6",
162 "err %d errno %d retval %d size %d\n",
163 err, errno, retval, size);
165 bpf_object__close(obj);
/* MAGIC_VAL: sentinel stored as the real-server destination in the l4lb
 * test; expected back at the start of each redirected packet.
 * NUM_ITER: repetitions per bpf_prog_test_run() call below. */
168 #define MAGIC_VAL 0x1234
169 #define NUM_ITER 100000
/* Exercise an L4 load-balancer program (`file` is either the inline or
 * noinline build, see test_l4lb_all).  Seeds the vip_map / ch_rings /
 * reals maps so both test packets hash to real server 3, runs NUM_ITER
 * iterations per address family expecting TC_ACT_REDIRECT and the
 * MAGIC_VAL sentinel at the head of the output packet, then verifies
 * the per-CPU "stats" map accounted MAGIC_BYTES * NUM_ITER * 2 bytes
 * and NUM_ITER * 2 packets in total.
 * NOTE(review): truncated listing — braces, the anonymous `value`
 * struct body, parts of `real_def`, the `buf`/`stats` declarations and
 * several error checks are missing from view. */
172 static void test_l4lb(const char *file)
174 unsigned int nr_cpus = bpf_num_possible_cpus();
175 struct vip key = {.protocol = 6};
179 } value = {.vip_num = VIP_NUM};
180 __u32 stats_key = VIP_NUM;
185 struct real_definition {
191 } real_def = {.dst = MAGIC_VAL};
192 __u32 ch_key = 11, real_num = 3;
193 __u32 duration, retval, size;
194 int err, i, prog_fd, map_fd;
195 __u64 bytes = 0, pkts = 0;
196 struct bpf_object *obj;
/* First 4 bytes of the rewritten packet are checked against MAGIC_VAL. */
198 u32 *magic = (u32 *)buf;
200 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
/* Populate the three LB maps: vip -> vip_num, ring slot -> real index,
 * real index -> real definition. */
206 map_fd = bpf_find_map(__func__, obj, "vip_map");
209 bpf_map_update_elem(map_fd, &key, &value, 0);
211 map_fd = bpf_find_map(__func__, obj, "ch_rings");
214 bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
216 map_fd = bpf_find_map(__func__, obj, "reals");
219 bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
221 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
222 buf, &size, &retval, &duration);
223 CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
224 *magic != MAGIC_VAL, "ipv4",
225 "err %d errno %d retval %d size %d magic %x\n",
226 err, errno, retval, size, *magic);
228 err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
229 buf, &size, &retval, &duration);
230 CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
231 *magic != MAGIC_VAL, "ipv6",
232 "err %d errno %d retval %d size %d magic %x\n",
233 err, errno, retval, size, *magic);
/* Sum the per-CPU stats entries and compare with the exact totals the
 * two NUM_ITER runs must have produced. */
235 map_fd = bpf_find_map(__func__, obj, "stats");
238 bpf_map_lookup_elem(map_fd, &stats_key, stats);
239 for (i = 0; i < nr_cpus; i++) {
240 bytes += stats[i].bytes;
241 pkts += stats[i].pkts;
243 if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
245 printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
248 bpf_object__close(obj);
/* Run the l4lb test against both object builds (always-inlined and
 * noinline helpers).
 * NOTE(review): truncated listing — the brace and the two
 * test_l4lb(file1/file2) calls are missing from view. */
251 static void test_l4lb_all(void)
253 const char *file1 = "./test_l4lb.o";
254 const char *file2 = "./test_l4lb_noinline.o";
/* Smoke test: test_tcp_estats.o must load successfully as a TRACEPOINT
 * program (checks only the load result, then closes the object).
 * NOTE(review): truncated listing — braces, the err/prog_fd
 * declarations and any lines between the CHECK and the close are
 * missing from view. */
260 static void test_tcp_estats(void)
262 const char *file = "./test_tcp_estats.o";
264 struct bpf_object *obj;
267 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
268 CHECK(err, "", "err %d errno %d\n", err, errno);
274 bpf_object__close(obj);
/* Convert a user-space pointer into the __u64 representation the bpf(2)
 * syscall ABI uses for pointer-valued attribute fields (attr.insns,
 * attr.license, prog_info.map_ids, ...).  The intermediate cast through
 * unsigned long means the pointer's bit pattern is zero-extended to
 * 64 bits on 32-bit targets.
 * NOTE(review): the truncated listing had dropped this function's
 * braces; they are restored here. */
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
/* End-to-end test of the BPF object-ID introspection API:
 *  - bpf_prog_get_fd_by_id()/bpf_map_get_fd_by_id() on id 0 must fail
 *    with ENOENT;
 *  - loading test_obj_id.o nr_iters times, bpf_obj_get_info_by_fd()
 *    must report the expected map info (ARRAY, u32 key, u64 value, one
 *    entry, name "test_map_id") and prog info (SOCKET_FILTER, jited and
 *    xlated images, a sane load_time, the caller's uid, exactly one map
 *    id, name "test_obj_id");
 *  - iterating bpf_prog_get_next_id()/bpf_map_get_next_id() must find
 *    every loaded program/map id, with info fetched by id matching the
 *    info fetched earlier by fd, and the map still holding the magic
 *    value.
 * NOTE(review): truncated listing — braces, several declarations
 * (prog_fd/map_fd inside the loops, tmpc, array_value, saved_map_id),
 * error-check lines and loop bodies are missing from view; keep that in
 * mind before editing. */
282 static void test_bpf_obj_id(void)
284 const __u64 array_magic_value = 0xfaceb00c;
285 const __u32 array_key = 0;
286 const int nr_iters = 2;
287 const char *file = "./test_obj_id.o";
288 const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
289 const char *expected_prog_name = "test_obj_id";
290 const char *expected_map_name = "test_map_id";
291 const __u64 nsec_per_sec = 1000000000;
293 struct bpf_object *objs[nr_iters];
294 int prog_fds[nr_iters], map_fds[nr_iters];
295 /* +1 to test for the info_len returned by kernel */
296 struct bpf_prog_info prog_infos[nr_iters + 1];
297 struct bpf_map_info map_infos[nr_iters + 1];
298 /* Each prog only uses one map. +1 to test nr_map_ids
299 * returned by kernel.
301 __u32 map_ids[nr_iters + 1];
302 char jited_insns[128], xlated_insns[128], zeros[128];
303 __u32 i, next_id, info_len, nr_id_found, duration = 0;
304 struct timespec real_time_ts, boot_time_ts;
305 int sysctl_fd, jit_enabled = 0, err = 0;
307 uid_t my_uid = getuid();
308 time_t now, load_time;
/* Detect whether the JIT is enabled so the jited_prog_len expectations
 * below can be made conditional.
 * NOTE(review): arguments look swapped — flags is 0 and O_RDONLY sits in
 * the (ignored, no O_CREAT) mode slot; harmless since O_RDONLY == 0,
 * but worth confirming against the upstream file. */
310 sysctl_fd = open(jit_sysctl, 0, O_RDONLY);
311 if (sysctl_fd != -1) {
314 if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
315 jit_enabled = (tmpc != '0');
/* id 0 never exists: both by-id lookups must fail with ENOENT. */
319 err = bpf_prog_get_fd_by_id(0);
320 CHECK(err >= 0 || errno != ENOENT,
321 "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
323 err = bpf_map_get_fd_by_id(0);
324 CHECK(err >= 0 || errno != ENOENT,
325 "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
327 for (i = 0; i < nr_iters; i++)
330 /* Check bpf_obj_get_info_by_fd() */
331 bzero(zeros, sizeof(zeros));
332 for (i = 0; i < nr_iters; i++) {
334 err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
335 &objs[i], &prog_fds[i]);
336 /* test_obj_id.o is a dumb prog. It should never fail
343 /* Insert a magic value to the map */
344 map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
345 assert(map_fds[i] >= 0);
346 err = bpf_map_update_elem(map_fds[i], &array_key,
347 &array_magic_value, 0);
350 /* Check getting map info */
/* info_len is deliberately oversized (x2); the kernel must trim it back
 * to sizeof(struct bpf_map_info). */
351 info_len = sizeof(struct bpf_map_info) * 2;
352 bzero(&map_infos[i], info_len);
353 err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
356 map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
357 map_infos[i].key_size != sizeof(__u32) ||
358 map_infos[i].value_size != sizeof(__u64) ||
359 map_infos[i].max_entries != 1 ||
360 map_infos[i].map_flags != 0 ||
361 info_len != sizeof(struct bpf_map_info) ||
362 strcmp((char *)map_infos[i].name, expected_map_name),
364 "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
366 map_infos[i].type, BPF_MAP_TYPE_ARRAY,
367 info_len, sizeof(struct bpf_map_info),
368 map_infos[i].key_size,
369 map_infos[i].value_size,
370 map_infos[i].max_entries,
371 map_infos[i].map_flags,
372 map_infos[i].name, expected_map_name))
375 /* Check getting prog info */
376 info_len = sizeof(struct bpf_prog_info) * 2;
377 bzero(&prog_infos[i], info_len);
378 bzero(jited_insns, sizeof(jited_insns));
379 bzero(xlated_insns, sizeof(xlated_insns));
380 prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
381 prog_infos[i].jited_prog_len = sizeof(jited_insns);
382 prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
383 prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
384 prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
/* nr_map_ids = 2 although only 1 is expected back — verifies the kernel
 * corrects it to the real count. */
385 prog_infos[i].nr_map_ids = 2;
386 err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
388 err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
390 err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
/* prog_infos[i].load_time is boot-relative; convert to wall-clock
 * seconds so it can be sanity-checked against `now` +/- 60s. */
392 load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
393 + (prog_infos[i].load_time / nsec_per_sec);
395 prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
396 info_len != sizeof(struct bpf_prog_info) ||
397 (jit_enabled && !prog_infos[i].jited_prog_len) ||
399 !memcmp(jited_insns, zeros, sizeof(zeros))) ||
400 !prog_infos[i].xlated_prog_len ||
401 !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
402 load_time < now - 60 || load_time > now + 60 ||
403 prog_infos[i].created_by_uid != my_uid ||
404 prog_infos[i].nr_map_ids != 1 ||
405 *(int *)prog_infos[i].map_ids != map_infos[i].id ||
406 strcmp((char *)prog_infos[i].name, expected_prog_name),
408 "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
410 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
411 info_len, sizeof(struct bpf_prog_info),
413 prog_infos[i].jited_prog_len,
414 prog_infos[i].xlated_prog_len,
415 !!memcmp(jited_insns, zeros, sizeof(zeros)),
416 !!memcmp(xlated_insns, zeros, sizeof(zeros)),
418 prog_infos[i].created_by_uid, my_uid,
419 prog_infos[i].nr_map_ids, 1,
420 *(int *)prog_infos[i].map_ids, map_infos[i].id,
421 prog_infos[i].name, expected_prog_name))
425 /* Check bpf_prog_get_next_id() */
428 while (!bpf_prog_get_next_id(next_id, &next_id)) {
429 struct bpf_prog_info prog_info = {};
433 info_len = sizeof(prog_info);
435 prog_fd = bpf_prog_get_fd_by_id(next_id);
436 if (prog_fd < 0 && errno == ENOENT)
437 /* The bpf_prog is in the dead row */
439 if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
440 "prog_fd %d next_id %d errno %d\n",
441 prog_fd, next_id, errno))
/* Skip ids that are not one of ours (other progs may be loaded). */
444 for (i = 0; i < nr_iters; i++)
445 if (prog_infos[i].id == next_id)
454 * prog_info.nr_map_ids = 1
455 * prog_info.map_ids = NULL
/* Negative test: nr_map_ids > 0 with a NULL map_ids buffer must fail
 * with EFAULT. */
457 prog_info.nr_map_ids = 1;
458 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
459 if (CHECK(!err || errno != EFAULT,
460 "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
/* Now fetch info by the id-derived fd and require it to match the info
 * captured earlier by the load-time fd (pointer fields zeroed first so
 * memcmp compares only the kernel-filled data). */
463 bzero(&prog_info, sizeof(prog_info));
464 info_len = sizeof(prog_info);
466 saved_map_id = *(int *)(prog_infos[i].map_ids);
467 prog_info.map_ids = prog_infos[i].map_ids;
468 prog_info.nr_map_ids = 2;
469 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
470 prog_infos[i].jited_prog_insns = 0;
471 prog_infos[i].xlated_prog_insns = 0;
472 CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
473 memcmp(&prog_info, &prog_infos[i], info_len) ||
474 *(int *)prog_info.map_ids != saved_map_id,
475 "get-prog-info(next_id->fd)",
476 "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
477 err, errno, info_len, sizeof(struct bpf_prog_info),
478 memcmp(&prog_info, &prog_infos[i], info_len),
479 *(int *)prog_info.map_ids, saved_map_id);
482 CHECK(nr_id_found != nr_iters,
483 "check total prog id found by get_next_id",
484 "nr_id_found %u(%u)\n",
485 nr_id_found, nr_iters);
487 /* Check bpf_map_get_next_id() */
490 while (!bpf_map_get_next_id(next_id, &next_id)) {
491 struct bpf_map_info map_info = {};
494 info_len = sizeof(map_info);
496 map_fd = bpf_map_get_fd_by_id(next_id);
497 if (map_fd < 0 && errno == ENOENT)
498 /* The bpf_map is in the dead row */
500 if (CHECK(map_fd < 0, "get-map-fd(next_id)",
501 "map_fd %d next_id %u errno %d\n",
502 map_fd, next_id, errno))
505 for (i = 0; i < nr_iters; i++)
506 if (map_infos[i].id == next_id)
/* The id-derived fd must read back the magic value and report info
 * identical to the fd-derived snapshot. */
514 err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
517 err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
518 CHECK(err || info_len != sizeof(struct bpf_map_info) ||
519 memcmp(&map_info, &map_infos[i], info_len) ||
520 array_value != array_magic_value,
521 "check get-map-info(next_id->fd)",
522 "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
523 err, errno, info_len, sizeof(struct bpf_map_info),
524 memcmp(&map_info, &map_infos[i], info_len),
525 array_value, array_magic_value);
529 CHECK(nr_id_found != nr_iters,
530 "check total map id found by get_next_id",
531 "nr_id_found %u(%u)\n",
532 nr_id_found, nr_iters);
535 for (i = 0; i < nr_iters; i++)
536 bpf_object__close(objs[i]);
/* Load test_pkt_md_access.o as SCHED_CLS and run it 10 times on the
 * IPv4 test packet; the program is expected to succeed (retval == 0).
 * NOTE(review): truncated listing — braces, the err/prog_fd
 * declarations and the post-load error check are missing from view. */
539 static void test_pkt_md_access(void)
541 const char *file = "./test_pkt_md_access.o";
542 struct bpf_object *obj;
543 __u32 duration, retval;
546 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
552 err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
553 NULL, NULL, &retval, &duration);
554 CHECK(err || retval, "",
555 "err %d errno %d retval %d duration %d\n",
556 err, errno, retval, duration);
558 bpf_object__close(obj);
/* Validate kernel-side name checking for attr.prog_name (BPF_PROG_LOAD)
 * and attr.map_name (BPF_MAP_CREATE) via raw bpf(2) syscalls: a 15-char
 * name succeeds, while a 16-char name and one containing '\n' must be
 * rejected with EINVAL.
 * NOTE(review): truncated listing — the `tests` table header and its
 * earlier entries, the trivial prog's BPF_EXIT_INSN, the attr/fd/ncopy
 * declarations, some attr fields (insn_cnt, key/value sizes) and the
 * close(fd) lines are missing from view. */
561 static void test_obj_name(void)
569 { "_123456789ABCDE", 1, 0 },
570 { "_123456789ABCDEF", 0, EINVAL },
571 { "_123456789ABCD\n", 0, EINVAL },
/* Minimal program body: mov r0, 0 (plus the exit insn dropped from this
 * listing) — just enough to attempt a load. */
573 struct bpf_insn prog[] = {
574 BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
580 for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
581 size_t name_len = strlen(tests[i].name) + 1;
586 /* test different attr.prog_name during BPF_PROG_LOAD */
587 ncopy = name_len < sizeof(attr.prog_name) ?
588 name_len : sizeof(attr.prog_name);
589 bzero(&attr, sizeof(attr));
590 attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
592 attr.insns = ptr_to_u64(prog);
593 attr.license = ptr_to_u64("");
594 memcpy(attr.prog_name, tests[i].name, ncopy);
596 fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
597 CHECK((tests[i].success && fd < 0) ||
598 (!tests[i].success && fd != -1) ||
599 (!tests[i].success && errno != tests[i].expected_errno),
600 "check-bpf-prog-name",
601 "fd %d(%d) errno %d(%d)\n",
602 fd, tests[i].success, errno, tests[i].expected_errno);
607 /* test different attr.map_name during BPF_MAP_CREATE */
608 ncopy = name_len < sizeof(attr.map_name) ?
609 name_len : sizeof(attr.map_name);
610 bzero(&attr, sizeof(attr));
611 attr.map_type = BPF_MAP_TYPE_ARRAY;
614 attr.max_entries = 1;
616 memcpy(attr.map_name, tests[i].name, ncopy);
617 fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
618 CHECK((tests[i].success && fd < 0) ||
619 (!tests[i].success && fd != -1) ||
620 (!tests[i].success && errno != tests[i].expected_errno),
621 "check-bpf-map-name",
622 "fd %d(%d) errno %d(%d)\n",
623 fd, tests[i].success, errno, tests[i].expected_errno);
/* Attach num_progs tracepoint programs to sched:sched_switch perf
 * events and exercise PERF_EVENT_IOC_QUERY_BPF: empty-array query,
 * prog-count-only query, invalid user pointer (EFAULT), undersized ids
 * buffer (ENOSPC), and finally a full query whose returned ids must
 * match the program ids saved at load time.  Cleans up events and
 * objects in reverse order.
 * NOTE(review): truncated listing — braces, the `buf` declaration,
 * several `goto cleanup*` labels/targets, close() calls, the
 * query->ids_len setup before some queries and the free(query) are
 * missing from view. */
630 static void test_tp_attach_query(void)
632 const int num_progs = 3;
633 int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
634 __u32 duration = 0, info_len, saved_prog_ids[num_progs];
635 const char *file = "./test_tracepoint.o";
636 struct perf_event_query_bpf *query;
637 struct perf_event_attr attr = {};
638 struct bpf_object *obj[num_progs];
639 struct bpf_prog_info prog_info;
/* Read the sched_switch tracepoint id from debugfs to build the perf
 * event attr. */
642 snprintf(buf, sizeof(buf),
643 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
644 efd = open(buf, O_RDONLY, 0)
645 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
647 bytes = read(efd, buf, sizeof(buf));
649 if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
650 "read", "bytes %d errno %d\n", bytes, errno))
653 attr.config = strtol(buf, NULL, 0);
654 attr.type = PERF_TYPE_TRACEPOINT;
655 attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
656 attr.sample_period = 1;
657 attr.wakeup_events = 1;
/* Query buffer with room for num_progs ids (flexible array member). */
659 query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
660 for (i = 0; i < num_progs; i++) {
661 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
663 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
/* Save each program's kernel-assigned id for the final comparison. */
666 bzero(&prog_info, sizeof(prog_info));
667 prog_info.jited_prog_len = 0;
668 prog_info.xlated_prog_len = 0;
669 prog_info.nr_map_ids = 0;
670 info_len = sizeof(prog_info);
671 err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
672 if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
675 saved_prog_ids[i] = prog_info.id;
677 pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
678 0 /* cpu 0 */, -1 /* group id */,
680 if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
683 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
684 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
689 /* check NULL prog array query */
690 query->ids_len = num_progs;
691 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
692 if (CHECK(err || query->prog_cnt != 0,
693 "perf_event_ioc_query_bpf",
694 "err %d errno %d query->prog_cnt %u\n",
695 err, errno, query->prog_cnt))
699 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
700 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
705 /* try to get # of programs only */
707 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
708 if (CHECK(err || query->prog_cnt != 2,
709 "perf_event_ioc_query_bpf",
710 "err %d errno %d query->prog_cnt %u\n",
711 err, errno, query->prog_cnt))
714 /* try a few negative tests */
715 /* invalid query pointer */
716 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
717 (struct perf_event_query_bpf *)0x1);
718 if (CHECK(!err || errno != EFAULT,
719 "perf_event_ioc_query_bpf",
720 "err %d errno %d\n", err, errno))
723 /* no enough space */
725 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
726 if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
727 "perf_event_ioc_query_bpf",
728 "err %d errno %d query->prog_cnt %u\n",
729 err, errno, query->prog_cnt))
733 query->ids_len = num_progs;
734 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
735 if (CHECK(err || query->prog_cnt != (i + 1),
736 "perf_event_ioc_query_bpf",
737 "err %d errno %d query->prog_cnt %u\n",
738 err, errno, query->prog_cnt))
740 for (j = 0; j < i + 1; j++)
741 if (CHECK(saved_prog_ids[j] != query->ids[j],
742 "perf_event_ioc_query_bpf",
743 "#%d saved_prog_id %x query prog_id %x\n",
744 j, saved_prog_ids[j], query->ids[j]))
/* Teardown in reverse: disable/close each event, close each object. */
749 for (; i >= 0; i--) {
751 ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
755 bpf_object__close(obj[i]);
/* NOTE(review): this is the body of main() — the `int main(...)` header
 * line was dropped by the truncated listing, as were most of the
 * test_*() invocations.  Raises RLIMIT_MEMLOCK to infinity so BPF
 * map/program allocations are not rejected, runs the test functions,
 * prints the pass/fail summary and exits nonzero on any failure. */
762 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
764 setrlimit(RLIMIT_MEMLOCK, &rinf);
771 test_pkt_md_access();
773 test_tp_attach_query();
775 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
776 return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;