5 #include <linux/perf_event.h>
10 #include <sys/ioctl.h>
11 #include <sys/resource.h>
13 #include <sys/types.h>
21 #define SAMPLE_PERIOD 0x7fffffffffffffffULL
23 static void check_on_cpu(int cpu, struct perf_event_attr *attr)
/*
 * Pin the calling task to @cpu, open the perf event described by @attr on
 * that CPU, publish its fd into the perf_event_array map (map_fd[0]), read
 * the BPF program's result back from the output map (map_fd[1]), then tear
 * everything down.
 *
 * NOTE(review): this chunk is elided — the declarations of `set` (cpu_set_t)
 * and `value`, the braces, and the code that sets `error` on failure are not
 * visible here. Comments below describe only what the visible lines show.
 */
25 int pmu_fd, error = 0;
29 /* Move to target CPU */
/* Affinity mask `set` is declared/initialized in elided lines above. */
32 assert(sched_setaffinity(0, sizeof(set), &set) == 0);
33 /* Open perf event and attach to the perf_event_array */
/* pid=-1, cpu=@cpu: count events on this CPU for any task. */
34 pmu_fd = sys_perf_event_open(attr, -1/*pid*/, cpu/*cpu*/, -1/*group_fd*/, 0);
/* Open failure path (surrounding if/brace elided); presumably sets `error`. */
36 fprintf(stderr, "sys_perf_event_open failed on CPU %d\n", cpu);
/* Key the perf_event_array by CPU index so the BPF program can read it. */
40 assert(bpf_map_update_elem(map_fd[0], &cpu, &pmu_fd, BPF_ANY) == 0);
41 assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0) == 0);
42 /* Trigger the kprobe */
/*
 * The syscall performed by this helper is what fires the attached kprobe;
 * next_key is NULL because the lookup result itself is irrelevant — TODO
 * confirm the kprobe attach point against the companion _kern.o source.
 */
43 bpf_map_get_next_key(map_fd[1], &cpu, NULL);
/* Read back the counter value the BPF program stored for this CPU. */
45 if (bpf_map_lookup_elem(map_fd[1], &cpu, &value)) {
46 fprintf(stderr, "Value missing for CPU %d\n", cpu);
50 fprintf(stderr, "CPU %d: %llu\n", cpu, value);
/*
 * Cleanup: the `|| error` relaxes each assert once an earlier step failed,
 * so cleanup failures after a reported error do not abort the test again.
 */
53 assert(bpf_map_delete_elem(map_fd[0], &cpu) == 0 || error);
54 assert(ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE, 0) == 0 || error);
55 assert(close(pmu_fd) == 0 || error);
56 assert(bpf_map_delete_elem(map_fd[1], &cpu) == 0 || error);
60 static void test_perf_event_array(struct perf_event_attr *attr,
/*
 * Run check_on_cpu() for @attr on every configured CPU, apparently in one
 * child process per CPU (the fork and the `pid[]` declaration are in elided
 * lines — TODO confirm), then reap the children and report pass/fail.
 *
 * NOTE(review): chunk is elided; second parameter (`name`) declaration,
 * braces, and the success/failure bookkeeping are not visible here.
 */
63 int i, status, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
67 printf("Test reading %s counters\n", name);
/* Launch one check per CPU; check_on_cpu(i, ...) presumably runs in the
 * child forked in the elided lines between these two. */
69 for (i = 0; i < nr_cpus; i++) {
73 check_on_cpu(i, attr);
/* Reap every child and inspect its exit status (handling elided). */
78 for (i = 0; i < nr_cpus; i++) {
79 assert(waitpid(pid[i], &status, 0) == pid[i]);
84 printf("Test: %s FAILED\n", name);
87 static void test_bpf_perf_event(void)
/*
 * Exercise the perf_event_array across the main perf event types:
 * hardware, software, raw, two hw-cache events, and a dynamic (msr) PMU.
 * SAMPLE_PERIOD is the max s64 value, presumably so the events count
 * without ever generating samples — TODO confirm intent.
 *
 * NOTE(review): chunk is elided — braces and several initializer fields
 * (e.g. attr_raw.config, all attr_msr_tsc fields) are not visible here.
 */
89 struct perf_event_attr attr_cycles = {
91 .sample_period = SAMPLE_PERIOD,
93 .type = PERF_TYPE_HARDWARE,
96 .config = PERF_COUNT_HW_CPU_CYCLES,
98 struct perf_event_attr attr_clock = {
100 .sample_period = SAMPLE_PERIOD,
102 .type = PERF_TYPE_SOFTWARE,
105 .config = PERF_COUNT_SW_CPU_CLOCK,
107 struct perf_event_attr attr_raw = {
109 .sample_period = SAMPLE_PERIOD,
111 .type = PERF_TYPE_RAW,
/* Raw .config value lives on an elided line below this comment. */
114 /* Intel Instruction Retired */
117 struct perf_event_attr attr_l1d_load = {
119 .sample_period = SAMPLE_PERIOD,
121 .type = PERF_TYPE_HW_CACHE,
/* HW_CACHE config encoding: cache-id | (op << 8) | (result << 16). */
125 PERF_COUNT_HW_CACHE_L1D |
126 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
127 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
129 struct perf_event_attr attr_llc_miss = {
131 .sample_period = SAMPLE_PERIOD,
133 .type = PERF_TYPE_HW_CACHE,
137 PERF_COUNT_HW_CACHE_LL |
138 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
139 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
141 struct perf_event_attr attr_msr_tsc = {
/* Dynamic-PMU attr fields (type/config) are on elided lines. */
145 /* From /sys/bus/event_source/devices/msr/ */
152 test_perf_event_array(&attr_cycles, "HARDWARE-cycles");
153 test_perf_event_array(&attr_clock, "SOFTWARE-clock");
154 test_perf_event_array(&attr_raw, "RAW-instruction-retired");
155 test_perf_event_array(&attr_l1d_load, "HW_CACHE-L1D-load");
157 /* below tests may fail in qemu */
158 test_perf_event_array(&attr_llc_miss, "HW_CACHE-LLC-miss");
159 test_perf_event_array(&attr_msr_tsc, "Dynamic-msr-tsc");
162 int main(int argc, char **argv)
/*
 * Entry point: raise the memlock limit (BPF maps need locked memory),
 * load the companion "<argv[0]>_kern.o" BPF object, and run the tests.
 *
 * NOTE(review): chunk is elided — the `filename` buffer declaration, the
 * error-path return inside the if, and the final return are not visible.
 * Also, the setrlimit() return value is ignored here; if it fails,
 * load_bpf_file presumably reports the resulting map-creation error.
 */
164 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
167 snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
169 setrlimit(RLIMIT_MEMLOCK, &r);
170 if (load_bpf_file(filename)) {
/* On load failure, dump the verifier/loader log for diagnosis. */
171 printf("%s", bpf_log_buf);
175 test_bpf_perf_event();