// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2018 Netronome Systems, Inc. */
/* This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
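
/* Implementation of "bpftool map event_pipe MAP [cpu N index M]": open the
 * perf rings referenced by a BPF_MAP_TYPE_PERF_EVENT_ARRAY map and dump the
 * BPF_OUTPUT events pushed into them, e.g.:
 *
 *	bpftool map event_pipe id 42
 */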
#include <errno.h>
#include <fcntl.h>
#include <libbpf.h>
#include <poll.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#include <bpf.h>
#include <perf-sys.h>

#include "main.h"

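/* Data pages mmap()ed for each ring; the kernel prepends one metadata page. */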
#define MMAP_PAGE_CNT	16

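/* Set from the signal handler; checked by the main poll loop. */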
static bool stop;

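/* Per-ring state: perf event fd, map index it was installed at, the CPU it
 * is bound to, and the mmap()ed ring buffer memory.
 */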
struct event_ring_info {
	int fd;
	int key;
	unsigned int cpu;
	void *mem;
};

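/* Layout of PERF_RECORD_SAMPLE records produced with
 * PERF_SAMPLE_TIME | PERF_SAMPLE_RAW, as emitted by bpf_perf_event_output().
 */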
struct perf_event_sample {
	struct perf_event_header header;
	__u64 time;
	__u32 size;
	unsigned char data[];
};

static void int_exit(int signo)
{
	fprintf(stderr, "Stopping...\n");
	stop = true;
}

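/* Callback invoked by bpf_perf_event_read_simple() for every record in the
 * ring; prints samples and lost-event records as JSON or plain text.
 */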
static enum bpf_perf_event_ret print_bpf_output(void *event, void *priv)
{
	struct event_ring_info *ring = priv;
	struct perf_event_sample *e = event;
	struct {
		struct perf_event_header header;
		__u64 id;
		__u64 lost;
	} *lost = event;

	if (json_output) {
		jsonw_start_object(json_wtr);
		jsonw_name(json_wtr, "type");
		jsonw_uint(json_wtr, e->header.type);
		jsonw_name(json_wtr, "cpu");
		jsonw_uint(json_wtr, ring->cpu);
		jsonw_name(json_wtr, "index");
		jsonw_uint(json_wtr, ring->key);
		if (e->header.type == PERF_RECORD_SAMPLE) {
			jsonw_name(json_wtr, "timestamp");
			jsonw_uint(json_wtr, e->time);
			jsonw_name(json_wtr, "data");
			print_data_json(e->data, e->size);
		} else if (e->header.type == PERF_RECORD_LOST) {
			jsonw_name(json_wtr, "lost");
			jsonw_start_object(json_wtr);
			jsonw_name(json_wtr, "id");
			jsonw_uint(json_wtr, lost->id);
			jsonw_name(json_wtr, "count");
			jsonw_uint(json_wtr, lost->lost);
			jsonw_end_object(json_wtr);
		}
		jsonw_end_object(json_wtr);
	} else {
		if (e->header.type == PERF_RECORD_SAMPLE) {
			printf("== @%lld.%09lld CPU: %d index: %d =====\n",
			       e->time / 1000000000ULL, e->time % 1000000000ULL,
			       ring->cpu, ring->key);
			fprint_hex(stdout, e->data, e->size, " ");
			printf("\n");
		} else if (e->header.type == PERF_RECORD_LOST) {
			printf("lost %lld events\n", lost->lost);
		} else {
			printf("unknown event type=%d size=%d\n",
			       e->header.type, e->header.size);
		}
	}

	return LIBBPF_PERF_EVENT_CONT;
}

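/* Drain all pending records from one ring. *buf/*buf_len hold a scratch
 * buffer that libbpf may grow to reassemble records wrapping around the
 * end of the ring.
 */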
static void
perf_event_read(struct event_ring_info *ring, void **buf, size_t *buf_len)
{
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(ring->mem,
					 MMAP_PAGE_CNT * get_page_size(),
					 get_page_size(), buf, buf_len,
					 print_bpf_output, ring);
	if (ret != LIBBPF_PERF_EVENT_CONT) {
		fprintf(stderr, "perf read loop failed with %d\n", ret);
		stop = true;
	}
}

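/* Total mmap size: MMAP_PAGE_CNT data pages plus one leading metadata page
 * (struct perf_event_mmap_page) maintained by the kernel.
 */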
static int perf_mmap_size(void)
{
	return get_page_size() * (MMAP_PAGE_CNT + 1);
}

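/* Map the ring buffer of a perf event fd; PROT_WRITE is needed so the
 * reader can advance the consumer position (data_tail).
 */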
static void *perf_event_mmap(int fd)
{
	int mmap_size = perf_mmap_size();
	void *base;

	base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		p_err("event mmap failed: %s\n", strerror(errno));
		return NULL;
	}

	return base;
}

static void perf_event_unmap(void *mem)
{
	if (munmap(mem, perf_mmap_size()))
		fprintf(stderr, "Can't unmap ring memory!\n");
}

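/* Open a software BPF_OUTPUT perf event on @cpu, store its fd at @key in
 * the perf event array map, and enable the event.
 */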
static int bpf_perf_event_open(int map_fd, int key, int cpu)
{
	struct perf_event_attr attr = {
		.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
	};
	int pmu_fd;

	pmu_fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
	if (pmu_fd < 0) {
		p_err("failed to open perf event %d for CPU %d", key, cpu);
		return -1;
	}

	if (bpf_map_update_elem(map_fd, &key, &pmu_fd, BPF_ANY)) {
		p_err("failed to update map for event %d for CPU %d", key, cpu);
		goto err_close;
	}
	if (ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
		p_err("failed to enable event %d for CPU %d", key, cpu);
		goto err_close;
	}

	return pmu_fd;

err_close:
	close(pmu_fd);
	return -1;
}

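/* Handler for "bpftool map event_pipe". By default one ring is opened per
 * possible CPU (capped by the map size); with explicit "cpu" and "index"
 * arguments a single ring is read.
 */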
int do_event_pipe(int argc, char **argv)
{
	int i, nfds, map_fd, index = -1, cpu = -1;
	struct bpf_map_info map_info = {};
	struct event_ring_info *rings;
	size_t tmp_buf_sz = 0;
	void *tmp_buf = NULL;
	struct pollfd *pfds;
	__u32 map_info_len;
	bool do_all = true;

	map_info_len = sizeof(map_info);
	map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len);
	if (map_fd < 0)
		return -1;

	if (map_info.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		p_err("map is not a perf event array");
		goto err_close_map;
	}

	while (argc) {
		if (argc < 2) {
			BAD_ARG();
			goto err_close_map;
		}

		if (is_prefix(*argv, "cpu")) {
			char *endptr;

			NEXT_ARG();
			cpu = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as CPU ID", *argv);
				goto err_close_map;
			}

			NEXT_ARG();
		} else if (is_prefix(*argv, "index")) {
			char *endptr;

			NEXT_ARG();
			index = strtoul(*argv, &endptr, 0);
			if (*endptr) {
				p_err("can't parse %s as index", *argv);
				goto err_close_map;
			}

			NEXT_ARG();
		} else {
			BAD_ARG();
			goto err_close_map;
		}

		do_all = false;
	}

	if (!do_all) {
		if (index == -1 || cpu == -1) {
			p_err("cpu and index must be specified together");
			goto err_close_map;
		}

		nfds = 1;
	} else {
		nfds = min(get_possible_cpus(), map_info.max_entries);
		cpu = 0;
		index = 0;
	}

	rings = calloc(nfds, sizeof(rings[0]));
	if (!rings)
		goto err_close_map;

	pfds = calloc(nfds, sizeof(pfds[0]));
	if (!pfds)
		goto err_free_rings;

	for (i = 0; i < nfds; i++) {
		rings[i].cpu = cpu + i;
		rings[i].key = index + i;

		rings[i].fd = bpf_perf_event_open(map_fd, rings[i].key,
						  rings[i].cpu);
		if (rings[i].fd < 0)
			goto err_close_fds_prev;

		rings[i].mem = perf_event_mmap(rings[i].fd);
		if (!rings[i].mem)
			goto err_close_fds_current;

		pfds[i].fd = rings[i].fd;
		pfds[i].events = POLLIN;
	}

	signal(SIGINT, int_exit);
	signal(SIGHUP, int_exit);
	signal(SIGTERM, int_exit);

	if (json_output)
		jsonw_start_array(json_wtr);

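	/* Poll with a 200 ms timeout so the stop flag set by the signal
	 * handler is checked regularly even when the rings are idle.
	 */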
	while (!stop) {
		poll(pfds, nfds, 200);
		for (i = 0; i < nfds; i++)
			perf_event_read(&rings[i], &tmp_buf, &tmp_buf_sz);
	}
	free(tmp_buf);

	if (json_output)
		jsonw_end_array(json_wtr);

	for (i = 0; i < nfds; i++) {
		perf_event_unmap(rings[i].mem);
		close(rings[i].fd);
	}
	free(pfds);
	free(rings);
	close(map_fd);

	return 0;

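/* Unwind: rings before the failing one are fully set up and get both
 * unmapped and closed; entering at err_close_fds_current skips the unmap
 * for ring i, whose mmap failed, but still closes its fd.
 */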
err_close_fds_prev:
	while (i--) {
		perf_event_unmap(rings[i].mem);
err_close_fds_current:
		close(rings[i].fd);
	}
	free(pfds);
err_free_rings:
	free(rings);
err_close_map:
	close(map_fd);
	return -1;
}