// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/security.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned-access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	if (tp_event->perf_perm) {
		ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * The parent event was already checked and allowed at creation;
	 * allow its children without checking again.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		ret = perf_allow_tracepoint(&p_event->attr);
		if (ret)
			return ret;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for function trace
		 * events, due to issues with page faults while tracing the
		 * page fault handler, and its overall tricky nature.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak;
	 * only allow root to have these.
	 */
	ret = perf_allow_tracepoint(&p_event->attr);
	if (ret)
		return ret;

	return 0;
}

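/*
 * Register a perf event against a trace event: take a perf_refcount
 * reference, and on first use allocate the per-CPU hlist of attached
 * events plus, if no trace event was in use at all, the shared
 * per-context perf_trace_buf scratch buffers.
 */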
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

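/*
 * Drop the perf_refcount reference taken at registration; on the last
 * drop, unregister the perf callback, wait for in-flight probes to
 * finish and free the per-CPU hlist (and, once total_ref_count hits
 * zero, the shared perf_trace_buf buffers).
 */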
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		return;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

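/*
 * Full setup of one perf event: permission check, registration and the
 * per-class open callback, unwinding the registration if open fails.
 */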
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

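/*
 * Bind a perf event to the trace event whose id matches attr.config.
 * The lookup walks ftrace_events under event_mutex and takes a
 * reference on the matching event before initializing it.
 */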
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    trace_event_try_get_ref(tp_event)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				trace_event_put_ref(tp_event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	trace_event_put_ref(p_event->tp_event);
	mutex_unlock(&event_mutex);
}

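/*
 * perf-only ("local") kprobe events: created on demand for a single
 * perf event and destroyed together with it, without ever being
 * registered in the tracefs events directory.
 */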
#ifdef CONFIG_KPROBE_EVENTS
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = strndup_user(u64_to_user_ptr(p_event->attr.kprobe_func),
				    KSYM_NAME_LEN);
		if (IS_ERR(func)) {
			ret = PTR_ERR(func);
			return (ret == -EINVAL) ? -E2BIG : ret;
		}

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	trace_event_put_ref(p_event->tp_event);
	mutex_unlock(&event_mutex);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */

#ifdef CONFIG_UPROBE_EVENTS
int perf_uprobe_init(struct perf_event *p_event,
		     unsigned long ref_ctr_offset, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;

	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
			    PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		return (ret == -EINVAL) ? -E2BIG : ret;
	}
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
					     ref_ctr_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	trace_event_put_ref(p_event->tp_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */

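/*
 * pmu add/del callbacks: unless the event class handles
 * TRACE_REG_PERF_ADD/DEL itself (non-zero return), enqueue the event
 * on or remove it from this CPU's hlist so the probe callbacks can
 * find it.
 */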
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was
	 * performed and we need to take the default action of enqueueing
	 * our event on the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was
	 * performed and we need to take the default action of dequeueing
	 * our event from the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}

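/*
 * Hand out the per-CPU scratch buffer of the current perf recursion
 * context for building a raw sample. A successful allocation must be
 * paired with perf_trace_buf_submit() or a manual
 * perf_swevent_put_recursion_context(*rctxp).
 */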
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough, wanted %d, have %d",
		      size, PERF_MAX_TRACE_SIZE))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;

	tracing_generic_entry_update(entry, type, tracing_gen_ctx());
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
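/*
 * ftrace callback for perf "function" events. This runs on every
 * traced function entry, so bail out early on recursion or when the
 * event belongs to a different CPU, before writing an ftrace_entry
 * sample into the perf buffer.
 */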
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;
	int bit;

	if (!rcu_is_watching())
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	if ((unsigned long)ops->private != smp_processor_id())
		goto out;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		goto out;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

out:
	ftrace_test_recursion_unlock(bit);
#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->func    = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

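/*
 * reg() callback for the function-trace event class: OPEN/CLOSE map to
 * ftrace (un)registration, while ADD/DEL publish the owning CPU via
 * ops->private (nr_cpu_ids meaning "none") and return 1 to opt out of
 * the default per-CPU hlist handling.
 */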
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */