perf: Only update context time when active
[linux-2.6-block.git] / kernel / events / core.c
CommitLineData
0793a61d 1/*
57c0c15b 2 * Performance events core code:
0793a61d 3 *
98144511 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
e7e7ee2e 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
90eec103 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
d36b6910 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
7b732a75 8 *
57c0c15b 9 * For licensing details see kernel-base/COPYING
0793a61d
TG
10 */
11
12#include <linux/fs.h>
b9cacc7b 13#include <linux/mm.h>
0793a61d
TG
14#include <linux/cpu.h>
15#include <linux/smp.h>
2e80a82a 16#include <linux/idr.h>
04289bb9 17#include <linux/file.h>
0793a61d 18#include <linux/poll.h>
5a0e3ad6 19#include <linux/slab.h>
76e1d904 20#include <linux/hash.h>
12351ef8 21#include <linux/tick.h>
0793a61d 22#include <linux/sysfs.h>
22a4f650 23#include <linux/dcache.h>
0793a61d 24#include <linux/percpu.h>
22a4f650 25#include <linux/ptrace.h>
c277443c 26#include <linux/reboot.h>
b9cacc7b 27#include <linux/vmstat.h>
abe43400 28#include <linux/device.h>
6e5fdeed 29#include <linux/export.h>
906010b2 30#include <linux/vmalloc.h>
b9cacc7b
PZ
31#include <linux/hardirq.h>
32#include <linux/rculist.h>
0793a61d
TG
33#include <linux/uaccess.h>
34#include <linux/syscalls.h>
35#include <linux/anon_inodes.h>
aa9c4c0f 36#include <linux/kernel_stat.h>
39bed6cb 37#include <linux/cgroup.h>
cdd6c482 38#include <linux/perf_event.h>
af658dca 39#include <linux/trace_events.h>
3c502e7a 40#include <linux/hw_breakpoint.h>
c5ebcedb 41#include <linux/mm_types.h>
c464c76e 42#include <linux/module.h>
f972eb63 43#include <linux/mman.h>
b3f20785 44#include <linux/compat.h>
2541517c
AS
45#include <linux/bpf.h>
46#include <linux/filter.h>
0793a61d 47
76369139
FW
48#include "internal.h"
49
4e193bd4
TB
50#include <asm/irq_regs.h>
51
272325c4
PZ
52typedef int (*remote_function_f)(void *);
53
fe4b04fa 54struct remote_function_call {
e7e7ee2e 55 struct task_struct *p;
272325c4 56 remote_function_f func;
e7e7ee2e
IM
57 void *info;
58 int ret;
fe4b04fa
PZ
59};
60
61static void remote_function(void *data)
62{
63 struct remote_function_call *tfc = data;
64 struct task_struct *p = tfc->p;
65
66 if (p) {
67 tfc->ret = -EAGAIN;
68 if (task_cpu(p) != smp_processor_id() || !task_curr(p))
69 return;
70 }
71
72 tfc->ret = tfc->func(tfc->info);
73}
74
75/**
76 * task_function_call - call a function on the cpu on which a task runs
77 * @p: the task to evaluate
78 * @func: the function to be called
79 * @info: the function call argument
80 *
81 * Calls the function @func when the task is currently running. This might
82 * be on the current CPU, which just calls the function directly
83 *
84 * returns: @func return value, or
85 * -ESRCH - when the process isn't running
86 * -EAGAIN - when the process moved away
87 */
88static int
272325c4 89task_function_call(struct task_struct *p, remote_function_f func, void *info)
fe4b04fa
PZ
90{
91 struct remote_function_call data = {
e7e7ee2e
IM
92 .p = p,
93 .func = func,
94 .info = info,
95 .ret = -ESRCH, /* No such (running) process */
fe4b04fa
PZ
96 };
97
98 if (task_curr(p))
99 smp_call_function_single(task_cpu(p), remote_function, &data, 1);
100
101 return data.ret;
102}
103
104/**
105 * cpu_function_call - call a function on the cpu
106 * @func: the function to be called
107 * @info: the function call argument
108 *
109 * Calls the function @func on the remote cpu.
110 *
111 * returns: @func return value or -ENXIO when the cpu is offline
112 */
272325c4 113static int cpu_function_call(int cpu, remote_function_f func, void *info)
fe4b04fa
PZ
114{
115 struct remote_function_call data = {
e7e7ee2e
IM
116 .p = NULL,
117 .func = func,
118 .info = info,
119 .ret = -ENXIO, /* No such CPU */
fe4b04fa
PZ
120 };
121
122 smp_call_function_single(cpu, remote_function, &data, 1);
123
124 return data.ret;
125}
126
fae3fde6
PZ
127static inline struct perf_cpu_context *
128__get_cpu_context(struct perf_event_context *ctx)
129{
130 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
131}
132
133static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
134 struct perf_event_context *ctx)
0017960f 135{
fae3fde6
PZ
136 raw_spin_lock(&cpuctx->ctx.lock);
137 if (ctx)
138 raw_spin_lock(&ctx->lock);
139}
140
141static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
142 struct perf_event_context *ctx)
143{
144 if (ctx)
145 raw_spin_unlock(&ctx->lock);
146 raw_spin_unlock(&cpuctx->ctx.lock);
147}
148
63b6da39
PZ
149#define TASK_TOMBSTONE ((void *)-1L)
150
151static bool is_kernel_event(struct perf_event *event)
152{
f47c02c0 153 return READ_ONCE(event->owner) == TASK_TOMBSTONE;
63b6da39
PZ
154}
155
39a43640
PZ
156/*
157 * On task ctx scheduling...
158 *
159 * When !ctx->nr_events a task context will not be scheduled. This means
160 * we can disable the scheduler hooks (for performance) without leaving
161 * pending task ctx state.
162 *
163 * This however results in two special cases:
164 *
165 * - removing the last event from a task ctx; this is relatively straight
166 * forward and is done in __perf_remove_from_context.
167 *
168 * - adding the first event to a task ctx; this is tricky because we cannot
169 * rely on ctx->is_active and therefore cannot use event_function_call().
170 * See perf_install_in_context().
171 *
172 * This is because we need a ctx->lock serialized variable (ctx->is_active)
173 * to reliably determine if a particular task/context is scheduled in. The
174 * task_curr() use in task_function_call() is racy in that a remote context
175 * switch is not a single atomic operation.
176 *
177 * As is, the situation is 'safe' because we set rq->curr before we do the
178 * actual context switch. This means that task_curr() will fail early, but
179 * we'll continue spinning on ctx->is_active until we've passed
180 * perf_event_task_sched_out().
181 *
182 * Without this ctx->lock serialized variable we could have race where we find
183 * the task (and hence the context) would not be active while in fact they are.
184 *
185 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
186 */
187
fae3fde6
PZ
188typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
189 struct perf_event_context *, void *);
190
191struct event_function_struct {
192 struct perf_event *event;
193 event_f func;
194 void *data;
195};
196
197static int event_function(void *info)
198{
199 struct event_function_struct *efs = info;
200 struct perf_event *event = efs->event;
0017960f 201 struct perf_event_context *ctx = event->ctx;
fae3fde6
PZ
202 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
203 struct perf_event_context *task_ctx = cpuctx->task_ctx;
63b6da39 204 int ret = 0;
fae3fde6
PZ
205
206 WARN_ON_ONCE(!irqs_disabled());
207
63b6da39 208 perf_ctx_lock(cpuctx, task_ctx);
fae3fde6
PZ
209 /*
210 * Since we do the IPI call without holding ctx->lock things can have
211 * changed, double check we hit the task we set out to hit.
fae3fde6
PZ
212 */
213 if (ctx->task) {
63b6da39
PZ
214 if (ctx->task != current) {
215 ret = -EAGAIN;
216 goto unlock;
217 }
fae3fde6 218
fae3fde6
PZ
219 /*
220 * We only use event_function_call() on established contexts,
221 * and event_function() is only ever called when active (or
222 * rather, we'll have bailed in task_function_call() or the
223 * above ctx->task != current test), therefore we must have
224 * ctx->is_active here.
225 */
226 WARN_ON_ONCE(!ctx->is_active);
227 /*
228 * And since we have ctx->is_active, cpuctx->task_ctx must
229 * match.
230 */
63b6da39
PZ
231 WARN_ON_ONCE(task_ctx != ctx);
232 } else {
233 WARN_ON_ONCE(&cpuctx->ctx != ctx);
fae3fde6 234 }
63b6da39 235
fae3fde6 236 efs->func(event, cpuctx, ctx, efs->data);
63b6da39 237unlock:
fae3fde6
PZ
238 perf_ctx_unlock(cpuctx, task_ctx);
239
63b6da39 240 return ret;
fae3fde6
PZ
241}
242
243static void event_function_local(struct perf_event *event, event_f func, void *data)
244{
245 struct event_function_struct efs = {
246 .event = event,
247 .func = func,
248 .data = data,
249 };
250
251 int ret = event_function(&efs);
252 WARN_ON_ONCE(ret);
253}
254
255static void event_function_call(struct perf_event *event, event_f func, void *data)
0017960f
PZ
256{
257 struct perf_event_context *ctx = event->ctx;
63b6da39 258 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
fae3fde6
PZ
259 struct event_function_struct efs = {
260 .event = event,
261 .func = func,
262 .data = data,
263 };
0017960f 264
c97f4736
PZ
265 if (!event->parent) {
266 /*
267 * If this is a !child event, we must hold ctx::mutex to
268 * stabilize the the event->ctx relation. See
269 * perf_event_ctx_lock().
270 */
271 lockdep_assert_held(&ctx->mutex);
272 }
0017960f
PZ
273
274 if (!task) {
fae3fde6 275 cpu_function_call(event->cpu, event_function, &efs);
0017960f
PZ
276 return;
277 }
278
279again:
63b6da39
PZ
280 if (task == TASK_TOMBSTONE)
281 return;
282
fae3fde6 283 if (!task_function_call(task, event_function, &efs))
0017960f
PZ
284 return;
285
286 raw_spin_lock_irq(&ctx->lock);
63b6da39
PZ
287 /*
288 * Reload the task pointer, it might have been changed by
289 * a concurrent perf_event_context_sched_out().
290 */
291 task = ctx->task;
292 if (task != TASK_TOMBSTONE) {
293 if (ctx->is_active) {
294 raw_spin_unlock_irq(&ctx->lock);
295 goto again;
296 }
297 func(event, NULL, ctx, data);
0017960f 298 }
0017960f
PZ
299 raw_spin_unlock_irq(&ctx->lock);
300}
301
e5d1367f
SE
302#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
303 PERF_FLAG_FD_OUTPUT |\
a21b0b35
YD
304 PERF_FLAG_PID_CGROUP |\
305 PERF_FLAG_FD_CLOEXEC)
e5d1367f 306
bce38cd5
SE
307/*
308 * branch priv levels that need permission checks
309 */
310#define PERF_SAMPLE_BRANCH_PERM_PLM \
311 (PERF_SAMPLE_BRANCH_KERNEL |\
312 PERF_SAMPLE_BRANCH_HV)
313
0b3fcf17
SE
314enum event_type_t {
315 EVENT_FLEXIBLE = 0x1,
316 EVENT_PINNED = 0x2,
317 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
318};
319
e5d1367f
SE
320/*
321 * perf_sched_events : >0 events exist
322 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
323 */
c5905afb 324struct static_key_deferred perf_sched_events __read_mostly;
e5d1367f 325static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
ba532500 326static DEFINE_PER_CPU(int, perf_sched_cb_usages);
e5d1367f 327
cdd6c482
IM
328static atomic_t nr_mmap_events __read_mostly;
329static atomic_t nr_comm_events __read_mostly;
330static atomic_t nr_task_events __read_mostly;
948b26b6 331static atomic_t nr_freq_events __read_mostly;
45ac1403 332static atomic_t nr_switch_events __read_mostly;
9ee318a7 333
108b02cf
PZ
334static LIST_HEAD(pmus);
335static DEFINE_MUTEX(pmus_lock);
336static struct srcu_struct pmus_srcu;
337
0764771d 338/*
cdd6c482 339 * perf event paranoia level:
0fbdea19
IM
340 * -1 - not paranoid at all
341 * 0 - disallow raw tracepoint access for unpriv
cdd6c482 342 * 1 - disallow cpu events for unpriv
0fbdea19 343 * 2 - disallow kernel profiling for unpriv
0764771d 344 */
cdd6c482 345int sysctl_perf_event_paranoid __read_mostly = 1;
0764771d 346
20443384
FW
347/* Minimum for 512 kiB + 1 user control page */
348int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
df58ab24
PZ
349
350/*
cdd6c482 351 * max perf event sample rate
df58ab24 352 */
14c63f17
DH
353#define DEFAULT_MAX_SAMPLE_RATE 100000
354#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
355#define DEFAULT_CPU_TIME_MAX_PERCENT 25
356
357int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
358
359static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
360static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
361
d9494cb4
PZ
362static int perf_sample_allowed_ns __read_mostly =
363 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
14c63f17 364
18ab2cd3 365static void update_perf_cpu_limits(void)
14c63f17
DH
366{
367 u64 tmp = perf_sample_period_ns;
368
369 tmp *= sysctl_perf_cpu_time_max_percent;
e5302920 370 do_div(tmp, 100);
d9494cb4 371 ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
14c63f17 372}
163ec435 373
9e630205
SE
374static int perf_rotate_context(struct perf_cpu_context *cpuctx);
375
163ec435
PZ
376int perf_proc_update_handler(struct ctl_table *table, int write,
377 void __user *buffer, size_t *lenp,
378 loff_t *ppos)
379{
723478c8 380 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
163ec435
PZ
381
382 if (ret || !write)
383 return ret;
384
385 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
14c63f17
DH
386 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
387 update_perf_cpu_limits();
388
389 return 0;
390}
391
392int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
393
394int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
395 void __user *buffer, size_t *lenp,
396 loff_t *ppos)
397{
398 int ret = proc_dointvec(table, write, buffer, lenp, ppos);
399
400 if (ret || !write)
401 return ret;
402
403 update_perf_cpu_limits();
163ec435
PZ
404
405 return 0;
406}
1ccd1549 407
14c63f17
DH
408/*
409 * perf samples are done in some very critical code paths (NMIs).
410 * If they take too much CPU time, the system can lock up and not
411 * get any real work done. This will drop the sample rate when
412 * we detect that events are taking too long.
413 */
414#define NR_ACCUMULATED_SAMPLES 128
d9494cb4 415static DEFINE_PER_CPU(u64, running_sample_length);
14c63f17 416
6a02ad66 417static void perf_duration_warn(struct irq_work *w)
14c63f17 418{
6a02ad66 419 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
14c63f17 420 u64 avg_local_sample_len;
e5302920 421 u64 local_samples_len;
6a02ad66 422
4a32fea9 423 local_samples_len = __this_cpu_read(running_sample_length);
6a02ad66
PZ
424 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
425
426 printk_ratelimited(KERN_WARNING
427 "perf interrupt took too long (%lld > %lld), lowering "
428 "kernel.perf_event_max_sample_rate to %d\n",
cd578abb 429 avg_local_sample_len, allowed_ns >> 1,
6a02ad66
PZ
430 sysctl_perf_event_sample_rate);
431}
432
433static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
434
435void perf_sample_event_took(u64 sample_len_ns)
436{
d9494cb4 437 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
6a02ad66
PZ
438 u64 avg_local_sample_len;
439 u64 local_samples_len;
14c63f17 440
d9494cb4 441 if (allowed_ns == 0)
14c63f17
DH
442 return;
443
444 /* decay the counter by 1 average sample */
4a32fea9 445 local_samples_len = __this_cpu_read(running_sample_length);
14c63f17
DH
446 local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
447 local_samples_len += sample_len_ns;
4a32fea9 448 __this_cpu_write(running_sample_length, local_samples_len);
14c63f17
DH
449
450 /*
451 * note: this will be biased artifically low until we have
452 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
453 * from having to maintain a count.
454 */
455 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
456
d9494cb4 457 if (avg_local_sample_len <= allowed_ns)
14c63f17
DH
458 return;
459
460 if (max_samples_per_tick <= 1)
461 return;
462
463 max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
464 sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
465 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
466
14c63f17 467 update_perf_cpu_limits();
6a02ad66 468
cd578abb
PZ
469 if (!irq_work_queue(&perf_duration_work)) {
470 early_printk("perf interrupt took too long (%lld > %lld), lowering "
471 "kernel.perf_event_max_sample_rate to %d\n",
472 avg_local_sample_len, allowed_ns >> 1,
473 sysctl_perf_event_sample_rate);
474 }
14c63f17
DH
475}
476
cdd6c482 477static atomic64_t perf_event_id;
a96bbc16 478
0b3fcf17
SE
479static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
480 enum event_type_t event_type);
481
482static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
483 enum event_type_t event_type,
484 struct task_struct *task);
485
486static void update_context_time(struct perf_event_context *ctx);
487static u64 perf_event_time(struct perf_event *event);
0b3fcf17 488
cdd6c482 489void __weak perf_event_print_debug(void) { }
0793a61d 490
84c79910 491extern __weak const char *perf_pmu_name(void)
0793a61d 492{
84c79910 493 return "pmu";
0793a61d
TG
494}
495
0b3fcf17
SE
496static inline u64 perf_clock(void)
497{
498 return local_clock();
499}
500
34f43927
PZ
501static inline u64 perf_event_clock(struct perf_event *event)
502{
503 return event->clock();
504}
505
e5d1367f
SE
506#ifdef CONFIG_CGROUP_PERF
507
e5d1367f
SE
508static inline bool
509perf_cgroup_match(struct perf_event *event)
510{
511 struct perf_event_context *ctx = event->ctx;
512 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
513
ef824fa1
TH
514 /* @event doesn't care about cgroup */
515 if (!event->cgrp)
516 return true;
517
518 /* wants specific cgroup scope but @cpuctx isn't associated with any */
519 if (!cpuctx->cgrp)
520 return false;
521
522 /*
523 * Cgroup scoping is recursive. An event enabled for a cgroup is
524 * also enabled for all its descendant cgroups. If @cpuctx's
525 * cgroup is a descendant of @event's (the test covers identity
526 * case), it's a match.
527 */
528 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
529 event->cgrp->css.cgroup);
e5d1367f
SE
530}
531
e5d1367f
SE
532static inline void perf_detach_cgroup(struct perf_event *event)
533{
4e2ba650 534 css_put(&event->cgrp->css);
e5d1367f
SE
535 event->cgrp = NULL;
536}
537
538static inline int is_cgroup_event(struct perf_event *event)
539{
540 return event->cgrp != NULL;
541}
542
543static inline u64 perf_cgroup_event_time(struct perf_event *event)
544{
545 struct perf_cgroup_info *t;
546
547 t = per_cpu_ptr(event->cgrp->info, event->cpu);
548 return t->time;
549}
550
551static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
552{
553 struct perf_cgroup_info *info;
554 u64 now;
555
556 now = perf_clock();
557
558 info = this_cpu_ptr(cgrp->info);
559
560 info->time += now - info->timestamp;
561 info->timestamp = now;
562}
563
564static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
565{
566 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
567 if (cgrp_out)
568 __update_cgrp_time(cgrp_out);
569}
570
571static inline void update_cgrp_time_from_event(struct perf_event *event)
572{
3f7cce3c
SE
573 struct perf_cgroup *cgrp;
574
e5d1367f 575 /*
3f7cce3c
SE
576 * ensure we access cgroup data only when needed and
577 * when we know the cgroup is pinned (css_get)
e5d1367f 578 */
3f7cce3c 579 if (!is_cgroup_event(event))
e5d1367f
SE
580 return;
581
614e4c4e 582 cgrp = perf_cgroup_from_task(current, event->ctx);
3f7cce3c
SE
583 /*
584 * Do not update time when cgroup is not active
585 */
586 if (cgrp == event->cgrp)
587 __update_cgrp_time(event->cgrp);
e5d1367f
SE
588}
589
590static inline void
3f7cce3c
SE
591perf_cgroup_set_timestamp(struct task_struct *task,
592 struct perf_event_context *ctx)
e5d1367f
SE
593{
594 struct perf_cgroup *cgrp;
595 struct perf_cgroup_info *info;
596
3f7cce3c
SE
597 /*
598 * ctx->lock held by caller
599 * ensure we do not access cgroup data
600 * unless we have the cgroup pinned (css_get)
601 */
602 if (!task || !ctx->nr_cgroups)
e5d1367f
SE
603 return;
604
614e4c4e 605 cgrp = perf_cgroup_from_task(task, ctx);
e5d1367f 606 info = this_cpu_ptr(cgrp->info);
3f7cce3c 607 info->timestamp = ctx->timestamp;
e5d1367f
SE
608}
609
610#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
611#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
612
613/*
614 * reschedule events based on the cgroup constraint of task.
615 *
616 * mode SWOUT : schedule out everything
617 * mode SWIN : schedule in based on cgroup for next
618 */
18ab2cd3 619static void perf_cgroup_switch(struct task_struct *task, int mode)
e5d1367f
SE
620{
621 struct perf_cpu_context *cpuctx;
622 struct pmu *pmu;
623 unsigned long flags;
624
625 /*
626 * disable interrupts to avoid geting nr_cgroup
627 * changes via __perf_event_disable(). Also
628 * avoids preemption.
629 */
630 local_irq_save(flags);
631
632 /*
633 * we reschedule only in the presence of cgroup
634 * constrained events.
635 */
e5d1367f
SE
636
637 list_for_each_entry_rcu(pmu, &pmus, entry) {
e5d1367f 638 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
95cf59ea
PZ
639 if (cpuctx->unique_pmu != pmu)
640 continue; /* ensure we process each cpuctx once */
e5d1367f 641
e5d1367f
SE
642 /*
643 * perf_cgroup_events says at least one
644 * context on this CPU has cgroup events.
645 *
646 * ctx->nr_cgroups reports the number of cgroup
647 * events for a context.
648 */
649 if (cpuctx->ctx.nr_cgroups > 0) {
facc4307
PZ
650 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
651 perf_pmu_disable(cpuctx->ctx.pmu);
e5d1367f
SE
652
653 if (mode & PERF_CGROUP_SWOUT) {
654 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
655 /*
656 * must not be done before ctxswout due
657 * to event_filter_match() in event_sched_out()
658 */
659 cpuctx->cgrp = NULL;
660 }
661
662 if (mode & PERF_CGROUP_SWIN) {
e566b76e 663 WARN_ON_ONCE(cpuctx->cgrp);
95cf59ea
PZ
664 /*
665 * set cgrp before ctxsw in to allow
666 * event_filter_match() to not have to pass
667 * task around
614e4c4e
SE
668 * we pass the cpuctx->ctx to perf_cgroup_from_task()
669 * because cgorup events are only per-cpu
e5d1367f 670 */
614e4c4e 671 cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
e5d1367f
SE
672 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
673 }
facc4307
PZ
674 perf_pmu_enable(cpuctx->ctx.pmu);
675 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
e5d1367f 676 }
e5d1367f
SE
677 }
678
e5d1367f
SE
679 local_irq_restore(flags);
680}
681
a8d757ef
SE
682static inline void perf_cgroup_sched_out(struct task_struct *task,
683 struct task_struct *next)
e5d1367f 684{
a8d757ef
SE
685 struct perf_cgroup *cgrp1;
686 struct perf_cgroup *cgrp2 = NULL;
687
ddaaf4e2 688 rcu_read_lock();
a8d757ef
SE
689 /*
690 * we come here when we know perf_cgroup_events > 0
614e4c4e
SE
691 * we do not need to pass the ctx here because we know
692 * we are holding the rcu lock
a8d757ef 693 */
614e4c4e 694 cgrp1 = perf_cgroup_from_task(task, NULL);
70a01657 695 cgrp2 = perf_cgroup_from_task(next, NULL);
a8d757ef
SE
696
697 /*
698 * only schedule out current cgroup events if we know
699 * that we are switching to a different cgroup. Otherwise,
700 * do no touch the cgroup events.
701 */
702 if (cgrp1 != cgrp2)
703 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
ddaaf4e2
SE
704
705 rcu_read_unlock();
e5d1367f
SE
706}
707
a8d757ef
SE
708static inline void perf_cgroup_sched_in(struct task_struct *prev,
709 struct task_struct *task)
e5d1367f 710{
a8d757ef
SE
711 struct perf_cgroup *cgrp1;
712 struct perf_cgroup *cgrp2 = NULL;
713
ddaaf4e2 714 rcu_read_lock();
a8d757ef
SE
715 /*
716 * we come here when we know perf_cgroup_events > 0
614e4c4e
SE
717 * we do not need to pass the ctx here because we know
718 * we are holding the rcu lock
a8d757ef 719 */
614e4c4e 720 cgrp1 = perf_cgroup_from_task(task, NULL);
614e4c4e 721 cgrp2 = perf_cgroup_from_task(prev, NULL);
a8d757ef
SE
722
723 /*
724 * only need to schedule in cgroup events if we are changing
725 * cgroup during ctxsw. Cgroup events were not scheduled
726 * out of ctxsw out if that was not the case.
727 */
728 if (cgrp1 != cgrp2)
729 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
ddaaf4e2
SE
730
731 rcu_read_unlock();
e5d1367f
SE
732}
733
734static inline int perf_cgroup_connect(int fd, struct perf_event *event,
735 struct perf_event_attr *attr,
736 struct perf_event *group_leader)
737{
738 struct perf_cgroup *cgrp;
739 struct cgroup_subsys_state *css;
2903ff01
AV
740 struct fd f = fdget(fd);
741 int ret = 0;
e5d1367f 742
2903ff01 743 if (!f.file)
e5d1367f
SE
744 return -EBADF;
745
b583043e 746 css = css_tryget_online_from_dir(f.file->f_path.dentry,
ec903c0c 747 &perf_event_cgrp_subsys);
3db272c0
LZ
748 if (IS_ERR(css)) {
749 ret = PTR_ERR(css);
750 goto out;
751 }
e5d1367f
SE
752
753 cgrp = container_of(css, struct perf_cgroup, css);
754 event->cgrp = cgrp;
755
756 /*
757 * all events in a group must monitor
758 * the same cgroup because a task belongs
759 * to only one perf cgroup at a time
760 */
761 if (group_leader && group_leader->cgrp != cgrp) {
762 perf_detach_cgroup(event);
763 ret = -EINVAL;
e5d1367f 764 }
3db272c0 765out:
2903ff01 766 fdput(f);
e5d1367f
SE
767 return ret;
768}
769
770static inline void
771perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
772{
773 struct perf_cgroup_info *t;
774 t = per_cpu_ptr(event->cgrp->info, event->cpu);
775 event->shadow_ctx_time = now - t->timestamp;
776}
777
778static inline void
779perf_cgroup_defer_enabled(struct perf_event *event)
780{
781 /*
782 * when the current task's perf cgroup does not match
783 * the event's, we need to remember to call the
784 * perf_mark_enable() function the first time a task with
785 * a matching perf cgroup is scheduled in.
786 */
787 if (is_cgroup_event(event) && !perf_cgroup_match(event))
788 event->cgrp_defer_enabled = 1;
789}
790
791static inline void
792perf_cgroup_mark_enabled(struct perf_event *event,
793 struct perf_event_context *ctx)
794{
795 struct perf_event *sub;
796 u64 tstamp = perf_event_time(event);
797
798 if (!event->cgrp_defer_enabled)
799 return;
800
801 event->cgrp_defer_enabled = 0;
802
803 event->tstamp_enabled = tstamp - event->total_time_enabled;
804 list_for_each_entry(sub, &event->sibling_list, group_entry) {
805 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
806 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
807 sub->cgrp_defer_enabled = 0;
808 }
809 }
810}
811#else /* !CONFIG_CGROUP_PERF */
812
813static inline bool
814perf_cgroup_match(struct perf_event *event)
815{
816 return true;
817}
818
819static inline void perf_detach_cgroup(struct perf_event *event)
820{}
821
822static inline int is_cgroup_event(struct perf_event *event)
823{
824 return 0;
825}
826
827static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
828{
829 return 0;
830}
831
832static inline void update_cgrp_time_from_event(struct perf_event *event)
833{
834}
835
836static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
837{
838}
839
a8d757ef
SE
840static inline void perf_cgroup_sched_out(struct task_struct *task,
841 struct task_struct *next)
e5d1367f
SE
842{
843}
844
a8d757ef
SE
845static inline void perf_cgroup_sched_in(struct task_struct *prev,
846 struct task_struct *task)
e5d1367f
SE
847{
848}
849
850static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
851 struct perf_event_attr *attr,
852 struct perf_event *group_leader)
853{
854 return -EINVAL;
855}
856
857static inline void
3f7cce3c
SE
858perf_cgroup_set_timestamp(struct task_struct *task,
859 struct perf_event_context *ctx)
e5d1367f
SE
860{
861}
862
863void
864perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
865{
866}
867
868static inline void
869perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
870{
871}
872
873static inline u64 perf_cgroup_event_time(struct perf_event *event)
874{
875 return 0;
876}
877
878static inline void
879perf_cgroup_defer_enabled(struct perf_event *event)
880{
881}
882
883static inline void
884perf_cgroup_mark_enabled(struct perf_event *event,
885 struct perf_event_context *ctx)
886{
887}
888#endif
889
9e630205
SE
890/*
891 * set default to be dependent on timer tick just
892 * like original code
893 */
894#define PERF_CPU_HRTIMER (1000 / HZ)
895/*
896 * function must be called with interrupts disbled
897 */
272325c4 898static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
9e630205
SE
899{
900 struct perf_cpu_context *cpuctx;
9e630205
SE
901 int rotations = 0;
902
903 WARN_ON(!irqs_disabled());
904
905 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
9e630205
SE
906 rotations = perf_rotate_context(cpuctx);
907
4cfafd30
PZ
908 raw_spin_lock(&cpuctx->hrtimer_lock);
909 if (rotations)
9e630205 910 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
4cfafd30
PZ
911 else
912 cpuctx->hrtimer_active = 0;
913 raw_spin_unlock(&cpuctx->hrtimer_lock);
9e630205 914
4cfafd30 915 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
9e630205
SE
916}
917
272325c4 918static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
9e630205 919{
272325c4 920 struct hrtimer *timer = &cpuctx->hrtimer;
9e630205 921 struct pmu *pmu = cpuctx->ctx.pmu;
272325c4 922 u64 interval;
9e630205
SE
923
924 /* no multiplexing needed for SW PMU */
925 if (pmu->task_ctx_nr == perf_sw_context)
926 return;
927
62b85639
SE
928 /*
929 * check default is sane, if not set then force to
930 * default interval (1/tick)
931 */
272325c4
PZ
932 interval = pmu->hrtimer_interval_ms;
933 if (interval < 1)
934 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
62b85639 935
272325c4 936 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
9e630205 937
4cfafd30
PZ
938 raw_spin_lock_init(&cpuctx->hrtimer_lock);
939 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
272325c4 940 timer->function = perf_mux_hrtimer_handler;
9e630205
SE
941}
942
272325c4 943static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
9e630205 944{
272325c4 945 struct hrtimer *timer = &cpuctx->hrtimer;
9e630205 946 struct pmu *pmu = cpuctx->ctx.pmu;
4cfafd30 947 unsigned long flags;
9e630205
SE
948
949 /* not for SW PMU */
950 if (pmu->task_ctx_nr == perf_sw_context)
272325c4 951 return 0;
9e630205 952
4cfafd30
PZ
953 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
954 if (!cpuctx->hrtimer_active) {
955 cpuctx->hrtimer_active = 1;
956 hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
957 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
958 }
959 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
9e630205 960
272325c4 961 return 0;
9e630205
SE
962}
963
33696fc0 964void perf_pmu_disable(struct pmu *pmu)
9e35ad38 965{
33696fc0
PZ
966 int *count = this_cpu_ptr(pmu->pmu_disable_count);
967 if (!(*count)++)
968 pmu->pmu_disable(pmu);
9e35ad38 969}
9e35ad38 970
33696fc0 971void perf_pmu_enable(struct pmu *pmu)
9e35ad38 972{
33696fc0
PZ
973 int *count = this_cpu_ptr(pmu->pmu_disable_count);
974 if (!--(*count))
975 pmu->pmu_enable(pmu);
9e35ad38 976}
9e35ad38 977
2fde4f94 978static DEFINE_PER_CPU(struct list_head, active_ctx_list);
e9d2b064
PZ
979
980/*
2fde4f94
MR
981 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
982 * perf_event_task_tick() are fully serialized because they're strictly cpu
983 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
984 * disabled, while perf_event_task_tick is called from IRQ context.
e9d2b064 985 */
2fde4f94 986static void perf_event_ctx_activate(struct perf_event_context *ctx)
9e35ad38 987{
2fde4f94 988 struct list_head *head = this_cpu_ptr(&active_ctx_list);
b5ab4cd5 989
e9d2b064 990 WARN_ON(!irqs_disabled());
b5ab4cd5 991
2fde4f94
MR
992 WARN_ON(!list_empty(&ctx->active_ctx_list));
993
994 list_add(&ctx->active_ctx_list, head);
995}
996
997static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
998{
999 WARN_ON(!irqs_disabled());
1000
1001 WARN_ON(list_empty(&ctx->active_ctx_list));
1002
1003 list_del_init(&ctx->active_ctx_list);
9e35ad38 1004}
9e35ad38 1005
cdd6c482 1006static void get_ctx(struct perf_event_context *ctx)
a63eaf34 1007{
e5289d4a 1008 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
a63eaf34
PM
1009}
1010
4af57ef2
YZ
1011static void free_ctx(struct rcu_head *head)
1012{
1013 struct perf_event_context *ctx;
1014
1015 ctx = container_of(head, struct perf_event_context, rcu_head);
1016 kfree(ctx->task_ctx_data);
1017 kfree(ctx);
1018}
1019
cdd6c482 1020static void put_ctx(struct perf_event_context *ctx)
a63eaf34 1021{
564c2b21
PM
1022 if (atomic_dec_and_test(&ctx->refcount)) {
1023 if (ctx->parent_ctx)
1024 put_ctx(ctx->parent_ctx);
63b6da39 1025 if (ctx->task && ctx->task != TASK_TOMBSTONE)
c93f7669 1026 put_task_struct(ctx->task);
4af57ef2 1027 call_rcu(&ctx->rcu_head, free_ctx);
564c2b21 1028 }
a63eaf34
PM
1029}
1030
f63a8daa
PZ
1031/*
1032 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
1033 * perf_pmu_migrate_context() we need some magic.
1034 *
1035 * Those places that change perf_event::ctx will hold both
1036 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
1037 *
8b10c5e2
PZ
1038 * Lock ordering is by mutex address. There are two other sites where
1039 * perf_event_context::mutex nests and those are:
1040 *
1041 * - perf_event_exit_task_context() [ child , 0 ]
8ba289b8
PZ
1042 * perf_event_exit_event()
1043 * put_event() [ parent, 1 ]
8b10c5e2
PZ
1044 *
1045 * - perf_event_init_context() [ parent, 0 ]
1046 * inherit_task_group()
1047 * inherit_group()
1048 * inherit_event()
1049 * perf_event_alloc()
1050 * perf_init_event()
1051 * perf_try_init_event() [ child , 1 ]
1052 *
1053 * While it appears there is an obvious deadlock here -- the parent and child
1054 * nesting levels are inverted between the two. This is in fact safe because
1055 * life-time rules separate them. That is an exiting task cannot fork, and a
1056 * spawning task cannot (yet) exit.
1057 *
1058 * But remember that that these are parent<->child context relations, and
1059 * migration does not affect children, therefore these two orderings should not
1060 * interact.
f63a8daa
PZ
1061 *
1062 * The change in perf_event::ctx does not affect children (as claimed above)
1063 * because the sys_perf_event_open() case will install a new event and break
1064 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1065 * concerned with cpuctx and that doesn't have children.
1066 *
1067 * The places that change perf_event::ctx will issue:
1068 *
1069 * perf_remove_from_context();
1070 * synchronize_rcu();
1071 * perf_install_in_context();
1072 *
1073 * to affect the change. The remove_from_context() + synchronize_rcu() should
1074 * quiesce the event, after which we can install it in the new location. This
1075 * means that only external vectors (perf_fops, prctl) can perturb the event
1076 * while in transit. Therefore all such accessors should also acquire
1077 * perf_event_context::mutex to serialize against this.
1078 *
1079 * However; because event->ctx can change while we're waiting to acquire
1080 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
1081 * function.
1082 *
1083 * Lock order:
1084 * task_struct::perf_event_mutex
1085 * perf_event_context::mutex
f63a8daa 1086 * perf_event::child_mutex;
07c4a776 1087 * perf_event_context::lock
f63a8daa
PZ
1088 * perf_event::mmap_mutex
1089 * mmap_sem
1090 */
a83fe28e
PZ
1091static struct perf_event_context *
1092perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
f63a8daa
PZ
1093{
1094 struct perf_event_context *ctx;
1095
1096again:
1097 rcu_read_lock();
1098 ctx = ACCESS_ONCE(event->ctx);
1099 if (!atomic_inc_not_zero(&ctx->refcount)) {
1100 rcu_read_unlock();
1101 goto again;
1102 }
1103 rcu_read_unlock();
1104
a83fe28e 1105 mutex_lock_nested(&ctx->mutex, nesting);
f63a8daa
PZ
1106 if (event->ctx != ctx) {
1107 mutex_unlock(&ctx->mutex);
1108 put_ctx(ctx);
1109 goto again;
1110 }
1111
1112 return ctx;
1113}
1114
a83fe28e
PZ
1115static inline struct perf_event_context *
1116perf_event_ctx_lock(struct perf_event *event)
1117{
1118 return perf_event_ctx_lock_nested(event, 0);
1119}
1120
f63a8daa
PZ
1121static void perf_event_ctx_unlock(struct perf_event *event,
1122 struct perf_event_context *ctx)
1123{
1124 mutex_unlock(&ctx->mutex);
1125 put_ctx(ctx);
1126}
1127
211de6eb
PZ
1128/*
1129 * This must be done under the ctx->lock, such as to serialize against
1130 * context_equiv(), therefore we cannot call put_ctx() since that might end up
1131 * calling scheduler related locks and ctx->lock nests inside those.
1132 */
1133static __must_check struct perf_event_context *
1134unclone_ctx(struct perf_event_context *ctx)
71a851b4 1135{
211de6eb
PZ
1136 struct perf_event_context *parent_ctx = ctx->parent_ctx;
1137
1138 lockdep_assert_held(&ctx->lock);
1139
1140 if (parent_ctx)
71a851b4 1141 ctx->parent_ctx = NULL;
5a3126d4 1142 ctx->generation++;
211de6eb
PZ
1143
1144 return parent_ctx;
71a851b4
PZ
1145}
1146
6844c09d
ACM
1147static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1148{
1149 /*
1150 * only top level events have the pid namespace they were created in
1151 */
1152 if (event->parent)
1153 event = event->parent;
1154
1155 return task_tgid_nr_ns(p, event->ns);
1156}
1157
1158static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1159{
1160 /*
1161 * only top level events have the pid namespace they were created in
1162 */
1163 if (event->parent)
1164 event = event->parent;
1165
1166 return task_pid_nr_ns(p, event->ns);
1167}
1168
7f453c24 1169/*
cdd6c482 1170 * If we inherit events we want to return the parent event id
7f453c24
PZ
1171 * to userspace.
1172 */
cdd6c482 1173static u64 primary_event_id(struct perf_event *event)
7f453c24 1174{
cdd6c482 1175 u64 id = event->id;
7f453c24 1176
cdd6c482
IM
1177 if (event->parent)
1178 id = event->parent->id;
7f453c24
PZ
1179
1180 return id;
1181}
1182
25346b93 1183/*
cdd6c482 1184 * Get the perf_event_context for a task and lock it.
63b6da39 1185 *
25346b93
PM
1186 * This has to cope with with the fact that until it is locked,
1187 * the context could get moved to another task.
1188 */
cdd6c482 1189static struct perf_event_context *
8dc85d54 1190perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
25346b93 1191{
cdd6c482 1192 struct perf_event_context *ctx;
25346b93 1193
9ed6060d 1194retry:
058ebd0e
PZ
1195 /*
1196 * One of the few rules of preemptible RCU is that one cannot do
1197 * rcu_read_unlock() while holding a scheduler (or nested) lock when
2fd59077 1198 * part of the read side critical section was irqs-enabled -- see
058ebd0e
PZ
1199 * rcu_read_unlock_special().
1200 *
1201 * Since ctx->lock nests under rq->lock we must ensure the entire read
2fd59077 1202 * side critical section has interrupts disabled.
058ebd0e 1203 */
2fd59077 1204 local_irq_save(*flags);
058ebd0e 1205 rcu_read_lock();
8dc85d54 1206 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
25346b93
PM
1207 if (ctx) {
1208 /*
1209 * If this context is a clone of another, it might
1210 * get swapped for another underneath us by
cdd6c482 1211 * perf_event_task_sched_out, though the
25346b93
PM
1212 * rcu_read_lock() protects us from any context
1213 * getting freed. Lock the context and check if it
1214 * got swapped before we could get the lock, and retry
1215 * if so. If we locked the right context, then it
1216 * can't get swapped on us any more.
1217 */
2fd59077 1218 raw_spin_lock(&ctx->lock);
8dc85d54 1219 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
2fd59077 1220 raw_spin_unlock(&ctx->lock);
058ebd0e 1221 rcu_read_unlock();
2fd59077 1222 local_irq_restore(*flags);
25346b93
PM
1223 goto retry;
1224 }
b49a9e7e 1225
63b6da39
PZ
1226 if (ctx->task == TASK_TOMBSTONE ||
1227 !atomic_inc_not_zero(&ctx->refcount)) {
2fd59077 1228 raw_spin_unlock(&ctx->lock);
b49a9e7e 1229 ctx = NULL;
828b6f0e
PZ
1230 } else {
1231 WARN_ON_ONCE(ctx->task != task);
b49a9e7e 1232 }
25346b93
PM
1233 }
1234 rcu_read_unlock();
2fd59077
PM
1235 if (!ctx)
1236 local_irq_restore(*flags);
25346b93
PM
1237 return ctx;
1238}
1239
1240/*
1241 * Get the context for a task and increment its pin_count so it
1242 * can't get swapped to another task. This also increments its
1243 * reference count so that the context can't get freed.
1244 */
8dc85d54
PZ
1245static struct perf_event_context *
1246perf_pin_task_context(struct task_struct *task, int ctxn)
25346b93 1247{
cdd6c482 1248 struct perf_event_context *ctx;
25346b93
PM
1249 unsigned long flags;
1250
8dc85d54 1251 ctx = perf_lock_task_context(task, ctxn, &flags);
25346b93
PM
1252 if (ctx) {
1253 ++ctx->pin_count;
e625cce1 1254 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1255 }
1256 return ctx;
1257}
1258
cdd6c482 1259static void perf_unpin_context(struct perf_event_context *ctx)
25346b93
PM
1260{
1261 unsigned long flags;
1262
e625cce1 1263 raw_spin_lock_irqsave(&ctx->lock, flags);
25346b93 1264 --ctx->pin_count;
e625cce1 1265 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1266}
1267
f67218c3
PZ
1268/*
1269 * Update the record of the current time in a context.
1270 */
1271static void update_context_time(struct perf_event_context *ctx)
1272{
1273 u64 now = perf_clock();
1274
1275 ctx->time += now - ctx->timestamp;
1276 ctx->timestamp = now;
1277}
1278
4158755d
SE
1279static u64 perf_event_time(struct perf_event *event)
1280{
1281 struct perf_event_context *ctx = event->ctx;
e5d1367f
SE
1282
1283 if (is_cgroup_event(event))
1284 return perf_cgroup_event_time(event);
1285
4158755d
SE
1286 return ctx ? ctx->time : 0;
1287}
1288
f67218c3
PZ
1289/*
1290 * Update the total_time_enabled and total_time_running fields for a event.
b7526f0c 1291 * The caller of this function needs to hold the ctx->lock.
f67218c3
PZ
1292 */
1293static void update_event_times(struct perf_event *event)
1294{
1295 struct perf_event_context *ctx = event->ctx;
1296 u64 run_end;
1297
1298 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1299 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1300 return;
e5d1367f
SE
1301 /*
1302 * in cgroup mode, time_enabled represents
1303 * the time the event was enabled AND active
1304 * tasks were in the monitored cgroup. This is
1305 * independent of the activity of the context as
1306 * there may be a mix of cgroup and non-cgroup events.
1307 *
1308 * That is why we treat cgroup events differently
1309 * here.
1310 */
1311 if (is_cgroup_event(event))
46cd6a7f 1312 run_end = perf_cgroup_event_time(event);
e5d1367f
SE
1313 else if (ctx->is_active)
1314 run_end = ctx->time;
acd1d7c1
PZ
1315 else
1316 run_end = event->tstamp_stopped;
1317
1318 event->total_time_enabled = run_end - event->tstamp_enabled;
f67218c3
PZ
1319
1320 if (event->state == PERF_EVENT_STATE_INACTIVE)
1321 run_end = event->tstamp_stopped;
1322 else
4158755d 1323 run_end = perf_event_time(event);
f67218c3
PZ
1324
1325 event->total_time_running = run_end - event->tstamp_running;
e5d1367f 1326
f67218c3
PZ
1327}
1328
96c21a46
PZ
1329/*
1330 * Update total_time_enabled and total_time_running for all events in a group.
1331 */
1332static void update_group_times(struct perf_event *leader)
1333{
1334 struct perf_event *event;
1335
1336 update_event_times(leader);
1337 list_for_each_entry(event, &leader->sibling_list, group_entry)
1338 update_event_times(event);
1339}
1340
889ff015
FW
1341static struct list_head *
1342ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1343{
1344 if (event->attr.pinned)
1345 return &ctx->pinned_groups;
1346 else
1347 return &ctx->flexible_groups;
1348}
1349
fccc714b 1350/*
cdd6c482 1351 * Add a event from the lists for its context.
fccc714b
PZ
1352 * Must be called with ctx->mutex and ctx->lock held.
1353 */
04289bb9 1354static void
cdd6c482 1355list_add_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1356{
c994d613
PZ
1357 lockdep_assert_held(&ctx->lock);
1358
8a49542c
PZ
1359 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1360 event->attach_state |= PERF_ATTACH_CONTEXT;
04289bb9
IM
1361
1362 /*
8a49542c
PZ
1363 * If we're a stand alone event or group leader, we go to the context
1364 * list, group events are kept attached to the group so that
1365 * perf_group_detach can, at all times, locate all siblings.
04289bb9 1366 */
8a49542c 1367 if (event->group_leader == event) {
889ff015
FW
1368 struct list_head *list;
1369
d6f962b5
FW
1370 if (is_software_event(event))
1371 event->group_flags |= PERF_GROUP_SOFTWARE;
1372
889ff015
FW
1373 list = ctx_group_list(event, ctx);
1374 list_add_tail(&event->group_entry, list);
5c148194 1375 }
592903cd 1376
08309379 1377 if (is_cgroup_event(event))
e5d1367f 1378 ctx->nr_cgroups++;
e5d1367f 1379
cdd6c482
IM
1380 list_add_rcu(&event->event_entry, &ctx->event_list);
1381 ctx->nr_events++;
1382 if (event->attr.inherit_stat)
bfbd3381 1383 ctx->nr_stat++;
5a3126d4
PZ
1384
1385 ctx->generation++;
04289bb9
IM
1386}
1387
0231bb53
JO
1388/*
1389 * Initialize event state based on the perf_event_attr::disabled.
1390 */
1391static inline void perf_event__state_init(struct perf_event *event)
1392{
1393 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1394 PERF_EVENT_STATE_INACTIVE;
1395}
1396
a723968c 1397static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
c320c7b7
ACM
1398{
1399 int entry = sizeof(u64); /* value */
1400 int size = 0;
1401 int nr = 1;
1402
1403 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1404 size += sizeof(u64);
1405
1406 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1407 size += sizeof(u64);
1408
1409 if (event->attr.read_format & PERF_FORMAT_ID)
1410 entry += sizeof(u64);
1411
1412 if (event->attr.read_format & PERF_FORMAT_GROUP) {
a723968c 1413 nr += nr_siblings;
c320c7b7
ACM
1414 size += sizeof(u64);
1415 }
1416
1417 size += entry * nr;
1418 event->read_size = size;
1419}
1420
a723968c 1421static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
c320c7b7
ACM
1422{
1423 struct perf_sample_data *data;
c320c7b7
ACM
1424 u16 size = 0;
1425
c320c7b7
ACM
1426 if (sample_type & PERF_SAMPLE_IP)
1427 size += sizeof(data->ip);
1428
6844c09d
ACM
1429 if (sample_type & PERF_SAMPLE_ADDR)
1430 size += sizeof(data->addr);
1431
1432 if (sample_type & PERF_SAMPLE_PERIOD)
1433 size += sizeof(data->period);
1434
c3feedf2
AK
1435 if (sample_type & PERF_SAMPLE_WEIGHT)
1436 size += sizeof(data->weight);
1437
6844c09d
ACM
1438 if (sample_type & PERF_SAMPLE_READ)
1439 size += event->read_size;
1440
d6be9ad6
SE
1441 if (sample_type & PERF_SAMPLE_DATA_SRC)
1442 size += sizeof(data->data_src.val);
1443
fdfbbd07
AK
1444 if (sample_type & PERF_SAMPLE_TRANSACTION)
1445 size += sizeof(data->txn);
1446
6844c09d
ACM
1447 event->header_size = size;
1448}
1449
a723968c
PZ
1450/*
1451 * Called at perf_event creation and when events are attached/detached from a
1452 * group.
1453 */
1454static void perf_event__header_size(struct perf_event *event)
1455{
1456 __perf_event_read_size(event,
1457 event->group_leader->nr_siblings);
1458 __perf_event_header_size(event, event->attr.sample_type);
1459}
1460
6844c09d
ACM
1461static void perf_event__id_header_size(struct perf_event *event)
1462{
1463 struct perf_sample_data *data;
1464 u64 sample_type = event->attr.sample_type;
1465 u16 size = 0;
1466
c320c7b7
ACM
1467 if (sample_type & PERF_SAMPLE_TID)
1468 size += sizeof(data->tid_entry);
1469
1470 if (sample_type & PERF_SAMPLE_TIME)
1471 size += sizeof(data->time);
1472
ff3d527c
AH
1473 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1474 size += sizeof(data->id);
1475
c320c7b7
ACM
1476 if (sample_type & PERF_SAMPLE_ID)
1477 size += sizeof(data->id);
1478
1479 if (sample_type & PERF_SAMPLE_STREAM_ID)
1480 size += sizeof(data->stream_id);
1481
1482 if (sample_type & PERF_SAMPLE_CPU)
1483 size += sizeof(data->cpu_entry);
1484
6844c09d 1485 event->id_header_size = size;
c320c7b7
ACM
1486}
1487
a723968c
PZ
1488static bool perf_event_validate_size(struct perf_event *event)
1489{
1490 /*
1491 * The values computed here will be over-written when we actually
1492 * attach the event.
1493 */
1494 __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
1495 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
1496 perf_event__id_header_size(event);
1497
1498 /*
1499 * Sum the lot; should not exceed the 64k limit we have on records.
1500 * Conservative limit to allow for callchains and other variable fields.
1501 */
1502 if (event->read_size + event->header_size +
1503 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
1504 return false;
1505
1506 return true;
1507}
1508
8a49542c
PZ
1509static void perf_group_attach(struct perf_event *event)
1510{
c320c7b7 1511 struct perf_event *group_leader = event->group_leader, *pos;
8a49542c 1512
74c3337c
PZ
1513 /*
1514 * We can have double attach due to group movement in perf_event_open.
1515 */
1516 if (event->attach_state & PERF_ATTACH_GROUP)
1517 return;
1518
8a49542c
PZ
1519 event->attach_state |= PERF_ATTACH_GROUP;
1520
1521 if (group_leader == event)
1522 return;
1523
652884fe
PZ
1524 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1525
8a49542c
PZ
1526 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1527 !is_software_event(event))
1528 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1529
1530 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1531 group_leader->nr_siblings++;
c320c7b7
ACM
1532
1533 perf_event__header_size(group_leader);
1534
1535 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1536 perf_event__header_size(pos);
8a49542c
PZ
1537}
1538
a63eaf34 1539/*
cdd6c482 1540 * Remove a event from the lists for its context.
fccc714b 1541 * Must be called with ctx->mutex and ctx->lock held.
a63eaf34 1542 */
04289bb9 1543static void
cdd6c482 1544list_del_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1545{
68cacd29 1546 struct perf_cpu_context *cpuctx;
652884fe
PZ
1547
1548 WARN_ON_ONCE(event->ctx != ctx);
1549 lockdep_assert_held(&ctx->lock);
1550
8a49542c
PZ
1551 /*
1552 * We can have double detach due to exit/hot-unplug + close.
1553 */
1554 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
a63eaf34 1555 return;
8a49542c
PZ
1556
1557 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1558
68cacd29 1559 if (is_cgroup_event(event)) {
e5d1367f 1560 ctx->nr_cgroups--;
70a01657
PZ
1561 /*
1562 * Because cgroup events are always per-cpu events, this will
1563 * always be called from the right CPU.
1564 */
68cacd29
SE
1565 cpuctx = __get_cpu_context(ctx);
1566 /*
70a01657
PZ
1567 * If there are no more cgroup events then clear cgrp to avoid
1568 * stale pointer in update_cgrp_time_from_cpuctx().
68cacd29
SE
1569 */
1570 if (!ctx->nr_cgroups)
1571 cpuctx->cgrp = NULL;
1572 }
e5d1367f 1573
cdd6c482
IM
1574 ctx->nr_events--;
1575 if (event->attr.inherit_stat)
bfbd3381 1576 ctx->nr_stat--;
8bc20959 1577
cdd6c482 1578 list_del_rcu(&event->event_entry);
04289bb9 1579
8a49542c
PZ
1580 if (event->group_leader == event)
1581 list_del_init(&event->group_entry);
5c148194 1582
96c21a46 1583 update_group_times(event);
b2e74a26
SE
1584
1585 /*
1586 * If event was in error state, then keep it
1587 * that way, otherwise bogus counts will be
1588 * returned on read(). The only way to get out
1589 * of error state is by explicit re-enabling
1590 * of the event
1591 */
1592 if (event->state > PERF_EVENT_STATE_OFF)
1593 event->state = PERF_EVENT_STATE_OFF;
5a3126d4
PZ
1594
1595 ctx->generation++;
050735b0
PZ
1596}
1597
8a49542c 1598static void perf_group_detach(struct perf_event *event)
050735b0
PZ
1599{
1600 struct perf_event *sibling, *tmp;
8a49542c
PZ
1601 struct list_head *list = NULL;
1602
1603 /*
1604 * We can have double detach due to exit/hot-unplug + close.
1605 */
1606 if (!(event->attach_state & PERF_ATTACH_GROUP))
1607 return;
1608
1609 event->attach_state &= ~PERF_ATTACH_GROUP;
1610
1611 /*
1612 * If this is a sibling, remove it from its group.
1613 */
1614 if (event->group_leader != event) {
1615 list_del_init(&event->group_entry);
1616 event->group_leader->nr_siblings--;
c320c7b7 1617 goto out;
8a49542c
PZ
1618 }
1619
1620 if (!list_empty(&event->group_entry))
1621 list = &event->group_entry;
2e2af50b 1622
04289bb9 1623 /*
cdd6c482
IM
1624 * If this was a group event with sibling events then
1625 * upgrade the siblings to singleton events by adding them
8a49542c 1626 * to whatever list we are on.
04289bb9 1627 */
cdd6c482 1628 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
8a49542c
PZ
1629 if (list)
1630 list_move_tail(&sibling->group_entry, list);
04289bb9 1631 sibling->group_leader = sibling;
d6f962b5
FW
1632
1633 /* Inherit group flags from the previous leader */
1634 sibling->group_flags = event->group_flags;
652884fe
PZ
1635
1636 WARN_ON_ONCE(sibling->ctx != event->ctx);
04289bb9 1637 }
c320c7b7
ACM
1638
1639out:
1640 perf_event__header_size(event->group_leader);
1641
1642 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1643 perf_event__header_size(tmp);
04289bb9
IM
1644}
1645
fadfe7be
JO
1646static bool is_orphaned_event(struct perf_event *event)
1647{
c6e5b732 1648 return event->state == PERF_EVENT_STATE_EXIT;
fadfe7be
JO
1649}
1650
66eb579e
MR
1651static inline int pmu_filter_match(struct perf_event *event)
1652{
1653 struct pmu *pmu = event->pmu;
1654 return pmu->filter_match ? pmu->filter_match(event) : 1;
1655}
1656
fa66f07a
SE
1657static inline int
1658event_filter_match(struct perf_event *event)
1659{
e5d1367f 1660 return (event->cpu == -1 || event->cpu == smp_processor_id())
66eb579e 1661 && perf_cgroup_match(event) && pmu_filter_match(event);
fa66f07a
SE
1662}
1663
9ffcfa6f
SE
1664static void
1665event_sched_out(struct perf_event *event,
3b6f9e5c 1666 struct perf_cpu_context *cpuctx,
cdd6c482 1667 struct perf_event_context *ctx)
3b6f9e5c 1668{
4158755d 1669 u64 tstamp = perf_event_time(event);
fa66f07a 1670 u64 delta;
652884fe
PZ
1671
1672 WARN_ON_ONCE(event->ctx != ctx);
1673 lockdep_assert_held(&ctx->lock);
1674
fa66f07a
SE
1675 /*
1676 * An event which could not be activated because of
1677 * filter mismatch still needs to have its timings
1678 * maintained, otherwise bogus information is return
1679 * via read() for time_enabled, time_running:
1680 */
1681 if (event->state == PERF_EVENT_STATE_INACTIVE
1682 && !event_filter_match(event)) {
e5d1367f 1683 delta = tstamp - event->tstamp_stopped;
fa66f07a 1684 event->tstamp_running += delta;
4158755d 1685 event->tstamp_stopped = tstamp;
fa66f07a
SE
1686 }
1687
cdd6c482 1688 if (event->state != PERF_EVENT_STATE_ACTIVE)
9ffcfa6f 1689 return;
3b6f9e5c 1690
44377277
AS
1691 perf_pmu_disable(event->pmu);
1692
cdd6c482
IM
1693 event->state = PERF_EVENT_STATE_INACTIVE;
1694 if (event->pending_disable) {
1695 event->pending_disable = 0;
1696 event->state = PERF_EVENT_STATE_OFF;
970892a9 1697 }
4158755d 1698 event->tstamp_stopped = tstamp;
a4eaf7f1 1699 event->pmu->del(event, 0);
cdd6c482 1700 event->oncpu = -1;
3b6f9e5c 1701
cdd6c482 1702 if (!is_software_event(event))
3b6f9e5c 1703 cpuctx->active_oncpu--;
2fde4f94
MR
1704 if (!--ctx->nr_active)
1705 perf_event_ctx_deactivate(ctx);
0f5a2601
PZ
1706 if (event->attr.freq && event->attr.sample_freq)
1707 ctx->nr_freq--;
cdd6c482 1708 if (event->attr.exclusive || !cpuctx->active_oncpu)
3b6f9e5c 1709 cpuctx->exclusive = 0;
44377277
AS
1710
1711 perf_pmu_enable(event->pmu);
3b6f9e5c
PM
1712}
1713
d859e29f 1714static void
cdd6c482 1715group_sched_out(struct perf_event *group_event,
d859e29f 1716 struct perf_cpu_context *cpuctx,
cdd6c482 1717 struct perf_event_context *ctx)
d859e29f 1718{
cdd6c482 1719 struct perf_event *event;
fa66f07a 1720 int state = group_event->state;
d859e29f 1721
cdd6c482 1722 event_sched_out(group_event, cpuctx, ctx);
d859e29f
PM
1723
1724 /*
1725 * Schedule out siblings (if any):
1726 */
cdd6c482
IM
1727 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1728 event_sched_out(event, cpuctx, ctx);
d859e29f 1729
fa66f07a 1730 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
d859e29f
PM
1731 cpuctx->exclusive = 0;
1732}
1733
45a0e07a 1734#define DETACH_GROUP 0x01UL
60beda84 1735#define DETACH_STATE 0x02UL
0017960f 1736
0793a61d 1737/*
cdd6c482 1738 * Cross CPU call to remove a performance event
0793a61d 1739 *
cdd6c482 1740 * We disable the event on the hardware level first. After that we
0793a61d
TG
1741 * remove it from the context list.
1742 */
fae3fde6
PZ
1743static void
1744__perf_remove_from_context(struct perf_event *event,
1745 struct perf_cpu_context *cpuctx,
1746 struct perf_event_context *ctx,
1747 void *info)
0793a61d 1748{
45a0e07a 1749 unsigned long flags = (unsigned long)info;
0793a61d 1750
cdd6c482 1751 event_sched_out(event, cpuctx, ctx);
45a0e07a 1752 if (flags & DETACH_GROUP)
46ce0fe9 1753 perf_group_detach(event);
cdd6c482 1754 list_del_event(event, ctx);
60beda84
PZ
1755 if (flags & DETACH_STATE)
1756 event->state = PERF_EVENT_STATE_EXIT;
39a43640
PZ
1757
1758 if (!ctx->nr_events && ctx->is_active) {
64ce3126 1759 ctx->is_active = 0;
39a43640
PZ
1760 if (ctx->task) {
1761 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
1762 cpuctx->task_ctx = NULL;
1763 }
64ce3126 1764 }
0793a61d
TG
1765}
1766
0793a61d 1767/*
cdd6c482 1768 * Remove the event from a task's (or a CPU's) list of events.
0793a61d 1769 *
cdd6c482
IM
1770 * If event->ctx is a cloned context, callers must make sure that
1771 * every task struct that event->ctx->task could possibly point to
c93f7669
PM
1772 * remains valid. This is OK when called from perf_release since
1773 * that only calls us on the top-level context, which can't be a clone.
cdd6c482 1774 * When called from perf_event_exit_task, it's OK because the
c93f7669 1775 * context has been detached from its task.
0793a61d 1776 */
45a0e07a 1777static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
0793a61d 1778{
fae3fde6 1779 lockdep_assert_held(&event->ctx->mutex);
0793a61d 1780
45a0e07a 1781 event_function_call(event, __perf_remove_from_context, (void *)flags);
0793a61d
TG
1782}
1783
d859e29f 1784/*
cdd6c482 1785 * Cross CPU call to disable a performance event
d859e29f 1786 */
fae3fde6
PZ
1787static void __perf_event_disable(struct perf_event *event,
1788 struct perf_cpu_context *cpuctx,
1789 struct perf_event_context *ctx,
1790 void *info)
7b648018 1791{
fae3fde6
PZ
1792 if (event->state < PERF_EVENT_STATE_INACTIVE)
1793 return;
7b648018 1794
fae3fde6
PZ
1795 update_context_time(ctx);
1796 update_cgrp_time_from_event(event);
1797 update_group_times(event);
1798 if (event == event->group_leader)
1799 group_sched_out(event, cpuctx, ctx);
1800 else
1801 event_sched_out(event, cpuctx, ctx);
1802 event->state = PERF_EVENT_STATE_OFF;
7b648018
PZ
1803}
1804
d859e29f 1805/*
cdd6c482 1806 * Disable a event.
c93f7669 1807 *
cdd6c482
IM
1808 * If event->ctx is a cloned context, callers must make sure that
1809 * every task struct that event->ctx->task could possibly point to
c93f7669 1810 * remains valid. This condition is satisifed when called through
cdd6c482
IM
1811 * perf_event_for_each_child or perf_event_for_each because they
1812 * hold the top-level event's child_mutex, so any descendant that
8ba289b8
PZ
1813 * goes to exit will block in perf_event_exit_event().
1814 *
cdd6c482 1815 * When called from perf_pending_event it's OK because event->ctx
c93f7669 1816 * is the current context on this CPU and preemption is disabled,
cdd6c482 1817 * hence we can't get into perf_event_task_sched_out for this context.
d859e29f 1818 */
f63a8daa 1819static void _perf_event_disable(struct perf_event *event)
d859e29f 1820{
cdd6c482 1821 struct perf_event_context *ctx = event->ctx;
d859e29f 1822
e625cce1 1823 raw_spin_lock_irq(&ctx->lock);
7b648018 1824 if (event->state <= PERF_EVENT_STATE_OFF) {
e625cce1 1825 raw_spin_unlock_irq(&ctx->lock);
7b648018 1826 return;
53cfbf59 1827 }
e625cce1 1828 raw_spin_unlock_irq(&ctx->lock);
7b648018 1829
fae3fde6
PZ
1830 event_function_call(event, __perf_event_disable, NULL);
1831}
1832
1833void perf_event_disable_local(struct perf_event *event)
1834{
1835 event_function_local(event, __perf_event_disable, NULL);
d859e29f 1836}
f63a8daa
PZ
1837
1838/*
1839 * Strictly speaking kernel users cannot create groups and therefore this
1840 * interface does not need the perf_event_ctx_lock() magic.
1841 */
1842void perf_event_disable(struct perf_event *event)
1843{
1844 struct perf_event_context *ctx;
1845
1846 ctx = perf_event_ctx_lock(event);
1847 _perf_event_disable(event);
1848 perf_event_ctx_unlock(event, ctx);
1849}
dcfce4a0 1850EXPORT_SYMBOL_GPL(perf_event_disable);
d859e29f 1851
e5d1367f
SE
1852static void perf_set_shadow_time(struct perf_event *event,
1853 struct perf_event_context *ctx,
1854 u64 tstamp)
1855{
1856 /*
1857 * use the correct time source for the time snapshot
1858 *
1859 * We could get by without this by leveraging the
1860 * fact that to get to this function, the caller
1861 * has most likely already called update_context_time()
1862 * and update_cgrp_time_xx() and thus both timestamp
1863 * are identical (or very close). Given that tstamp is,
1864 * already adjusted for cgroup, we could say that:
1865 * tstamp - ctx->timestamp
1866 * is equivalent to
1867 * tstamp - cgrp->timestamp.
1868 *
1869 * Then, in perf_output_read(), the calculation would
1870 * work with no changes because:
1871 * - event is guaranteed scheduled in
1872 * - no scheduled out in between
1873 * - thus the timestamp would be the same
1874 *
1875 * But this is a bit hairy.
1876 *
1877 * So instead, we have an explicit cgroup call to remain
1878 * within the time time source all along. We believe it
1879 * is cleaner and simpler to understand.
1880 */
1881 if (is_cgroup_event(event))
1882 perf_cgroup_set_shadow_time(event, tstamp);
1883 else
1884 event->shadow_ctx_time = tstamp - ctx->timestamp;
1885}
1886
4fe757dd
PZ
1887#define MAX_INTERRUPTS (~0ULL)
1888
1889static void perf_log_throttle(struct perf_event *event, int enable);
ec0d7729 1890static void perf_log_itrace_start(struct perf_event *event);
4fe757dd 1891
235c7fc7 1892static int
9ffcfa6f 1893event_sched_in(struct perf_event *event,
235c7fc7 1894 struct perf_cpu_context *cpuctx,
6e37738a 1895 struct perf_event_context *ctx)
235c7fc7 1896{
4158755d 1897 u64 tstamp = perf_event_time(event);
44377277 1898 int ret = 0;
4158755d 1899
63342411
PZ
1900 lockdep_assert_held(&ctx->lock);
1901
cdd6c482 1902 if (event->state <= PERF_EVENT_STATE_OFF)
235c7fc7
IM
1903 return 0;
1904
cdd6c482 1905 event->state = PERF_EVENT_STATE_ACTIVE;
6e37738a 1906 event->oncpu = smp_processor_id();
4fe757dd
PZ
1907
1908 /*
1909 * Unthrottle events, since we scheduled we might have missed several
1910 * ticks already, also for a heavily scheduling task there is little
1911 * guarantee it'll get a tick in a timely manner.
1912 */
1913 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1914 perf_log_throttle(event, 1);
1915 event->hw.interrupts = 0;
1916 }
1917
235c7fc7
IM
1918 /*
1919 * The new state must be visible before we turn it on in the hardware:
1920 */
1921 smp_wmb();
1922
44377277
AS
1923 perf_pmu_disable(event->pmu);
1924
72f669c0
SL
1925 perf_set_shadow_time(event, ctx, tstamp);
1926
ec0d7729
AS
1927 perf_log_itrace_start(event);
1928
a4eaf7f1 1929 if (event->pmu->add(event, PERF_EF_START)) {
cdd6c482
IM
1930 event->state = PERF_EVENT_STATE_INACTIVE;
1931 event->oncpu = -1;
44377277
AS
1932 ret = -EAGAIN;
1933 goto out;
235c7fc7
IM
1934 }
1935
00a2916f
PZ
1936 event->tstamp_running += tstamp - event->tstamp_stopped;
1937
cdd6c482 1938 if (!is_software_event(event))
3b6f9e5c 1939 cpuctx->active_oncpu++;
2fde4f94
MR
1940 if (!ctx->nr_active++)
1941 perf_event_ctx_activate(ctx);
0f5a2601
PZ
1942 if (event->attr.freq && event->attr.sample_freq)
1943 ctx->nr_freq++;
235c7fc7 1944
cdd6c482 1945 if (event->attr.exclusive)
3b6f9e5c
PM
1946 cpuctx->exclusive = 1;
1947
44377277
AS
1948out:
1949 perf_pmu_enable(event->pmu);
1950
1951 return ret;
235c7fc7
IM
1952}
1953
6751b71e 1954static int
cdd6c482 1955group_sched_in(struct perf_event *group_event,
6751b71e 1956 struct perf_cpu_context *cpuctx,
6e37738a 1957 struct perf_event_context *ctx)
6751b71e 1958{
6bde9b6c 1959 struct perf_event *event, *partial_group = NULL;
4a234593 1960 struct pmu *pmu = ctx->pmu;
d7842da4
SE
1961 u64 now = ctx->time;
1962 bool simulate = false;
6751b71e 1963
cdd6c482 1964 if (group_event->state == PERF_EVENT_STATE_OFF)
6751b71e
PM
1965 return 0;
1966
fbbe0701 1967 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
6bde9b6c 1968
9ffcfa6f 1969 if (event_sched_in(group_event, cpuctx, ctx)) {
ad5133b7 1970 pmu->cancel_txn(pmu);
272325c4 1971 perf_mux_hrtimer_restart(cpuctx);
6751b71e 1972 return -EAGAIN;
90151c35 1973 }
6751b71e
PM
1974
1975 /*
1976 * Schedule in siblings as one group (if any):
1977 */
cdd6c482 1978 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
9ffcfa6f 1979 if (event_sched_in(event, cpuctx, ctx)) {
cdd6c482 1980 partial_group = event;
6751b71e
PM
1981 goto group_error;
1982 }
1983 }
1984
9ffcfa6f 1985 if (!pmu->commit_txn(pmu))
6e85158c 1986 return 0;
9ffcfa6f 1987
6751b71e
PM
1988group_error:
1989 /*
1990 * Groups can be scheduled in as one unit only, so undo any
1991 * partial group before returning:
d7842da4
SE
1992 * The events up to the failed event are scheduled out normally,
1993 * tstamp_stopped will be updated.
1994 *
1995 * The failed events and the remaining siblings need to have
1996 * their timings updated as if they had gone thru event_sched_in()
1997 * and event_sched_out(). This is required to get consistent timings
1998 * across the group. This also takes care of the case where the group
1999 * could never be scheduled by ensuring tstamp_stopped is set to mark
2000 * the time the event was actually stopped, such that time delta
2001 * calculation in update_event_times() is correct.
6751b71e 2002 */
cdd6c482
IM
2003 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2004 if (event == partial_group)
d7842da4
SE
2005 simulate = true;
2006
2007 if (simulate) {
2008 event->tstamp_running += now - event->tstamp_stopped;
2009 event->tstamp_stopped = now;
2010 } else {
2011 event_sched_out(event, cpuctx, ctx);
2012 }
6751b71e 2013 }
9ffcfa6f 2014 event_sched_out(group_event, cpuctx, ctx);
6751b71e 2015
ad5133b7 2016 pmu->cancel_txn(pmu);
90151c35 2017
272325c4 2018 perf_mux_hrtimer_restart(cpuctx);
9e630205 2019
6751b71e
PM
2020 return -EAGAIN;
2021}
2022
3b6f9e5c 2023/*
cdd6c482 2024 * Work out whether we can put this event group on the CPU now.
3b6f9e5c 2025 */
cdd6c482 2026static int group_can_go_on(struct perf_event *event,
3b6f9e5c
PM
2027 struct perf_cpu_context *cpuctx,
2028 int can_add_hw)
2029{
2030 /*
cdd6c482 2031 * Groups consisting entirely of software events can always go on.
3b6f9e5c 2032 */
d6f962b5 2033 if (event->group_flags & PERF_GROUP_SOFTWARE)
3b6f9e5c
PM
2034 return 1;
2035 /*
2036 * If an exclusive group is already on, no other hardware
cdd6c482 2037 * events can go on.
3b6f9e5c
PM
2038 */
2039 if (cpuctx->exclusive)
2040 return 0;
2041 /*
2042 * If this group is exclusive and there are already
cdd6c482 2043 * events on the CPU, it can't go on.
3b6f9e5c 2044 */
cdd6c482 2045 if (event->attr.exclusive && cpuctx->active_oncpu)
3b6f9e5c
PM
2046 return 0;
2047 /*
2048 * Otherwise, try to add it if all previous groups were able
2049 * to go on.
2050 */
2051 return can_add_hw;
2052}
2053
cdd6c482
IM
2054static void add_event_to_ctx(struct perf_event *event,
2055 struct perf_event_context *ctx)
53cfbf59 2056{
4158755d
SE
2057 u64 tstamp = perf_event_time(event);
2058
cdd6c482 2059 list_add_event(event, ctx);
8a49542c 2060 perf_group_attach(event);
4158755d
SE
2061 event->tstamp_enabled = tstamp;
2062 event->tstamp_running = tstamp;
2063 event->tstamp_stopped = tstamp;
53cfbf59
PM
2064}
2065
3e349507
PZ
2066static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2067 struct perf_event_context *ctx);
2c29ef0f
PZ
2068static void
2069ctx_sched_in(struct perf_event_context *ctx,
2070 struct perf_cpu_context *cpuctx,
2071 enum event_type_t event_type,
2072 struct task_struct *task);
fe4b04fa 2073
dce5855b
PZ
2074static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2075 struct perf_event_context *ctx,
2076 struct task_struct *task)
2077{
2078 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2079 if (ctx)
2080 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2081 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2082 if (ctx)
2083 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2084}
2085
3e349507
PZ
2086static void ctx_resched(struct perf_cpu_context *cpuctx,
2087 struct perf_event_context *task_ctx)
0017960f 2088{
3e349507
PZ
2089 perf_pmu_disable(cpuctx->ctx.pmu);
2090 if (task_ctx)
2091 task_ctx_sched_out(cpuctx, task_ctx);
2092 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
2093 perf_event_sched_in(cpuctx, task_ctx, current);
2094 perf_pmu_enable(cpuctx->ctx.pmu);
0017960f
PZ
2095}
2096
0793a61d 2097/*
cdd6c482 2098 * Cross CPU call to install and enable a performance event
682076ae
PZ
2099 *
2100 * Must be called with ctx->mutex held
0793a61d 2101 */
fe4b04fa 2102static int __perf_install_in_context(void *info)
0793a61d 2103{
39a43640 2104 struct perf_event_context *ctx = info;
108b02cf 2105 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2c29ef0f 2106 struct perf_event_context *task_ctx = cpuctx->task_ctx;
0793a61d 2107
63b6da39 2108 raw_spin_lock(&cpuctx->ctx.lock);
39a43640 2109 if (ctx->task) {
b58f6b0d 2110 raw_spin_lock(&ctx->lock);
39a43640
PZ
2111 /*
2112 * If we hit the 'wrong' task, we've since scheduled and
2113 * everything should be sorted, nothing to do!
2114 */
b58f6b0d 2115 task_ctx = ctx;
39a43640 2116 if (ctx->task != current)
63b6da39 2117 goto unlock;
b58f6b0d 2118
39a43640
PZ
2119 /*
2120 * If task_ctx is set, it had better be to us.
2121 */
2122 WARN_ON_ONCE(cpuctx->task_ctx != ctx && cpuctx->task_ctx);
63b6da39
PZ
2123 } else if (task_ctx) {
2124 raw_spin_lock(&task_ctx->lock);
2c29ef0f 2125 }
b58f6b0d 2126
39a43640 2127 ctx_resched(cpuctx, task_ctx);
63b6da39 2128unlock:
2c29ef0f 2129 perf_ctx_unlock(cpuctx, task_ctx);
fe4b04fa
PZ
2130
2131 return 0;
0793a61d
TG
2132}
2133
2134/*
cdd6c482 2135 * Attach a performance event to a context
0793a61d
TG
2136 */
2137static void
cdd6c482
IM
2138perf_install_in_context(struct perf_event_context *ctx,
2139 struct perf_event *event,
0793a61d
TG
2140 int cpu)
2141{
39a43640
PZ
2142 struct task_struct *task = NULL;
2143
fe4b04fa
PZ
2144 lockdep_assert_held(&ctx->mutex);
2145
c3f00c70 2146 event->ctx = ctx;
0cda4c02
YZ
2147 if (event->cpu != -1)
2148 event->cpu = cpu;
c3f00c70 2149
39a43640
PZ
2150 /*
2151 * Installing events is tricky because we cannot rely on ctx->is_active
2152 * to be set in case this is the nr_events 0 -> 1 transition.
2153 *
2154 * So what we do is we add the event to the list here, which will allow
2155 * a future context switch to DTRT and then send a racy IPI. If the IPI
2156 * fails to hit the right task, this means a context switch must have
2157 * happened and that will have taken care of business.
2158 */
2159 raw_spin_lock_irq(&ctx->lock);
63b6da39 2160 task = ctx->task;
84c4e620 2161
63b6da39 2162 /*
84c4e620
PZ
2163 * If between ctx = find_get_context() and mutex_lock(&ctx->mutex) the
2164 * ctx gets destroyed, we must not install an event into it.
2165 *
2166 * This is normally tested for after we acquire the mutex, so this is
2167 * a sanity check.
63b6da39 2168 */
84c4e620 2169 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
63b6da39
PZ
2170 raw_spin_unlock_irq(&ctx->lock);
2171 return;
2172 }
6f932e5b
PZ
2173
2174 if (ctx->is_active) {
2175 update_context_time(ctx);
2176 update_cgrp_time_from_event(event);
2177 }
2178
39a43640 2179 add_event_to_ctx(event, ctx);
39a43640
PZ
2180 raw_spin_unlock_irq(&ctx->lock);
2181
2182 if (task)
2183 task_function_call(task, __perf_install_in_context, ctx);
2184 else
2185 cpu_function_call(cpu, __perf_install_in_context, ctx);
0793a61d
TG
2186}
2187
fa289bec 2188/*
cdd6c482 2189 * Put a event into inactive state and update time fields.
fa289bec
PM
2190 * Enabling the leader of a group effectively enables all
2191 * the group members that aren't explicitly disabled, so we
2192 * have to update their ->tstamp_enabled also.
2193 * Note: this works for group members as well as group leaders
2194 * since the non-leader members' sibling_lists will be empty.
2195 */
1d9b482e 2196static void __perf_event_mark_enabled(struct perf_event *event)
fa289bec 2197{
cdd6c482 2198 struct perf_event *sub;
4158755d 2199 u64 tstamp = perf_event_time(event);
fa289bec 2200
cdd6c482 2201 event->state = PERF_EVENT_STATE_INACTIVE;
4158755d 2202 event->tstamp_enabled = tstamp - event->total_time_enabled;
9ed6060d 2203 list_for_each_entry(sub, &event->sibling_list, group_entry) {
4158755d
SE
2204 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
2205 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
9ed6060d 2206 }
fa289bec
PM
2207}
2208
d859e29f 2209/*
cdd6c482 2210 * Cross CPU call to enable a performance event
d859e29f 2211 */
fae3fde6
PZ
2212static void __perf_event_enable(struct perf_event *event,
2213 struct perf_cpu_context *cpuctx,
2214 struct perf_event_context *ctx,
2215 void *info)
04289bb9 2216{
cdd6c482 2217 struct perf_event *leader = event->group_leader;
fae3fde6 2218 struct perf_event_context *task_ctx;
04289bb9 2219
6e801e01
PZ
2220 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2221 event->state <= PERF_EVENT_STATE_ERROR)
fae3fde6 2222 return;
3cbed429 2223
4af4998b 2224 update_context_time(ctx);
1d9b482e 2225 __perf_event_mark_enabled(event);
04289bb9 2226
fae3fde6
PZ
2227 if (!ctx->is_active)
2228 return;
2229
e5d1367f 2230 if (!event_filter_match(event)) {
fae3fde6
PZ
2231 if (is_cgroup_event(event)) {
2232 perf_cgroup_set_timestamp(current, ctx); // XXX ?
e5d1367f 2233 perf_cgroup_defer_enabled(event);
fae3fde6
PZ
2234 }
2235 return;
e5d1367f 2236 }
f4c4176f 2237
04289bb9 2238 /*
cdd6c482 2239 * If the event is in a group and isn't the group leader,
d859e29f 2240 * then don't put it on unless the group is on.
04289bb9 2241 */
cdd6c482 2242 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
fae3fde6 2243 return;
fe4b04fa 2244
fae3fde6
PZ
2245 task_ctx = cpuctx->task_ctx;
2246 if (ctx->task)
2247 WARN_ON_ONCE(task_ctx != ctx);
d859e29f 2248
fae3fde6 2249 ctx_resched(cpuctx, task_ctx);
7b648018
PZ
2250}
2251
d859e29f 2252/*
cdd6c482 2253 * Enable a event.
c93f7669 2254 *
cdd6c482
IM
2255 * If event->ctx is a cloned context, callers must make sure that
2256 * every task struct that event->ctx->task could possibly point to
c93f7669 2257 * remains valid. This condition is satisfied when called through
cdd6c482
IM
2258 * perf_event_for_each_child or perf_event_for_each as described
2259 * for perf_event_disable.
d859e29f 2260 */
f63a8daa 2261static void _perf_event_enable(struct perf_event *event)
d859e29f 2262{
cdd6c482 2263 struct perf_event_context *ctx = event->ctx;
d859e29f 2264
7b648018 2265 raw_spin_lock_irq(&ctx->lock);
6e801e01
PZ
2266 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2267 event->state < PERF_EVENT_STATE_ERROR) {
7b648018 2268 raw_spin_unlock_irq(&ctx->lock);
d859e29f
PM
2269 return;
2270 }
2271
d859e29f 2272 /*
cdd6c482 2273 * If the event is in error state, clear that first.
7b648018
PZ
2274 *
2275 * That way, if we see the event in error state below, we know that it
2276 * has gone back into error state, as distinct from the task having
2277 * been scheduled away before the cross-call arrived.
d859e29f 2278 */
cdd6c482
IM
2279 if (event->state == PERF_EVENT_STATE_ERROR)
2280 event->state = PERF_EVENT_STATE_OFF;
e625cce1 2281 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa 2282
fae3fde6 2283 event_function_call(event, __perf_event_enable, NULL);
d859e29f 2284}
f63a8daa
PZ
2285
2286/*
2287 * See perf_event_disable();
2288 */
2289void perf_event_enable(struct perf_event *event)
2290{
2291 struct perf_event_context *ctx;
2292
2293 ctx = perf_event_ctx_lock(event);
2294 _perf_event_enable(event);
2295 perf_event_ctx_unlock(event, ctx);
2296}
dcfce4a0 2297EXPORT_SYMBOL_GPL(perf_event_enable);
d859e29f 2298
f63a8daa 2299static int _perf_event_refresh(struct perf_event *event, int refresh)
79f14641 2300{
2023b359 2301 /*
cdd6c482 2302 * not supported on inherited events
2023b359 2303 */
2e939d1d 2304 if (event->attr.inherit || !is_sampling_event(event))
2023b359
PZ
2305 return -EINVAL;
2306
cdd6c482 2307 atomic_add(refresh, &event->event_limit);
f63a8daa 2308 _perf_event_enable(event);
2023b359
PZ
2309
2310 return 0;
79f14641 2311}
f63a8daa
PZ
2312
2313/*
2314 * See perf_event_disable()
2315 */
2316int perf_event_refresh(struct perf_event *event, int refresh)
2317{
2318 struct perf_event_context *ctx;
2319 int ret;
2320
2321 ctx = perf_event_ctx_lock(event);
2322 ret = _perf_event_refresh(event, refresh);
2323 perf_event_ctx_unlock(event, ctx);
2324
2325 return ret;
2326}
26ca5c11 2327EXPORT_SYMBOL_GPL(perf_event_refresh);
79f14641 2328
5b0311e1
FW
2329static void ctx_sched_out(struct perf_event_context *ctx,
2330 struct perf_cpu_context *cpuctx,
2331 enum event_type_t event_type)
235c7fc7 2332{
db24d33e 2333 int is_active = ctx->is_active;
c994d613 2334 struct perf_event *event;
235c7fc7 2335
c994d613 2336 lockdep_assert_held(&ctx->lock);
235c7fc7 2337
39a43640
PZ
2338 if (likely(!ctx->nr_events)) {
2339 /*
2340 * See __perf_remove_from_context().
2341 */
2342 WARN_ON_ONCE(ctx->is_active);
2343 if (ctx->task)
2344 WARN_ON_ONCE(cpuctx->task_ctx);
facc4307 2345 return;
39a43640
PZ
2346 }
2347
db24d33e 2348 ctx->is_active &= ~event_type;
63e30d3e
PZ
2349 if (ctx->task) {
2350 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2351 if (!ctx->is_active)
2352 cpuctx->task_ctx = NULL;
2353 }
facc4307 2354
4af4998b 2355 update_context_time(ctx);
e5d1367f 2356 update_cgrp_time_from_cpuctx(cpuctx);
5b0311e1 2357 if (!ctx->nr_active)
facc4307 2358 return;
5b0311e1 2359
075e0b00 2360 perf_pmu_disable(ctx->pmu);
db24d33e 2361 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
889ff015
FW
2362 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2363 group_sched_out(event, cpuctx, ctx);
9ed6060d 2364 }
889ff015 2365
db24d33e 2366 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
889ff015 2367 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
8c9ed8e1 2368 group_sched_out(event, cpuctx, ctx);
9ed6060d 2369 }
1b9a644f 2370 perf_pmu_enable(ctx->pmu);
235c7fc7
IM
2371}
2372
564c2b21 2373/*
5a3126d4
PZ
2374 * Test whether two contexts are equivalent, i.e. whether they have both been
2375 * cloned from the same version of the same context.
2376 *
2377 * Equivalence is measured using a generation number in the context that is
2378 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2379 * and list_del_event().
564c2b21 2380 */
cdd6c482
IM
2381static int context_equiv(struct perf_event_context *ctx1,
2382 struct perf_event_context *ctx2)
564c2b21 2383{
211de6eb
PZ
2384 lockdep_assert_held(&ctx1->lock);
2385 lockdep_assert_held(&ctx2->lock);
2386
5a3126d4
PZ
2387 /* Pinning disables the swap optimization */
2388 if (ctx1->pin_count || ctx2->pin_count)
2389 return 0;
2390
2391 /* If ctx1 is the parent of ctx2 */
2392 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2393 return 1;
2394
2395 /* If ctx2 is the parent of ctx1 */
2396 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2397 return 1;
2398
2399 /*
2400 * If ctx1 and ctx2 have the same parent; we flatten the parent
2401 * hierarchy, see perf_event_init_context().
2402 */
2403 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2404 ctx1->parent_gen == ctx2->parent_gen)
2405 return 1;
2406
2407 /* Unmatched */
2408 return 0;
564c2b21
PM
2409}
2410
cdd6c482
IM
2411static void __perf_event_sync_stat(struct perf_event *event,
2412 struct perf_event *next_event)
bfbd3381
PZ
2413{
2414 u64 value;
2415
cdd6c482 2416 if (!event->attr.inherit_stat)
bfbd3381
PZ
2417 return;
2418
2419 /*
cdd6c482 2420 * Update the event value, we cannot use perf_event_read()
bfbd3381
PZ
2421 * because we're in the middle of a context switch and have IRQs
2422 * disabled, which upsets smp_call_function_single(), however
cdd6c482 2423 * we know the event must be on the current CPU, therefore we
bfbd3381
PZ
2424 * don't need to use it.
2425 */
cdd6c482
IM
2426 switch (event->state) {
2427 case PERF_EVENT_STATE_ACTIVE:
3dbebf15
PZ
2428 event->pmu->read(event);
2429 /* fall-through */
bfbd3381 2430
cdd6c482
IM
2431 case PERF_EVENT_STATE_INACTIVE:
2432 update_event_times(event);
bfbd3381
PZ
2433 break;
2434
2435 default:
2436 break;
2437 }
2438
2439 /*
cdd6c482 2440 * In order to keep per-task stats reliable we need to flip the event
bfbd3381
PZ
2441 * values when we flip the contexts.
2442 */
e7850595
PZ
2443 value = local64_read(&next_event->count);
2444 value = local64_xchg(&event->count, value);
2445 local64_set(&next_event->count, value);
bfbd3381 2446
cdd6c482
IM
2447 swap(event->total_time_enabled, next_event->total_time_enabled);
2448 swap(event->total_time_running, next_event->total_time_running);
19d2e755 2449
bfbd3381 2450 /*
19d2e755 2451 * Since we swizzled the values, update the user visible data too.
bfbd3381 2452 */
cdd6c482
IM
2453 perf_event_update_userpage(event);
2454 perf_event_update_userpage(next_event);
bfbd3381
PZ
2455}
2456
cdd6c482
IM
2457static void perf_event_sync_stat(struct perf_event_context *ctx,
2458 struct perf_event_context *next_ctx)
bfbd3381 2459{
cdd6c482 2460 struct perf_event *event, *next_event;
bfbd3381
PZ
2461
2462 if (!ctx->nr_stat)
2463 return;
2464
02ffdbc8
PZ
2465 update_context_time(ctx);
2466
cdd6c482
IM
2467 event = list_first_entry(&ctx->event_list,
2468 struct perf_event, event_entry);
bfbd3381 2469
cdd6c482
IM
2470 next_event = list_first_entry(&next_ctx->event_list,
2471 struct perf_event, event_entry);
bfbd3381 2472
cdd6c482
IM
2473 while (&event->event_entry != &ctx->event_list &&
2474 &next_event->event_entry != &next_ctx->event_list) {
bfbd3381 2475
cdd6c482 2476 __perf_event_sync_stat(event, next_event);
bfbd3381 2477
cdd6c482
IM
2478 event = list_next_entry(event, event_entry);
2479 next_event = list_next_entry(next_event, event_entry);
bfbd3381
PZ
2480 }
2481}
2482
fe4b04fa
PZ
2483static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2484 struct task_struct *next)
0793a61d 2485{
8dc85d54 2486 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482 2487 struct perf_event_context *next_ctx;
5a3126d4 2488 struct perf_event_context *parent, *next_parent;
108b02cf 2489 struct perf_cpu_context *cpuctx;
c93f7669 2490 int do_switch = 1;
0793a61d 2491
108b02cf
PZ
2492 if (likely(!ctx))
2493 return;
10989fb2 2494
108b02cf
PZ
2495 cpuctx = __get_cpu_context(ctx);
2496 if (!cpuctx->task_ctx)
0793a61d
TG
2497 return;
2498
c93f7669 2499 rcu_read_lock();
8dc85d54 2500 next_ctx = next->perf_event_ctxp[ctxn];
5a3126d4
PZ
2501 if (!next_ctx)
2502 goto unlock;
2503
2504 parent = rcu_dereference(ctx->parent_ctx);
2505 next_parent = rcu_dereference(next_ctx->parent_ctx);
2506
2507 /* If neither context have a parent context; they cannot be clones. */
802c8a61 2508 if (!parent && !next_parent)
5a3126d4
PZ
2509 goto unlock;
2510
2511 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
c93f7669
PM
2512 /*
2513 * Looks like the two contexts are clones, so we might be
2514 * able to optimize the context switch. We lock both
2515 * contexts and check that they are clones under the
2516 * lock (including re-checking that neither has been
2517 * uncloned in the meantime). It doesn't matter which
2518 * order we take the locks because no other cpu could
2519 * be trying to lock both of these tasks.
2520 */
e625cce1
TG
2521 raw_spin_lock(&ctx->lock);
2522 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f7669 2523 if (context_equiv(ctx, next_ctx)) {
63b6da39
PZ
2524 WRITE_ONCE(ctx->task, next);
2525 WRITE_ONCE(next_ctx->task, task);
5a158c3c
YZ
2526
2527 swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
2528
63b6da39
PZ
2529 /*
2530 * RCU_INIT_POINTER here is safe because we've not
2531 * modified the ctx and the above modification of
2532 * ctx->task and ctx->task_ctx_data are immaterial
2533 * since those values are always verified under
2534 * ctx->lock which we're now holding.
2535 */
2536 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
2537 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
2538
c93f7669 2539 do_switch = 0;
bfbd3381 2540
cdd6c482 2541 perf_event_sync_stat(ctx, next_ctx);
c93f7669 2542 }
e625cce1
TG
2543 raw_spin_unlock(&next_ctx->lock);
2544 raw_spin_unlock(&ctx->lock);
564c2b21 2545 }
5a3126d4 2546unlock:
c93f7669 2547 rcu_read_unlock();
564c2b21 2548
c93f7669 2549 if (do_switch) {
facc4307 2550 raw_spin_lock(&ctx->lock);
8833d0e2 2551 task_ctx_sched_out(cpuctx, ctx);
facc4307 2552 raw_spin_unlock(&ctx->lock);
c93f7669 2553 }
0793a61d
TG
2554}
2555
ba532500
YZ
2556void perf_sched_cb_dec(struct pmu *pmu)
2557{
2558 this_cpu_dec(perf_sched_cb_usages);
2559}
2560
2561void perf_sched_cb_inc(struct pmu *pmu)
2562{
2563 this_cpu_inc(perf_sched_cb_usages);
2564}
2565
2566/*
2567 * This function provides the context switch callback to the lower code
2568 * layer. It is invoked ONLY when the context switch callback is enabled.
2569 */
2570static void perf_pmu_sched_task(struct task_struct *prev,
2571 struct task_struct *next,
2572 bool sched_in)
2573{
2574 struct perf_cpu_context *cpuctx;
2575 struct pmu *pmu;
2576 unsigned long flags;
2577
2578 if (prev == next)
2579 return;
2580
2581 local_irq_save(flags);
2582
2583 rcu_read_lock();
2584
2585 list_for_each_entry_rcu(pmu, &pmus, entry) {
2586 if (pmu->sched_task) {
2587 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2588
2589 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2590
2591 perf_pmu_disable(pmu);
2592
2593 pmu->sched_task(cpuctx->task_ctx, sched_in);
2594
2595 perf_pmu_enable(pmu);
2596
2597 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2598 }
2599 }
2600
2601 rcu_read_unlock();
2602
2603 local_irq_restore(flags);
2604}
2605
45ac1403
AH
2606static void perf_event_switch(struct task_struct *task,
2607 struct task_struct *next_prev, bool sched_in);
2608
8dc85d54
PZ
2609#define for_each_task_context_nr(ctxn) \
2610 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2611
2612/*
2613 * Called from scheduler to remove the events of the current task,
2614 * with interrupts disabled.
2615 *
2616 * We stop each event and update the event value in event->count.
2617 *
2618 * This does not protect us against NMI, but disable()
2619 * sets the disabled bit in the control field of event _before_
2620 * accessing the event control register. If a NMI hits, then it will
2621 * not restart the event.
2622 */
ab0cce56
JO
2623void __perf_event_task_sched_out(struct task_struct *task,
2624 struct task_struct *next)
8dc85d54
PZ
2625{
2626 int ctxn;
2627
ba532500
YZ
2628 if (__this_cpu_read(perf_sched_cb_usages))
2629 perf_pmu_sched_task(task, next, false);
2630
45ac1403
AH
2631 if (atomic_read(&nr_switch_events))
2632 perf_event_switch(task, next, false);
2633
8dc85d54
PZ
2634 for_each_task_context_nr(ctxn)
2635 perf_event_context_sched_out(task, ctxn, next);
e5d1367f
SE
2636
2637 /*
2638 * if cgroup events exist on this CPU, then we need
2639 * to check if we have to switch out PMU state.
2640 * cgroup event are system-wide mode only
2641 */
4a32fea9 2642 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
a8d757ef 2643 perf_cgroup_sched_out(task, next);
8dc85d54
PZ
2644}
2645
3e349507
PZ
2646static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2647 struct perf_event_context *ctx)
a08b159f 2648{
a63eaf34
PM
2649 if (!cpuctx->task_ctx)
2650 return;
012b84da
IM
2651
2652 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2653 return;
2654
04dc2dbb 2655 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
a08b159f
PM
2656}
2657
5b0311e1
FW
2658/*
2659 * Called with IRQs disabled
2660 */
2661static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2662 enum event_type_t event_type)
2663{
2664 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb9
IM
2665}
2666
235c7fc7 2667static void
5b0311e1 2668ctx_pinned_sched_in(struct perf_event_context *ctx,
6e37738a 2669 struct perf_cpu_context *cpuctx)
0793a61d 2670{
cdd6c482 2671 struct perf_event *event;
0793a61d 2672
889ff015
FW
2673 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2674 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2675 continue;
5632ab12 2676 if (!event_filter_match(event))
3b6f9e5c
PM
2677 continue;
2678
e5d1367f
SE
2679 /* may need to reset tstamp_enabled */
2680 if (is_cgroup_event(event))
2681 perf_cgroup_mark_enabled(event, ctx);
2682
8c9ed8e1 2683 if (group_can_go_on(event, cpuctx, 1))
6e37738a 2684 group_sched_in(event, cpuctx, ctx);
3b6f9e5c
PM
2685
2686 /*
2687 * If this pinned group hasn't been scheduled,
2688 * put it in error state.
2689 */
cdd6c482
IM
2690 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2691 update_group_times(event);
2692 event->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2693 }
3b6f9e5c 2694 }
5b0311e1
FW
2695}
2696
2697static void
2698ctx_flexible_sched_in(struct perf_event_context *ctx,
6e37738a 2699 struct perf_cpu_context *cpuctx)
5b0311e1
FW
2700{
2701 struct perf_event *event;
2702 int can_add_hw = 1;
3b6f9e5c 2703
889ff015
FW
2704 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2705 /* Ignore events in OFF or ERROR state */
2706 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2707 continue;
04289bb9
IM
2708 /*
2709 * Listen to the 'cpu' scheduling filter constraint
cdd6c482 2710 * of events:
04289bb9 2711 */
5632ab12 2712 if (!event_filter_match(event))
0793a61d
TG
2713 continue;
2714
e5d1367f
SE
2715 /* may need to reset tstamp_enabled */
2716 if (is_cgroup_event(event))
2717 perf_cgroup_mark_enabled(event, ctx);
2718
9ed6060d 2719 if (group_can_go_on(event, cpuctx, can_add_hw)) {
6e37738a 2720 if (group_sched_in(event, cpuctx, ctx))
dd0e6ba2 2721 can_add_hw = 0;
9ed6060d 2722 }
0793a61d 2723 }
5b0311e1
FW
2724}
2725
2726static void
2727ctx_sched_in(struct perf_event_context *ctx,
2728 struct perf_cpu_context *cpuctx,
e5d1367f
SE
2729 enum event_type_t event_type,
2730 struct task_struct *task)
5b0311e1 2731{
db24d33e 2732 int is_active = ctx->is_active;
c994d613
PZ
2733 u64 now;
2734
2735 lockdep_assert_held(&ctx->lock);
e5d1367f 2736
5b0311e1 2737 if (likely(!ctx->nr_events))
facc4307 2738 return;
5b0311e1 2739
db24d33e 2740 ctx->is_active |= event_type;
63e30d3e
PZ
2741 if (ctx->task) {
2742 if (!is_active)
2743 cpuctx->task_ctx = ctx;
2744 else
2745 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2746 }
2747
e5d1367f
SE
2748 now = perf_clock();
2749 ctx->timestamp = now;
3f7cce3c 2750 perf_cgroup_set_timestamp(task, ctx);
5b0311e1
FW
2751 /*
2752 * First go through the list and put on any pinned groups
2753 * in order to give them the best chance of going on.
2754 */
db24d33e 2755 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
6e37738a 2756 ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1
FW
2757
2758 /* Then walk through the lower prio flexible groups */
db24d33e 2759 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
6e37738a 2760 ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7
IM
2761}
2762
329c0e01 2763static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
2764 enum event_type_t event_type,
2765 struct task_struct *task)
329c0e01
FW
2766{
2767 struct perf_event_context *ctx = &cpuctx->ctx;
2768
e5d1367f 2769 ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e01
FW
2770}
2771
e5d1367f
SE
2772static void perf_event_context_sched_in(struct perf_event_context *ctx,
2773 struct task_struct *task)
235c7fc7 2774{
108b02cf 2775 struct perf_cpu_context *cpuctx;
235c7fc7 2776
108b02cf 2777 cpuctx = __get_cpu_context(ctx);
329c0e01
FW
2778 if (cpuctx->task_ctx == ctx)
2779 return;
2780
facc4307 2781 perf_ctx_lock(cpuctx, ctx);
1b9a644f 2782 perf_pmu_disable(ctx->pmu);
329c0e01
FW
2783 /*
2784 * We want to keep the following priority order:
2785 * cpu pinned (that don't need to move), task pinned,
2786 * cpu flexible, task flexible.
2787 */
2788 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
63e30d3e 2789 perf_event_sched_in(cpuctx, ctx, task);
facc4307
PZ
2790 perf_pmu_enable(ctx->pmu);
2791 perf_ctx_unlock(cpuctx, ctx);
235c7fc7
IM
2792}
2793
8dc85d54
PZ
2794/*
2795 * Called from scheduler to add the events of the current task
2796 * with interrupts disabled.
2797 *
2798 * We restore the event value and then enable it.
2799 *
2800 * This does not protect us against NMI, but enable()
2801 * sets the enabled bit in the control field of event _before_
2802 * accessing the event control register. If a NMI hits, then it will
2803 * keep the event running.
2804 */
ab0cce56
JO
2805void __perf_event_task_sched_in(struct task_struct *prev,
2806 struct task_struct *task)
8dc85d54
PZ
2807{
2808 struct perf_event_context *ctx;
2809 int ctxn;
2810
7e41d177
PZ
2811 /*
2812 * If cgroup events exist on this CPU, then we need to check if we have
2813 * to switch in PMU state; cgroup event are system-wide mode only.
2814 *
2815 * Since cgroup events are CPU events, we must schedule these in before
2816 * we schedule in the task events.
2817 */
2818 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
2819 perf_cgroup_sched_in(prev, task);
2820
8dc85d54
PZ
2821 for_each_task_context_nr(ctxn) {
2822 ctx = task->perf_event_ctxp[ctxn];
2823 if (likely(!ctx))
2824 continue;
2825
e5d1367f 2826 perf_event_context_sched_in(ctx, task);
8dc85d54 2827 }
d010b332 2828
45ac1403
AH
2829 if (atomic_read(&nr_switch_events))
2830 perf_event_switch(task, prev, true);
2831
ba532500
YZ
2832 if (__this_cpu_read(perf_sched_cb_usages))
2833 perf_pmu_sched_task(prev, task, true);
235c7fc7
IM
2834}
2835
abd50713
PZ
2836static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2837{
2838 u64 frequency = event->attr.sample_freq;
2839 u64 sec = NSEC_PER_SEC;
2840 u64 divisor, dividend;
2841
2842 int count_fls, nsec_fls, frequency_fls, sec_fls;
2843
2844 count_fls = fls64(count);
2845 nsec_fls = fls64(nsec);
2846 frequency_fls = fls64(frequency);
2847 sec_fls = 30;
2848
2849 /*
2850 * We got @count in @nsec, with a target of sample_freq HZ
2851 * the target period becomes:
2852 *
2853 * @count * 10^9
2854 * period = -------------------
2855 * @nsec * sample_freq
2856 *
2857 */
2858
2859 /*
2860 * Reduce accuracy by one bit such that @a and @b converge
2861 * to a similar magnitude.
2862 */
fe4b04fa 2863#define REDUCE_FLS(a, b) \
abd50713
PZ
2864do { \
2865 if (a##_fls > b##_fls) { \
2866 a >>= 1; \
2867 a##_fls--; \
2868 } else { \
2869 b >>= 1; \
2870 b##_fls--; \
2871 } \
2872} while (0)
2873
2874 /*
2875 * Reduce accuracy until either term fits in a u64, then proceed with
2876 * the other, so that finally we can do a u64/u64 division.
2877 */
2878 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2879 REDUCE_FLS(nsec, frequency);
2880 REDUCE_FLS(sec, count);
2881 }
2882
2883 if (count_fls + sec_fls > 64) {
2884 divisor = nsec * frequency;
2885
2886 while (count_fls + sec_fls > 64) {
2887 REDUCE_FLS(count, sec);
2888 divisor >>= 1;
2889 }
2890
2891 dividend = count * sec;
2892 } else {
2893 dividend = count * sec;
2894
2895 while (nsec_fls + frequency_fls > 64) {
2896 REDUCE_FLS(nsec, frequency);
2897 dividend >>= 1;
2898 }
2899
2900 divisor = nsec * frequency;
2901 }
2902
f6ab91ad
PZ
2903 if (!divisor)
2904 return dividend;
2905
abd50713
PZ
2906 return div64_u64(dividend, divisor);
2907}
2908
e050e3f0
SE
2909static DEFINE_PER_CPU(int, perf_throttled_count);
2910static DEFINE_PER_CPU(u64, perf_throttled_seq);
2911
f39d47ff 2912static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
bd2b5b12 2913{
cdd6c482 2914 struct hw_perf_event *hwc = &event->hw;
f6ab91ad 2915 s64 period, sample_period;
bd2b5b12
PZ
2916 s64 delta;
2917
abd50713 2918 period = perf_calculate_period(event, nsec, count);
bd2b5b12
PZ
2919
2920 delta = (s64)(period - hwc->sample_period);
2921 delta = (delta + 7) / 8; /* low pass filter */
2922
2923 sample_period = hwc->sample_period + delta;
2924
2925 if (!sample_period)
2926 sample_period = 1;
2927
bd2b5b12 2928 hwc->sample_period = sample_period;
abd50713 2929
e7850595 2930 if (local64_read(&hwc->period_left) > 8*sample_period) {
f39d47ff
SE
2931 if (disable)
2932 event->pmu->stop(event, PERF_EF_UPDATE);
2933
e7850595 2934 local64_set(&hwc->period_left, 0);
f39d47ff
SE
2935
2936 if (disable)
2937 event->pmu->start(event, PERF_EF_RELOAD);
abd50713 2938 }
bd2b5b12
PZ
2939}
2940
e050e3f0
SE
2941/*
2942 * combine freq adjustment with unthrottling to avoid two passes over the
2943 * events. At the same time, make sure, having freq events does not change
2944 * the rate of unthrottling as that would introduce bias.
2945 */
2946static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2947 int needs_unthr)
60db5e09 2948{
cdd6c482
IM
2949 struct perf_event *event;
2950 struct hw_perf_event *hwc;
e050e3f0 2951 u64 now, period = TICK_NSEC;
abd50713 2952 s64 delta;
60db5e09 2953
e050e3f0
SE
2954 /*
2955 * only need to iterate over all events iff:
2956 * - context have events in frequency mode (needs freq adjust)
2957 * - there are events to unthrottle on this cpu
2958 */
2959 if (!(ctx->nr_freq || needs_unthr))
0f5a2601
PZ
2960 return;
2961
e050e3f0 2962 raw_spin_lock(&ctx->lock);
f39d47ff 2963 perf_pmu_disable(ctx->pmu);
e050e3f0 2964
03541f8b 2965 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482 2966 if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09
PZ
2967 continue;
2968
5632ab12 2969 if (!event_filter_match(event))
5d27c23d
PZ
2970 continue;
2971
44377277
AS
2972 perf_pmu_disable(event->pmu);
2973
cdd6c482 2974 hwc = &event->hw;
6a24ed6c 2975
ae23bff1 2976 if (hwc->interrupts == MAX_INTERRUPTS) {
e050e3f0 2977 hwc->interrupts = 0;
cdd6c482 2978 perf_log_throttle(event, 1);
a4eaf7f1 2979 event->pmu->start(event, 0);
a78ac325
PZ
2980 }
2981
cdd6c482 2982 if (!event->attr.freq || !event->attr.sample_freq)
44377277 2983 goto next;
60db5e09 2984
e050e3f0
SE
2985 /*
2986 * stop the event and update event->count
2987 */
2988 event->pmu->stop(event, PERF_EF_UPDATE);
2989
e7850595 2990 now = local64_read(&event->count);
abd50713
PZ
2991 delta = now - hwc->freq_count_stamp;
2992 hwc->freq_count_stamp = now;
60db5e09 2993
e050e3f0
SE
2994 /*
2995 * restart the event
2996 * reload only if value has changed
f39d47ff
SE
2997 * we have stopped the event so tell that
2998 * to perf_adjust_period() to avoid stopping it
2999 * twice.
e050e3f0 3000 */
abd50713 3001 if (delta > 0)
f39d47ff 3002 perf_adjust_period(event, period, delta, false);
e050e3f0
SE
3003
3004 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
44377277
AS
3005 next:
3006 perf_pmu_enable(event->pmu);
60db5e09 3007 }
e050e3f0 3008
f39d47ff 3009 perf_pmu_enable(ctx->pmu);
e050e3f0 3010 raw_spin_unlock(&ctx->lock);
60db5e09
PZ
3011}
3012
235c7fc7 3013/*
cdd6c482 3014 * Round-robin a context's events:
235c7fc7 3015 */
cdd6c482 3016static void rotate_ctx(struct perf_event_context *ctx)
0793a61d 3017{
dddd3379
TG
3018 /*
3019 * Rotate the first entry last of non-pinned groups. Rotation might be
3020 * disabled by the inheritance code.
3021 */
3022 if (!ctx->rotate_disable)
3023 list_rotate_left(&ctx->flexible_groups);
235c7fc7
IM
3024}
3025
9e630205 3026static int perf_rotate_context(struct perf_cpu_context *cpuctx)
235c7fc7 3027{
8dc85d54 3028 struct perf_event_context *ctx = NULL;
2fde4f94 3029 int rotate = 0;
7fc23a53 3030
b5ab4cd5 3031 if (cpuctx->ctx.nr_events) {
b5ab4cd5
PZ
3032 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
3033 rotate = 1;
3034 }
235c7fc7 3035
8dc85d54 3036 ctx = cpuctx->task_ctx;
b5ab4cd5 3037 if (ctx && ctx->nr_events) {
b5ab4cd5
PZ
3038 if (ctx->nr_events != ctx->nr_active)
3039 rotate = 1;
3040 }
9717e6cd 3041
e050e3f0 3042 if (!rotate)
0f5a2601
PZ
3043 goto done;
3044
facc4307 3045 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644f 3046 perf_pmu_disable(cpuctx->ctx.pmu);
60db5e09 3047
e050e3f0
SE
3048 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3049 if (ctx)
3050 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
0793a61d 3051
e050e3f0
SE
3052 rotate_ctx(&cpuctx->ctx);
3053 if (ctx)
3054 rotate_ctx(ctx);
235c7fc7 3055
e050e3f0 3056 perf_event_sched_in(cpuctx, ctx, current);
235c7fc7 3057
0f5a2601
PZ
3058 perf_pmu_enable(cpuctx->ctx.pmu);
3059 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
b5ab4cd5 3060done:
9e630205
SE
3061
3062 return rotate;
e9d2b064
PZ
3063}
3064
026249ef
FW
3065#ifdef CONFIG_NO_HZ_FULL
3066bool perf_event_can_stop_tick(void)
3067{
948b26b6 3068 if (atomic_read(&nr_freq_events) ||
d84153d6 3069 __this_cpu_read(perf_throttled_count))
026249ef 3070 return false;
d84153d6
FW
3071 else
3072 return true;
026249ef
FW
3073}
3074#endif
3075
e9d2b064
PZ
3076void perf_event_task_tick(void)
3077{
2fde4f94
MR
3078 struct list_head *head = this_cpu_ptr(&active_ctx_list);
3079 struct perf_event_context *ctx, *tmp;
e050e3f0 3080 int throttled;
b5ab4cd5 3081
e9d2b064
PZ
3082 WARN_ON(!irqs_disabled());
3083
e050e3f0
SE
3084 __this_cpu_inc(perf_throttled_seq);
3085 throttled = __this_cpu_xchg(perf_throttled_count, 0);
3086
2fde4f94 3087 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
e050e3f0 3088 perf_adjust_freq_unthr_context(ctx, throttled);
0793a61d
TG
3089}
3090
889ff015
FW
3091static int event_enable_on_exec(struct perf_event *event,
3092 struct perf_event_context *ctx)
3093{
3094 if (!event->attr.enable_on_exec)
3095 return 0;
3096
3097 event->attr.enable_on_exec = 0;
3098 if (event->state >= PERF_EVENT_STATE_INACTIVE)
3099 return 0;
3100
1d9b482e 3101 __perf_event_mark_enabled(event);
889ff015
FW
3102
3103 return 1;
3104}
3105
57e7986e 3106/*
cdd6c482 3107 * Enable all of a task's events that have been marked enable-on-exec.
57e7986e
PM
3108 * This expects task == current.
3109 */
c1274499 3110static void perf_event_enable_on_exec(int ctxn)
57e7986e 3111{
c1274499 3112 struct perf_event_context *ctx, *clone_ctx = NULL;
3e349507 3113 struct perf_cpu_context *cpuctx;
cdd6c482 3114 struct perf_event *event;
57e7986e
PM
3115 unsigned long flags;
3116 int enabled = 0;
3117
3118 local_irq_save(flags);
c1274499 3119 ctx = current->perf_event_ctxp[ctxn];
cdd6c482 3120 if (!ctx || !ctx->nr_events)
57e7986e
PM
3121 goto out;
3122
3e349507
PZ
3123 cpuctx = __get_cpu_context(ctx);
3124 perf_ctx_lock(cpuctx, ctx);
3125 list_for_each_entry(event, &ctx->event_list, event_entry)
3126 enabled |= event_enable_on_exec(event, ctx);
57e7986e
PM
3127
3128 /*
3e349507 3129 * Unclone and reschedule this context if we enabled any event.
57e7986e 3130 */
3e349507 3131 if (enabled) {
211de6eb 3132 clone_ctx = unclone_ctx(ctx);
3e349507
PZ
3133 ctx_resched(cpuctx, ctx);
3134 }
3135 perf_ctx_unlock(cpuctx, ctx);
57e7986e 3136
9ed6060d 3137out:
57e7986e 3138 local_irq_restore(flags);
211de6eb
PZ
3139
3140 if (clone_ctx)
3141 put_ctx(clone_ctx);
57e7986e
PM
3142}
3143
e041e328
PZ
3144void perf_event_exec(void)
3145{
e041e328
PZ
3146 int ctxn;
3147
3148 rcu_read_lock();
c1274499
PZ
3149 for_each_task_context_nr(ctxn)
3150 perf_event_enable_on_exec(ctxn);
e041e328
PZ
3151 rcu_read_unlock();
3152}
3153
0492d4c5
PZ
3154struct perf_read_data {
3155 struct perf_event *event;
3156 bool group;
7d88962e 3157 int ret;
0492d4c5
PZ
3158};
3159
0793a61d 3160/*
cdd6c482 3161 * Cross CPU call to read the hardware event
0793a61d 3162 */
cdd6c482 3163static void __perf_event_read(void *info)
0793a61d 3164{
0492d4c5
PZ
3165 struct perf_read_data *data = info;
3166 struct perf_event *sub, *event = data->event;
cdd6c482 3167 struct perf_event_context *ctx = event->ctx;
108b02cf 3168 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
4a00c16e 3169 struct pmu *pmu = event->pmu;
621a01ea 3170
e1ac3614
PM
3171 /*
3172 * If this is a task context, we need to check whether it is
3173 * the current task context of this cpu. If not it has been
3174 * scheduled out before the smp call arrived. In that case
cdd6c482
IM
3175 * event->count would have been updated to a recent sample
3176 * when the event was scheduled out.
e1ac3614
PM
3177 */
3178 if (ctx->task && cpuctx->task_ctx != ctx)
3179 return;
3180
e625cce1 3181 raw_spin_lock(&ctx->lock);
e5d1367f 3182 if (ctx->is_active) {
542e72fc 3183 update_context_time(ctx);
e5d1367f
SE
3184 update_cgrp_time_from_event(event);
3185 }
0492d4c5 3186
cdd6c482 3187 update_event_times(event);
4a00c16e
SB
3188 if (event->state != PERF_EVENT_STATE_ACTIVE)
3189 goto unlock;
0492d4c5 3190
4a00c16e
SB
3191 if (!data->group) {
3192 pmu->read(event);
3193 data->ret = 0;
0492d4c5 3194 goto unlock;
4a00c16e
SB
3195 }
3196
3197 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
3198
3199 pmu->read(event);
0492d4c5
PZ
3200
3201 list_for_each_entry(sub, &event->sibling_list, group_entry) {
3202 update_event_times(sub);
4a00c16e
SB
3203 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
3204 /*
3205 * Use sibling's PMU rather than @event's since
3206 * sibling could be on different (eg: software) PMU.
3207 */
0492d4c5 3208 sub->pmu->read(sub);
4a00c16e 3209 }
0492d4c5 3210 }
4a00c16e
SB
3211
3212 data->ret = pmu->commit_txn(pmu);
0492d4c5
PZ
3213
3214unlock:
e625cce1 3215 raw_spin_unlock(&ctx->lock);
0793a61d
TG
3216}
3217
b5e58793
PZ
3218static inline u64 perf_event_count(struct perf_event *event)
3219{
eacd3ecc
MF
3220 if (event->pmu->count)
3221 return event->pmu->count(event);
3222
3223 return __perf_event_count(event);
b5e58793
PZ
3224}
3225
ffe8690c
KX
3226/*
3227 * NMI-safe method to read a local event, that is an event that
3228 * is:
3229 * - either for the current task, or for this CPU
3230 * - does not have inherit set, for inherited task events
3231 * will not be local and we cannot read them atomically
3232 * - must not have a pmu::count method
3233 */
3234u64 perf_event_read_local(struct perf_event *event)
3235{
3236 unsigned long flags;
3237 u64 val;
3238
3239 /*
3240 * Disabling interrupts avoids all counter scheduling (context
3241 * switches, timer based rotation and IPIs).
3242 */
3243 local_irq_save(flags);
3244
3245 /* If this is a per-task event, it must be for current */
3246 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
3247 event->hw.target != current);
3248
3249 /* If this is a per-CPU event, it must be for this CPU */
3250 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
3251 event->cpu != smp_processor_id());
3252
3253 /*
3254 * It must not be an event with inherit set, we cannot read
3255 * all child counters from atomic context.
3256 */
3257 WARN_ON_ONCE(event->attr.inherit);
3258
3259 /*
3260 * It must not have a pmu::count method, those are not
3261 * NMI safe.
3262 */
3263 WARN_ON_ONCE(event->pmu->count);
3264
3265 /*
3266 * If the event is currently on this CPU, its either a per-task event,
3267 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
3268 * oncpu == -1).
3269 */
3270 if (event->oncpu == smp_processor_id())
3271 event->pmu->read(event);
3272
3273 val = local64_read(&event->count);
3274 local_irq_restore(flags);
3275
3276 return val;
3277}
3278
7d88962e 3279static int perf_event_read(struct perf_event *event, bool group)
0793a61d 3280{
7d88962e
SB
3281 int ret = 0;
3282
0793a61d 3283 /*
cdd6c482
IM
3284 * If event is enabled and currently active on a CPU, update the
3285 * value in the event structure:
0793a61d 3286 */
cdd6c482 3287 if (event->state == PERF_EVENT_STATE_ACTIVE) {
0492d4c5
PZ
3288 struct perf_read_data data = {
3289 .event = event,
3290 .group = group,
7d88962e 3291 .ret = 0,
0492d4c5 3292 };
cdd6c482 3293 smp_call_function_single(event->oncpu,
0492d4c5 3294 __perf_event_read, &data, 1);
7d88962e 3295 ret = data.ret;
cdd6c482 3296 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9
PZ
3297 struct perf_event_context *ctx = event->ctx;
3298 unsigned long flags;
3299
e625cce1 3300 raw_spin_lock_irqsave(&ctx->lock, flags);
c530ccd9
SE
3301 /*
3302 * may read while context is not active
3303 * (e.g., thread is blocked), in that case
3304 * we cannot update context time
3305 */
e5d1367f 3306 if (ctx->is_active) {
c530ccd9 3307 update_context_time(ctx);
e5d1367f
SE
3308 update_cgrp_time_from_event(event);
3309 }
0492d4c5
PZ
3310 if (group)
3311 update_group_times(event);
3312 else
3313 update_event_times(event);
e625cce1 3314 raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d 3315 }
7d88962e
SB
3316
3317 return ret;
0793a61d
TG
3318}
3319
a63eaf34 3320/*
cdd6c482 3321 * Initialize the perf_event context in a task_struct:
a63eaf34 3322 */
eb184479 3323static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34 3324{
e625cce1 3325 raw_spin_lock_init(&ctx->lock);
a63eaf34 3326 mutex_init(&ctx->mutex);
2fde4f94 3327 INIT_LIST_HEAD(&ctx->active_ctx_list);
889ff015
FW
3328 INIT_LIST_HEAD(&ctx->pinned_groups);
3329 INIT_LIST_HEAD(&ctx->flexible_groups);
a63eaf34
PM
3330 INIT_LIST_HEAD(&ctx->event_list);
3331 atomic_set(&ctx->refcount, 1);
eb184479
PZ
3332}
3333
3334static struct perf_event_context *
3335alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3336{
3337 struct perf_event_context *ctx;
3338
3339 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3340 if (!ctx)
3341 return NULL;
3342
3343 __perf_event_init_context(ctx);
3344 if (task) {
3345 ctx->task = task;
3346 get_task_struct(task);
0793a61d 3347 }
eb184479
PZ
3348 ctx->pmu = pmu;
3349
3350 return ctx;
a63eaf34
PM
3351}
3352
2ebd4ffb
MH
3353static struct task_struct *
3354find_lively_task_by_vpid(pid_t vpid)
3355{
3356 struct task_struct *task;
3357 int err;
0793a61d
TG
3358
3359 rcu_read_lock();
2ebd4ffb 3360 if (!vpid)
0793a61d
TG
3361 task = current;
3362 else
2ebd4ffb 3363 task = find_task_by_vpid(vpid);
0793a61d
TG
3364 if (task)
3365 get_task_struct(task);
3366 rcu_read_unlock();
3367
3368 if (!task)
3369 return ERR_PTR(-ESRCH);
3370
0793a61d 3371 /* Reuse ptrace permission checks for now. */
c93f7669 3372 err = -EACCES;
caaee623 3373 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
c93f7669
PM
3374 goto errout;
3375
2ebd4ffb
MH
3376 return task;
3377errout:
3378 put_task_struct(task);
3379 return ERR_PTR(err);
3380
3381}
3382
fe4b04fa
PZ
3383/*
3384 * Returns a matching context with refcount and pincount.
3385 */
108b02cf 3386static struct perf_event_context *
4af57ef2
YZ
3387find_get_context(struct pmu *pmu, struct task_struct *task,
3388 struct perf_event *event)
0793a61d 3389{
211de6eb 3390 struct perf_event_context *ctx, *clone_ctx = NULL;
22a4f650 3391 struct perf_cpu_context *cpuctx;
4af57ef2 3392 void *task_ctx_data = NULL;
25346b93 3393 unsigned long flags;
8dc85d54 3394 int ctxn, err;
4af57ef2 3395 int cpu = event->cpu;
0793a61d 3396
22a4ec72 3397 if (!task) {
cdd6c482 3398 /* Must be root to operate on a CPU event: */
0764771d 3399 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
0793a61d
TG
3400 return ERR_PTR(-EACCES);
3401
0793a61d 3402 /*
cdd6c482 3403 * We could be clever and allow to attach a event to an
0793a61d
TG
3404 * offline CPU and activate it when the CPU comes up, but
3405 * that's for later.
3406 */
f6325e30 3407 if (!cpu_online(cpu))
0793a61d
TG
3408 return ERR_PTR(-ENODEV);
3409
108b02cf 3410 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d 3411 ctx = &cpuctx->ctx;
c93f7669 3412 get_ctx(ctx);
fe4b04fa 3413 ++ctx->pin_count;
0793a61d 3414
0793a61d
TG
3415 return ctx;
3416 }
3417
8dc85d54
PZ
3418 err = -EINVAL;
3419 ctxn = pmu->task_ctx_nr;
3420 if (ctxn < 0)
3421 goto errout;
3422
4af57ef2
YZ
3423 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
3424 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
3425 if (!task_ctx_data) {
3426 err = -ENOMEM;
3427 goto errout;
3428 }
3429 }
3430
9ed6060d 3431retry:
8dc85d54 3432 ctx = perf_lock_task_context(task, ctxn, &flags);
c93f7669 3433 if (ctx) {
211de6eb 3434 clone_ctx = unclone_ctx(ctx);
fe4b04fa 3435 ++ctx->pin_count;
4af57ef2
YZ
3436
3437 if (task_ctx_data && !ctx->task_ctx_data) {
3438 ctx->task_ctx_data = task_ctx_data;
3439 task_ctx_data = NULL;
3440 }
e625cce1 3441 raw_spin_unlock_irqrestore(&ctx->lock, flags);
211de6eb
PZ
3442
3443 if (clone_ctx)
3444 put_ctx(clone_ctx);
9137fb28 3445 } else {
eb184479 3446 ctx = alloc_perf_context(pmu, task);
c93f7669
PM
3447 err = -ENOMEM;
3448 if (!ctx)
3449 goto errout;
eb184479 3450
4af57ef2
YZ
3451 if (task_ctx_data) {
3452 ctx->task_ctx_data = task_ctx_data;
3453 task_ctx_data = NULL;
3454 }
3455
dbe08d82
ON
3456 err = 0;
3457 mutex_lock(&task->perf_event_mutex);
3458 /*
3459 * If it has already passed perf_event_exit_task().
3460 * we must see PF_EXITING, it takes this mutex too.
3461 */
3462 if (task->flags & PF_EXITING)
3463 err = -ESRCH;
3464 else if (task->perf_event_ctxp[ctxn])
3465 err = -EAGAIN;
fe4b04fa 3466 else {
9137fb28 3467 get_ctx(ctx);
fe4b04fa 3468 ++ctx->pin_count;
dbe08d82 3469 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa 3470 }
dbe08d82
ON
3471 mutex_unlock(&task->perf_event_mutex);
3472
3473 if (unlikely(err)) {
9137fb28 3474 put_ctx(ctx);
dbe08d82
ON
3475
3476 if (err == -EAGAIN)
3477 goto retry;
3478 goto errout;
a63eaf34
PM
3479 }
3480 }
3481
4af57ef2 3482 kfree(task_ctx_data);
0793a61d 3483 return ctx;
c93f7669 3484
9ed6060d 3485errout:
4af57ef2 3486 kfree(task_ctx_data);
c93f7669 3487 return ERR_PTR(err);
0793a61d
TG
3488}
3489
6fb2915d 3490static void perf_event_free_filter(struct perf_event *event);
2541517c 3491static void perf_event_free_bpf_prog(struct perf_event *event);
6fb2915d 3492
cdd6c482 3493static void free_event_rcu(struct rcu_head *head)
592903cd 3494{
cdd6c482 3495 struct perf_event *event;
592903cd 3496
cdd6c482
IM
3497 event = container_of(head, struct perf_event, rcu_head);
3498 if (event->ns)
3499 put_pid_ns(event->ns);
6fb2915d 3500 perf_event_free_filter(event);
cdd6c482 3501 kfree(event);
592903cd
PZ
3502}
3503
b69cf536
PZ
3504static void ring_buffer_attach(struct perf_event *event,
3505 struct ring_buffer *rb);
925d519a 3506
4beb31f3 3507static void unaccount_event_cpu(struct perf_event *event, int cpu)
f1600952 3508{
4beb31f3
FW
3509 if (event->parent)
3510 return;
3511
4beb31f3
FW
3512 if (is_cgroup_event(event))
3513 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3514}
925d519a 3515
4beb31f3
FW
3516static void unaccount_event(struct perf_event *event)
3517{
25432ae9
PZ
3518 bool dec = false;
3519
4beb31f3
FW
3520 if (event->parent)
3521 return;
3522
3523 if (event->attach_state & PERF_ATTACH_TASK)
25432ae9 3524 dec = true;
4beb31f3
FW
3525 if (event->attr.mmap || event->attr.mmap_data)
3526 atomic_dec(&nr_mmap_events);
3527 if (event->attr.comm)
3528 atomic_dec(&nr_comm_events);
3529 if (event->attr.task)
3530 atomic_dec(&nr_task_events);
948b26b6
FW
3531 if (event->attr.freq)
3532 atomic_dec(&nr_freq_events);
45ac1403 3533 if (event->attr.context_switch) {
25432ae9 3534 dec = true;
45ac1403
AH
3535 atomic_dec(&nr_switch_events);
3536 }
4beb31f3 3537 if (is_cgroup_event(event))
25432ae9 3538 dec = true;
4beb31f3 3539 if (has_branch_stack(event))
25432ae9
PZ
3540 dec = true;
3541
3542 if (dec)
4beb31f3
FW
3543 static_key_slow_dec_deferred(&perf_sched_events);
3544
3545 unaccount_event_cpu(event, event->cpu);
3546}
925d519a 3547
bed5b25a
AS
3548/*
3549 * The following implement mutual exclusion of events on "exclusive" pmus
3550 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
3551 * at a time, so we disallow creating events that might conflict, namely:
3552 *
3553 * 1) cpu-wide events in the presence of per-task events,
3554 * 2) per-task events in the presence of cpu-wide events,
3555 * 3) two matching events on the same context.
3556 *
3557 * The former two cases are handled in the allocation path (perf_event_alloc(),
a0733e69 3558 * _free_event()), the latter -- before the first perf_install_in_context().
bed5b25a
AS
3559 */
3560static int exclusive_event_init(struct perf_event *event)
3561{
3562 struct pmu *pmu = event->pmu;
3563
3564 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3565 return 0;
3566
3567 /*
3568 * Prevent co-existence of per-task and cpu-wide events on the
3569 * same exclusive pmu.
3570 *
3571 * Negative pmu::exclusive_cnt means there are cpu-wide
3572 * events on this "exclusive" pmu, positive means there are
3573 * per-task events.
3574 *
3575 * Since this is called in perf_event_alloc() path, event::ctx
3576 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
3577 * to mean "per-task event", because unlike other attach states it
3578 * never gets cleared.
3579 */
3580 if (event->attach_state & PERF_ATTACH_TASK) {
3581 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
3582 return -EBUSY;
3583 } else {
3584 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
3585 return -EBUSY;
3586 }
3587
3588 return 0;
3589}
3590
3591static void exclusive_event_destroy(struct perf_event *event)
3592{
3593 struct pmu *pmu = event->pmu;
3594
3595 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3596 return;
3597
3598 /* see comment in exclusive_event_init() */
3599 if (event->attach_state & PERF_ATTACH_TASK)
3600 atomic_dec(&pmu->exclusive_cnt);
3601 else
3602 atomic_inc(&pmu->exclusive_cnt);
3603}
3604
3605static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
3606{
3607 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) &&
3608 (e1->cpu == e2->cpu ||
3609 e1->cpu == -1 ||
3610 e2->cpu == -1))
3611 return true;
3612 return false;
3613}
3614
3615/* Called under the same ctx::mutex as perf_install_in_context() */
3616static bool exclusive_event_installable(struct perf_event *event,
3617 struct perf_event_context *ctx)
3618{
3619 struct perf_event *iter_event;
3620 struct pmu *pmu = event->pmu;
3621
3622 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3623 return true;
3624
3625 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
3626 if (exclusive_event_match(iter_event, event))
3627 return false;
3628 }
3629
3630 return true;
3631}
3632
683ede43 3633static void _free_event(struct perf_event *event)
f1600952 3634{
e360adbe 3635 irq_work_sync(&event->pending);
925d519a 3636
4beb31f3 3637 unaccount_event(event);
9ee318a7 3638
76369139 3639 if (event->rb) {
9bb5d40c
PZ
3640 /*
3641 * Can happen when we close an event with re-directed output.
3642 *
3643 * Since we have a 0 refcount, perf_mmap_close() will skip
3644 * over us; possibly making our ring_buffer_put() the last.
3645 */
3646 mutex_lock(&event->mmap_mutex);
b69cf536 3647 ring_buffer_attach(event, NULL);
9bb5d40c 3648 mutex_unlock(&event->mmap_mutex);
a4be7c27
PZ
3649 }
3650
e5d1367f
SE
3651 if (is_cgroup_event(event))
3652 perf_detach_cgroup(event);
3653
a0733e69
PZ
3654 if (!event->parent) {
3655 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3656 put_callchain_buffers();
3657 }
3658
3659 perf_event_free_bpf_prog(event);
3660
3661 if (event->destroy)
3662 event->destroy(event);
3663
3664 if (event->ctx)
3665 put_ctx(event->ctx);
3666
3667 if (event->pmu) {
3668 exclusive_event_destroy(event);
3669 module_put(event->pmu->module);
3670 }
3671
3672 call_rcu(&event->rcu_head, free_event_rcu);
f1600952
PZ
3673}
3674
683ede43
PZ
3675/*
3676 * Used to free events which have a known refcount of 1, such as in error paths
3677 * where the event isn't exposed yet and inherited events.
3678 */
3679static void free_event(struct perf_event *event)
0793a61d 3680{
683ede43
PZ
3681 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
3682 "unexpected event refcount: %ld; ptr=%p\n",
3683 atomic_long_read(&event->refcount), event)) {
3684 /* leak to avoid use-after-free */
3685 return;
3686 }
0793a61d 3687
683ede43 3688 _free_event(event);
0793a61d
TG
3689}
3690
a66a3052 3691/*
f8697762 3692 * Remove user event from the owner task.
a66a3052 3693 */
f8697762 3694static void perf_remove_from_owner(struct perf_event *event)
fb0459d7 3695{
8882135b 3696 struct task_struct *owner;
fb0459d7 3697
8882135b 3698 rcu_read_lock();
8882135b 3699 /*
f47c02c0
PZ
3700 * Matches the smp_store_release() in perf_event_exit_task(). If we
3701 * observe !owner it means the list deletion is complete and we can
3702 * indeed free this event, otherwise we need to serialize on
8882135b
PZ
3703 * owner->perf_event_mutex.
3704 */
f47c02c0 3705 owner = lockless_dereference(event->owner);
8882135b
PZ
3706 if (owner) {
3707 /*
3708 * Since delayed_put_task_struct() also drops the last
3709 * task reference we can safely take a new reference
3710 * while holding the rcu_read_lock().
3711 */
3712 get_task_struct(owner);
3713 }
3714 rcu_read_unlock();
3715
3716 if (owner) {
f63a8daa
PZ
3717 /*
3718 * If we're here through perf_event_exit_task() we're already
3719 * holding ctx->mutex which would be an inversion wrt. the
3720 * normal lock order.
3721 *
3722 * However we can safely take this lock because its the child
3723 * ctx->mutex.
3724 */
3725 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
3726
8882135b
PZ
3727 /*
3728 * We have to re-check the event->owner field, if it is cleared
3729 * we raced with perf_event_exit_task(), acquiring the mutex
3730 * ensured they're done, and we can proceed with freeing the
3731 * event.
3732 */
f47c02c0 3733 if (event->owner) {
8882135b 3734 list_del_init(&event->owner_entry);
f47c02c0
PZ
3735 smp_store_release(&event->owner, NULL);
3736 }
8882135b
PZ
3737 mutex_unlock(&owner->perf_event_mutex);
3738 put_task_struct(owner);
3739 }
f8697762
JO
3740}
3741
f8697762
JO
3742static void put_event(struct perf_event *event)
3743{
f8697762
JO
3744 if (!atomic_long_dec_and_test(&event->refcount))
3745 return;
3746
c6e5b732
PZ
3747 _free_event(event);
3748}
3749
3750/*
3751 * Kill an event dead; while event:refcount will preserve the event
3752 * object, it will not preserve its functionality. Once the last 'user'
3753 * gives up the object, we'll destroy the thing.
3754 */
3755int perf_event_release_kernel(struct perf_event *event)
3756{
a4f4bb6d 3757 struct perf_event_context *ctx = event->ctx;
c6e5b732
PZ
3758 struct perf_event *child, *tmp;
3759
a4f4bb6d
PZ
3760 /*
3761 * If we got here through err_file: fput(event_file); we will not have
3762 * attached to a context yet.
3763 */
3764 if (!ctx) {
3765 WARN_ON_ONCE(event->attach_state &
3766 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
3767 goto no_ctx;
3768 }
3769
f8697762
JO
3770 if (!is_kernel_event(event))
3771 perf_remove_from_owner(event);
8882135b 3772
5fa7c8ec 3773 ctx = perf_event_ctx_lock(event);
a83fe28e 3774 WARN_ON_ONCE(ctx->parent_ctx);
60beda84 3775 perf_remove_from_context(event, DETACH_GROUP | DETACH_STATE);
d415a7f1 3776 perf_event_ctx_unlock(event, ctx);
683ede43 3777
683ede43 3778 /*
60beda84
PZ
3779 * At this point we must have event->state == PERF_EVENT_STATE_EXIT,
3780 * either from the above perf_remove_from_context() or through
3781 * perf_event_exit_event().
683ede43 3782 *
c6e5b732
PZ
3783 * Therefore, anybody acquiring event->child_mutex after the below
3784 * loop _must_ also see this, most importantly inherit_event() which
3785 * will avoid placing more children on the list.
683ede43 3786 *
c6e5b732
PZ
3787 * Thus this guarantees that we will in fact observe and kill _ALL_
3788 * child events.
683ede43 3789 */
60beda84 3790 WARN_ON_ONCE(event->state != PERF_EVENT_STATE_EXIT);
683ede43 3791
c6e5b732
PZ
3792again:
3793 mutex_lock(&event->child_mutex);
3794 list_for_each_entry(child, &event->child_list, child_list) {
a6fa941d 3795
c6e5b732
PZ
3796 /*
3797 * Cannot change, child events are not migrated, see the
3798 * comment with perf_event_ctx_lock_nested().
3799 */
3800 ctx = lockless_dereference(child->ctx);
3801 /*
3802 * Since child_mutex nests inside ctx::mutex, we must jump
3803 * through hoops. We start by grabbing a reference on the ctx.
3804 *
3805 * Since the event cannot get freed while we hold the
3806 * child_mutex, the context must also exist and have a !0
3807 * reference count.
3808 */
3809 get_ctx(ctx);
3810
3811 /*
3812 * Now that we have a ctx ref, we can drop child_mutex, and
3813 * acquire ctx::mutex without fear of it going away. Then we
3814 * can re-acquire child_mutex.
3815 */
3816 mutex_unlock(&event->child_mutex);
3817 mutex_lock(&ctx->mutex);
3818 mutex_lock(&event->child_mutex);
3819
3820 /*
3821 * Now that we hold ctx::mutex and child_mutex, revalidate our
3822 * state, if child is still the first entry, it didn't get freed
3823 * and we can continue doing so.
3824 */
3825 tmp = list_first_entry_or_null(&event->child_list,
3826 struct perf_event, child_list);
3827 if (tmp == child) {
3828 perf_remove_from_context(child, DETACH_GROUP);
3829 list_del(&child->child_list);
3830 free_event(child);
3831 /*
3832 * This matches the refcount bump in inherit_event();
3833 * this can't be the last reference.
3834 */
3835 put_event(event);
3836 }
3837
3838 mutex_unlock(&event->child_mutex);
3839 mutex_unlock(&ctx->mutex);
3840 put_ctx(ctx);
3841 goto again;
3842 }
3843 mutex_unlock(&event->child_mutex);
3844
a4f4bb6d
PZ
3845no_ctx:
3846 put_event(event); /* Must be the 'last' reference */
683ede43
PZ
3847 return 0;
3848}
3849EXPORT_SYMBOL_GPL(perf_event_release_kernel);
3850
8b10c5e2
PZ
3851/*
3852 * Called when the last reference to the file is gone.
3853 */
a6fa941d
AV
3854static int perf_release(struct inode *inode, struct file *file)
3855{
c6e5b732 3856 perf_event_release_kernel(file->private_data);
a6fa941d 3857 return 0;
fb0459d7 3858}
fb0459d7 3859
59ed446f 3860u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c0994 3861{
cdd6c482 3862 struct perf_event *child;
e53c0994
PZ
3863 u64 total = 0;
3864
59ed446f
PZ
3865 *enabled = 0;
3866 *running = 0;
3867
6f10581a 3868 mutex_lock(&event->child_mutex);
01add3ea 3869
7d88962e 3870 (void)perf_event_read(event, false);
01add3ea
SB
3871 total += perf_event_count(event);
3872
59ed446f
PZ
3873 *enabled += event->total_time_enabled +
3874 atomic64_read(&event->child_total_time_enabled);
3875 *running += event->total_time_running +
3876 atomic64_read(&event->child_total_time_running);
3877
3878 list_for_each_entry(child, &event->child_list, child_list) {
7d88962e 3879 (void)perf_event_read(child, false);
01add3ea 3880 total += perf_event_count(child);
59ed446f
PZ
3881 *enabled += child->total_time_enabled;
3882 *running += child->total_time_running;
3883 }
6f10581a 3884 mutex_unlock(&event->child_mutex);
e53c0994
PZ
3885
3886 return total;
3887}
fb0459d7 3888EXPORT_SYMBOL_GPL(perf_event_read_value);
e53c0994 3889
7d88962e 3890static int __perf_read_group_add(struct perf_event *leader,
fa8c2693 3891 u64 read_format, u64 *values)
3dab77fb 3892{
fa8c2693
PZ
3893 struct perf_event *sub;
3894 int n = 1; /* skip @nr */
7d88962e 3895 int ret;
f63a8daa 3896
7d88962e
SB
3897 ret = perf_event_read(leader, true);
3898 if (ret)
3899 return ret;
abf4868b 3900
fa8c2693
PZ
3901 /*
3902 * Since we co-schedule groups, {enabled,running} times of siblings
3903 * will be identical to those of the leader, so we only publish one
3904 * set.
3905 */
3906 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3907 values[n++] += leader->total_time_enabled +
3908 atomic64_read(&leader->child_total_time_enabled);
3909 }
3dab77fb 3910
fa8c2693
PZ
3911 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3912 values[n++] += leader->total_time_running +
3913 atomic64_read(&leader->child_total_time_running);
3914 }
3915
3916 /*
3917 * Write {count,id} tuples for every sibling.
3918 */
3919 values[n++] += perf_event_count(leader);
abf4868b
PZ
3920 if (read_format & PERF_FORMAT_ID)
3921 values[n++] = primary_event_id(leader);
3dab77fb 3922
fa8c2693
PZ
3923 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3924 values[n++] += perf_event_count(sub);
3925 if (read_format & PERF_FORMAT_ID)
3926 values[n++] = primary_event_id(sub);
3927 }
7d88962e
SB
3928
3929 return 0;
fa8c2693 3930}
3dab77fb 3931
fa8c2693
PZ
3932static int perf_read_group(struct perf_event *event,
3933 u64 read_format, char __user *buf)
3934{
3935 struct perf_event *leader = event->group_leader, *child;
3936 struct perf_event_context *ctx = leader->ctx;
7d88962e 3937 int ret;
fa8c2693 3938 u64 *values;
3dab77fb 3939
fa8c2693 3940 lockdep_assert_held(&ctx->mutex);
3dab77fb 3941
fa8c2693
PZ
3942 values = kzalloc(event->read_size, GFP_KERNEL);
3943 if (!values)
3944 return -ENOMEM;
3dab77fb 3945
fa8c2693
PZ
3946 values[0] = 1 + leader->nr_siblings;
3947
3948 /*
3949 * By locking the child_mutex of the leader we effectively
3950 * lock the child list of all siblings.. XXX explain how.
3951 */
3952 mutex_lock(&leader->child_mutex);
abf4868b 3953
7d88962e
SB
3954 ret = __perf_read_group_add(leader, read_format, values);
3955 if (ret)
3956 goto unlock;
3957
3958 list_for_each_entry(child, &leader->child_list, child_list) {
3959 ret = __perf_read_group_add(child, read_format, values);
3960 if (ret)
3961 goto unlock;
3962 }
abf4868b 3963
fa8c2693 3964 mutex_unlock(&leader->child_mutex);
abf4868b 3965
7d88962e 3966 ret = event->read_size;
fa8c2693
PZ
3967 if (copy_to_user(buf, values, event->read_size))
3968 ret = -EFAULT;
7d88962e 3969 goto out;
fa8c2693 3970
7d88962e
SB
3971unlock:
3972 mutex_unlock(&leader->child_mutex);
3973out:
fa8c2693 3974 kfree(values);
abf4868b 3975 return ret;
3dab77fb
PZ
3976}
3977
b15f495b 3978static int perf_read_one(struct perf_event *event,
3dab77fb
PZ
3979 u64 read_format, char __user *buf)
3980{
59ed446f 3981 u64 enabled, running;
3dab77fb
PZ
3982 u64 values[4];
3983 int n = 0;
3984
59ed446f
PZ
3985 values[n++] = perf_event_read_value(event, &enabled, &running);
3986 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3987 values[n++] = enabled;
3988 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3989 values[n++] = running;
3dab77fb 3990 if (read_format & PERF_FORMAT_ID)
cdd6c482 3991 values[n++] = primary_event_id(event);
3dab77fb
PZ
3992
3993 if (copy_to_user(buf, values, n * sizeof(u64)))
3994 return -EFAULT;
3995
3996 return n * sizeof(u64);
3997}
3998
dc633982
JO
3999static bool is_event_hup(struct perf_event *event)
4000{
4001 bool no_children;
4002
4003 if (event->state != PERF_EVENT_STATE_EXIT)
4004 return false;
4005
4006 mutex_lock(&event->child_mutex);
4007 no_children = list_empty(&event->child_list);
4008 mutex_unlock(&event->child_mutex);
4009 return no_children;
4010}
4011
0793a61d 4012/*
cdd6c482 4013 * Read the performance event - simple non blocking version for now
0793a61d
TG
4014 */
4015static ssize_t
b15f495b 4016__perf_read(struct perf_event *event, char __user *buf, size_t count)
0793a61d 4017{
cdd6c482 4018 u64 read_format = event->attr.read_format;
3dab77fb 4019 int ret;
0793a61d 4020
3b6f9e5c 4021 /*
cdd6c482 4022 * Return end-of-file for a read on a event that is in
3b6f9e5c
PM
4023 * error state (i.e. because it was pinned but it couldn't be
4024 * scheduled on to the CPU at some point).
4025 */
cdd6c482 4026 if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5c
PM
4027 return 0;
4028
c320c7b7 4029 if (count < event->read_size)
3dab77fb
PZ
4030 return -ENOSPC;
4031
cdd6c482 4032 WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb 4033 if (read_format & PERF_FORMAT_GROUP)
b15f495b 4034 ret = perf_read_group(event, read_format, buf);
3dab77fb 4035 else
b15f495b 4036 ret = perf_read_one(event, read_format, buf);
0793a61d 4037
3dab77fb 4038 return ret;
0793a61d
TG
4039}
4040
0793a61d
TG
4041static ssize_t
4042perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
4043{
cdd6c482 4044 struct perf_event *event = file->private_data;
f63a8daa
PZ
4045 struct perf_event_context *ctx;
4046 int ret;
0793a61d 4047
f63a8daa 4048 ctx = perf_event_ctx_lock(event);
b15f495b 4049 ret = __perf_read(event, buf, count);
f63a8daa
PZ
4050 perf_event_ctx_unlock(event, ctx);
4051
4052 return ret;
0793a61d
TG
4053}
4054
4055static unsigned int perf_poll(struct file *file, poll_table *wait)
4056{
cdd6c482 4057 struct perf_event *event = file->private_data;
76369139 4058 struct ring_buffer *rb;
61b67684 4059 unsigned int events = POLLHUP;
c7138f37 4060
e708d7ad 4061 poll_wait(file, &event->waitq, wait);
179033b3 4062
dc633982 4063 if (is_event_hup(event))
179033b3 4064 return events;
c7138f37 4065
10c6db11 4066 /*
9bb5d40c
PZ
4067 * Pin the event->rb by taking event->mmap_mutex; otherwise
4068 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
10c6db11
PZ
4069 */
4070 mutex_lock(&event->mmap_mutex);
9bb5d40c
PZ
4071 rb = event->rb;
4072 if (rb)
76369139 4073 events = atomic_xchg(&rb->poll, 0);
10c6db11 4074 mutex_unlock(&event->mmap_mutex);
0793a61d
TG
4075 return events;
4076}
4077
f63a8daa 4078static void _perf_event_reset(struct perf_event *event)
6de6a7b9 4079{
7d88962e 4080 (void)perf_event_read(event, false);
e7850595 4081 local64_set(&event->count, 0);
cdd6c482 4082 perf_event_update_userpage(event);
3df5edad
PZ
4083}
4084
c93f7669 4085/*
cdd6c482
IM
4086 * Holding the top-level event's child_mutex means that any
4087 * descendant process that has inherited this event will block
8ba289b8 4088 * in perf_event_exit_event() if it goes to exit, thus satisfying the
cdd6c482 4089 * task existence requirements of perf_event_enable/disable.
c93f7669 4090 */
cdd6c482
IM
4091static void perf_event_for_each_child(struct perf_event *event,
4092 void (*func)(struct perf_event *))
3df5edad 4093{
cdd6c482 4094 struct perf_event *child;
3df5edad 4095
cdd6c482 4096 WARN_ON_ONCE(event->ctx->parent_ctx);
f63a8daa 4097
cdd6c482
IM
4098 mutex_lock(&event->child_mutex);
4099 func(event);
4100 list_for_each_entry(child, &event->child_list, child_list)
3df5edad 4101 func(child);
cdd6c482 4102 mutex_unlock(&event->child_mutex);
3df5edad
PZ
4103}
4104
cdd6c482
IM
4105static void perf_event_for_each(struct perf_event *event,
4106 void (*func)(struct perf_event *))
3df5edad 4107{
cdd6c482
IM
4108 struct perf_event_context *ctx = event->ctx;
4109 struct perf_event *sibling;
3df5edad 4110
f63a8daa
PZ
4111 lockdep_assert_held(&ctx->mutex);
4112
cdd6c482 4113 event = event->group_leader;
75f937f2 4114
cdd6c482 4115 perf_event_for_each_child(event, func);
cdd6c482 4116 list_for_each_entry(sibling, &event->sibling_list, group_entry)
724b6daa 4117 perf_event_for_each_child(sibling, func);
6de6a7b9
PZ
4118}
4119
fae3fde6
PZ
4120static void __perf_event_period(struct perf_event *event,
4121 struct perf_cpu_context *cpuctx,
4122 struct perf_event_context *ctx,
4123 void *info)
c7999c6f 4124{
fae3fde6 4125 u64 value = *((u64 *)info);
c7999c6f 4126 bool active;
08247e31 4127
cdd6c482 4128 if (event->attr.freq) {
cdd6c482 4129 event->attr.sample_freq = value;
08247e31 4130 } else {
cdd6c482
IM
4131 event->attr.sample_period = value;
4132 event->hw.sample_period = value;
08247e31 4133 }
bad7192b
PZ
4134
4135 active = (event->state == PERF_EVENT_STATE_ACTIVE);
4136 if (active) {
4137 perf_pmu_disable(ctx->pmu);
4138 event->pmu->stop(event, PERF_EF_UPDATE);
4139 }
4140
4141 local64_set(&event->hw.period_left, 0);
4142
4143 if (active) {
4144 event->pmu->start(event, PERF_EF_RELOAD);
4145 perf_pmu_enable(ctx->pmu);
4146 }
c7999c6f
PZ
4147}
4148
4149static int perf_event_period(struct perf_event *event, u64 __user *arg)
4150{
c7999c6f
PZ
4151 u64 value;
4152
4153 if (!is_sampling_event(event))
4154 return -EINVAL;
4155
4156 if (copy_from_user(&value, arg, sizeof(value)))
4157 return -EFAULT;
4158
4159 if (!value)
4160 return -EINVAL;
4161
4162 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4163 return -EINVAL;
4164
fae3fde6 4165 event_function_call(event, __perf_event_period, &value);
08247e31 4166
c7999c6f 4167 return 0;
08247e31
PZ
4168}
4169
ac9721f3
PZ
4170static const struct file_operations perf_fops;
4171
2903ff01 4172static inline int perf_fget_light(int fd, struct fd *p)
ac9721f3 4173{
2903ff01
AV
4174 struct fd f = fdget(fd);
4175 if (!f.file)
4176 return -EBADF;
ac9721f3 4177
2903ff01
AV
4178 if (f.file->f_op != &perf_fops) {
4179 fdput(f);
4180 return -EBADF;
ac9721f3 4181 }
2903ff01
AV
4182 *p = f;
4183 return 0;
ac9721f3
PZ
4184}
4185
4186static int perf_event_set_output(struct perf_event *event,
4187 struct perf_event *output_event);
6fb2915d 4188static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2541517c 4189static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
a4be7c27 4190
f63a8daa 4191static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
d859e29f 4192{
cdd6c482 4193 void (*func)(struct perf_event *);
3df5edad 4194 u32 flags = arg;
d859e29f
PM
4195
4196 switch (cmd) {
cdd6c482 4197 case PERF_EVENT_IOC_ENABLE:
f63a8daa 4198 func = _perf_event_enable;
d859e29f 4199 break;
cdd6c482 4200 case PERF_EVENT_IOC_DISABLE:
f63a8daa 4201 func = _perf_event_disable;
79f14641 4202 break;
cdd6c482 4203 case PERF_EVENT_IOC_RESET:
f63a8daa 4204 func = _perf_event_reset;
6de6a7b9 4205 break;
3df5edad 4206
cdd6c482 4207 case PERF_EVENT_IOC_REFRESH:
f63a8daa 4208 return _perf_event_refresh(event, arg);
08247e31 4209
cdd6c482
IM
4210 case PERF_EVENT_IOC_PERIOD:
4211 return perf_event_period(event, (u64 __user *)arg);
08247e31 4212
cf4957f1
JO
4213 case PERF_EVENT_IOC_ID:
4214 {
4215 u64 id = primary_event_id(event);
4216
4217 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4218 return -EFAULT;
4219 return 0;
4220 }
4221
cdd6c482 4222 case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3 4223 {
ac9721f3 4224 int ret;
ac9721f3 4225 if (arg != -1) {
2903ff01
AV
4226 struct perf_event *output_event;
4227 struct fd output;
4228 ret = perf_fget_light(arg, &output);
4229 if (ret)
4230 return ret;
4231 output_event = output.file->private_data;
4232 ret = perf_event_set_output(event, output_event);
4233 fdput(output);
4234 } else {
4235 ret = perf_event_set_output(event, NULL);
ac9721f3 4236 }
ac9721f3
PZ
4237 return ret;
4238 }
a4be7c27 4239
6fb2915d
LZ
4240 case PERF_EVENT_IOC_SET_FILTER:
4241 return perf_event_set_filter(event, (void __user *)arg);
4242
2541517c
AS
4243 case PERF_EVENT_IOC_SET_BPF:
4244 return perf_event_set_bpf_prog(event, arg);
4245
d859e29f 4246 default:
3df5edad 4247 return -ENOTTY;
d859e29f 4248 }
3df5edad
PZ
4249
4250 if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482 4251 perf_event_for_each(event, func);
3df5edad 4252 else
cdd6c482 4253 perf_event_for_each_child(event, func);
3df5edad
PZ
4254
4255 return 0;
d859e29f
PM
4256}
4257
f63a8daa
PZ
4258static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4259{
4260 struct perf_event *event = file->private_data;
4261 struct perf_event_context *ctx;
4262 long ret;
4263
4264 ctx = perf_event_ctx_lock(event);
4265 ret = _perf_ioctl(event, cmd, arg);
4266 perf_event_ctx_unlock(event, ctx);
4267
4268 return ret;
4269}
4270
b3f20785
PM
4271#ifdef CONFIG_COMPAT
4272static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4273 unsigned long arg)
4274{
4275 switch (_IOC_NR(cmd)) {
4276 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4277 case _IOC_NR(PERF_EVENT_IOC_ID):
4278 /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
4279 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4280 cmd &= ~IOCSIZE_MASK;
4281 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4282 }
4283 break;
4284 }
4285 return perf_ioctl(file, cmd, arg);
4286}
4287#else
4288# define perf_compat_ioctl NULL
4289#endif
4290
cdd6c482 4291int perf_event_task_enable(void)
771d7cde 4292{
f63a8daa 4293 struct perf_event_context *ctx;
cdd6c482 4294 struct perf_event *event;
771d7cde 4295
cdd6c482 4296 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
4297 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4298 ctx = perf_event_ctx_lock(event);
4299 perf_event_for_each_child(event, _perf_event_enable);
4300 perf_event_ctx_unlock(event, ctx);
4301 }
cdd6c482 4302 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
4303
4304 return 0;
4305}
4306
cdd6c482 4307int perf_event_task_disable(void)
771d7cde 4308{
f63a8daa 4309 struct perf_event_context *ctx;
cdd6c482 4310 struct perf_event *event;
771d7cde 4311
cdd6c482 4312 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
4313 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4314 ctx = perf_event_ctx_lock(event);
4315 perf_event_for_each_child(event, _perf_event_disable);
4316 perf_event_ctx_unlock(event, ctx);
4317 }
cdd6c482 4318 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
4319
4320 return 0;
4321}
4322
cdd6c482 4323static int perf_event_index(struct perf_event *event)
194002b2 4324{
a4eaf7f1
PZ
4325 if (event->hw.state & PERF_HES_STOPPED)
4326 return 0;
4327
cdd6c482 4328 if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b2
PZ
4329 return 0;
4330
35edc2a5 4331 return event->pmu->event_idx(event);
194002b2
PZ
4332}
4333
c4794295 4334static void calc_timer_values(struct perf_event *event,
e3f3541c 4335 u64 *now,
7f310a5d
EM
4336 u64 *enabled,
4337 u64 *running)
c4794295 4338{
e3f3541c 4339 u64 ctx_time;
c4794295 4340
e3f3541c
PZ
4341 *now = perf_clock();
4342 ctx_time = event->shadow_ctx_time + *now;
c4794295
EM
4343 *enabled = ctx_time - event->tstamp_enabled;
4344 *running = ctx_time - event->tstamp_running;
4345}
4346
fa731587
PZ
4347static void perf_event_init_userpage(struct perf_event *event)
4348{
4349 struct perf_event_mmap_page *userpg;
4350 struct ring_buffer *rb;
4351
4352 rcu_read_lock();
4353 rb = rcu_dereference(event->rb);
4354 if (!rb)
4355 goto unlock;
4356
4357 userpg = rb->user_page;
4358
4359 /* Allow new userspace to detect that bit 0 is deprecated */
4360 userpg->cap_bit0_is_deprecated = 1;
4361 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
e8c6deac
AS
4362 userpg->data_offset = PAGE_SIZE;
4363 userpg->data_size = perf_data_size(rb);
fa731587
PZ
4364
4365unlock:
4366 rcu_read_unlock();
4367}
4368
c1317ec2
AL
4369void __weak arch_perf_update_userpage(
4370 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
e3f3541c
PZ
4371{
4372}
4373
38ff667b
PZ
4374/*
4375 * Callers need to ensure there can be no nesting of this function, otherwise
4376 * the seqlock logic goes bad. We can not serialize this because the arch
4377 * code calls this from NMI context.
4378 */
cdd6c482 4379void perf_event_update_userpage(struct perf_event *event)
37d81828 4380{
cdd6c482 4381 struct perf_event_mmap_page *userpg;
76369139 4382 struct ring_buffer *rb;
e3f3541c 4383 u64 enabled, running, now;
38ff667b
PZ
4384
4385 rcu_read_lock();
5ec4c599
PZ
4386 rb = rcu_dereference(event->rb);
4387 if (!rb)
4388 goto unlock;
4389
0d641208
EM
4390 /*
4391 * compute total_time_enabled, total_time_running
4392 * based on snapshot values taken when the event
4393 * was last scheduled in.
4394 *
4395 * we cannot simply called update_context_time()
4396 * because of locking issue as we can be called in
4397 * NMI context
4398 */
e3f3541c 4399 calc_timer_values(event, &now, &enabled, &running);
38ff667b 4400
76369139 4401 userpg = rb->user_page;
7b732a75
PZ
4402 /*
4403 * Disable preemption so as to not let the corresponding user-space
4404 * spin too long if we get preempted.
4405 */
4406 preempt_disable();
37d81828 4407 ++userpg->lock;
92f22a38 4408 barrier();
cdd6c482 4409 userpg->index = perf_event_index(event);
b5e58793 4410 userpg->offset = perf_event_count(event);
365a4038 4411 if (userpg->index)
e7850595 4412 userpg->offset -= local64_read(&event->hw.prev_count);
7b732a75 4413
0d641208 4414 userpg->time_enabled = enabled +
cdd6c482 4415 atomic64_read(&event->child_total_time_enabled);
7f8b4e4e 4416
0d641208 4417 userpg->time_running = running +
cdd6c482 4418 atomic64_read(&event->child_total_time_running);
7f8b4e4e 4419
c1317ec2 4420 arch_perf_update_userpage(event, userpg, now);
e3f3541c 4421
92f22a38 4422 barrier();
37d81828 4423 ++userpg->lock;
7b732a75 4424 preempt_enable();
38ff667b 4425unlock:
7b732a75 4426 rcu_read_unlock();
37d81828
PM
4427}
4428
906010b2
PZ
4429static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4430{
4431 struct perf_event *event = vma->vm_file->private_data;
76369139 4432 struct ring_buffer *rb;
906010b2
PZ
4433 int ret = VM_FAULT_SIGBUS;
4434
4435 if (vmf->flags & FAULT_FLAG_MKWRITE) {
4436 if (vmf->pgoff == 0)
4437 ret = 0;
4438 return ret;
4439 }
4440
4441 rcu_read_lock();
76369139
FW
4442 rb = rcu_dereference(event->rb);
4443 if (!rb)
906010b2
PZ
4444 goto unlock;
4445
4446 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
4447 goto unlock;
4448
76369139 4449 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b2
PZ
4450 if (!vmf->page)
4451 goto unlock;
4452
4453 get_page(vmf->page);
4454 vmf->page->mapping = vma->vm_file->f_mapping;
4455 vmf->page->index = vmf->pgoff;
4456
4457 ret = 0;
4458unlock:
4459 rcu_read_unlock();
4460
4461 return ret;
4462}
4463
10c6db11
PZ
4464static void ring_buffer_attach(struct perf_event *event,
4465 struct ring_buffer *rb)
4466{
b69cf536 4467 struct ring_buffer *old_rb = NULL;
10c6db11
PZ
4468 unsigned long flags;
4469
b69cf536
PZ
4470 if (event->rb) {
4471 /*
4472 * Should be impossible, we set this when removing
4473 * event->rb_entry and wait/clear when adding event->rb_entry.
4474 */
4475 WARN_ON_ONCE(event->rcu_pending);
10c6db11 4476
b69cf536 4477 old_rb = event->rb;
b69cf536
PZ
4478 spin_lock_irqsave(&old_rb->event_lock, flags);
4479 list_del_rcu(&event->rb_entry);
4480 spin_unlock_irqrestore(&old_rb->event_lock, flags);
10c6db11 4481
2f993cf0
ON
4482 event->rcu_batches = get_state_synchronize_rcu();
4483 event->rcu_pending = 1;
b69cf536 4484 }
10c6db11 4485
b69cf536 4486 if (rb) {
2f993cf0
ON
4487 if (event->rcu_pending) {
4488 cond_synchronize_rcu(event->rcu_batches);
4489 event->rcu_pending = 0;
4490 }
4491
b69cf536
PZ
4492 spin_lock_irqsave(&rb->event_lock, flags);
4493 list_add_rcu(&event->rb_entry, &rb->event_list);
4494 spin_unlock_irqrestore(&rb->event_lock, flags);
4495 }
4496
4497 rcu_assign_pointer(event->rb, rb);
4498
4499 if (old_rb) {
4500 ring_buffer_put(old_rb);
4501 /*
4502 * Since we detached before setting the new rb, so that we
4503 * could attach the new rb, we could have missed a wakeup.
4504 * Provide it now.
4505 */
4506 wake_up_all(&event->waitq);
4507 }
10c6db11
PZ
4508}
4509
4510static void ring_buffer_wakeup(struct perf_event *event)
4511{
4512 struct ring_buffer *rb;
4513
4514 rcu_read_lock();
4515 rb = rcu_dereference(event->rb);
9bb5d40c
PZ
4516 if (rb) {
4517 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
4518 wake_up_all(&event->waitq);
4519 }
10c6db11
PZ
4520 rcu_read_unlock();
4521}
4522
fdc26706 4523struct ring_buffer *ring_buffer_get(struct perf_event *event)
7b732a75 4524{
76369139 4525 struct ring_buffer *rb;
7b732a75 4526
ac9721f3 4527 rcu_read_lock();
76369139
FW
4528 rb = rcu_dereference(event->rb);
4529 if (rb) {
4530 if (!atomic_inc_not_zero(&rb->refcount))
4531 rb = NULL;
ac9721f3
PZ
4532 }
4533 rcu_read_unlock();
4534
76369139 4535 return rb;
ac9721f3
PZ
4536}
4537
fdc26706 4538void ring_buffer_put(struct ring_buffer *rb)
ac9721f3 4539{
76369139 4540 if (!atomic_dec_and_test(&rb->refcount))
ac9721f3 4541 return;
7b732a75 4542
9bb5d40c 4543 WARN_ON_ONCE(!list_empty(&rb->event_list));
10c6db11 4544
76369139 4545 call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a75
PZ
4546}
4547
4548static void perf_mmap_open(struct vm_area_struct *vma)
4549{
cdd6c482 4550 struct perf_event *event = vma->vm_file->private_data;
7b732a75 4551
cdd6c482 4552 atomic_inc(&event->mmap_count);
9bb5d40c 4553 atomic_inc(&event->rb->mmap_count);
1e0fb9ec 4554
45bfb2e5
PZ
4555 if (vma->vm_pgoff)
4556 atomic_inc(&event->rb->aux_mmap_count);
4557
1e0fb9ec
AL
4558 if (event->pmu->event_mapped)
4559 event->pmu->event_mapped(event);
7b732a75
PZ
4560}
4561
9bb5d40c
PZ
4562/*
4563 * A buffer can be mmap()ed multiple times; either directly through the same
4564 * event, or through other events by use of perf_event_set_output().
4565 *
4566 * In order to undo the VM accounting done by perf_mmap() we need to destroy
4567 * the buffer here, where we still have a VM context. This means we need
4568 * to detach all events redirecting to us.
4569 */
7b732a75
PZ
4570static void perf_mmap_close(struct vm_area_struct *vma)
4571{
cdd6c482 4572 struct perf_event *event = vma->vm_file->private_data;
7b732a75 4573
b69cf536 4574 struct ring_buffer *rb = ring_buffer_get(event);
9bb5d40c
PZ
4575 struct user_struct *mmap_user = rb->mmap_user;
4576 int mmap_locked = rb->mmap_locked;
4577 unsigned long size = perf_data_size(rb);
789f90fc 4578
1e0fb9ec
AL
4579 if (event->pmu->event_unmapped)
4580 event->pmu->event_unmapped(event);
4581
45bfb2e5
PZ
4582 /*
4583 * rb->aux_mmap_count will always drop before rb->mmap_count and
4584 * event->mmap_count, so it is ok to use event->mmap_mutex to
4585 * serialize with perf_mmap here.
4586 */
4587 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
4588 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
4589 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
4590 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
4591
4592 rb_free_aux(rb);
4593 mutex_unlock(&event->mmap_mutex);
4594 }
4595
9bb5d40c
PZ
4596 atomic_dec(&rb->mmap_count);
4597
4598 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
b69cf536 4599 goto out_put;
9bb5d40c 4600
b69cf536 4601 ring_buffer_attach(event, NULL);
9bb5d40c
PZ
4602 mutex_unlock(&event->mmap_mutex);
4603
4604 /* If there's still other mmap()s of this buffer, we're done. */
b69cf536
PZ
4605 if (atomic_read(&rb->mmap_count))
4606 goto out_put;
ac9721f3 4607
9bb5d40c
PZ
4608 /*
4609 * No other mmap()s, detach from all other events that might redirect
4610 * into the now unreachable buffer. Somewhat complicated by the
4611 * fact that rb::event_lock otherwise nests inside mmap_mutex.
4612 */
4613again:
4614 rcu_read_lock();
4615 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
4616 if (!atomic_long_inc_not_zero(&event->refcount)) {
4617 /*
4618 * This event is en-route to free_event() which will
4619 * detach it and remove it from the list.
4620 */
4621 continue;
4622 }
4623 rcu_read_unlock();
789f90fc 4624
9bb5d40c
PZ
4625 mutex_lock(&event->mmap_mutex);
4626 /*
4627 * Check we didn't race with perf_event_set_output() which can
4628 * swizzle the rb from under us while we were waiting to
4629 * acquire mmap_mutex.
4630 *
4631 * If we find a different rb; ignore this event, a next
4632 * iteration will no longer find it on the list. We have to
4633 * still restart the iteration to make sure we're not now
4634 * iterating the wrong list.
4635 */
b69cf536
PZ
4636 if (event->rb == rb)
4637 ring_buffer_attach(event, NULL);
4638
cdd6c482 4639 mutex_unlock(&event->mmap_mutex);
9bb5d40c 4640 put_event(event);
ac9721f3 4641
9bb5d40c
PZ
4642 /*
4643 * Restart the iteration; either we're on the wrong list or
4644 * destroyed its integrity by doing a deletion.
4645 */
4646 goto again;
7b732a75 4647 }
9bb5d40c
PZ
4648 rcu_read_unlock();
4649
4650 /*
4651 * It could be there's still a few 0-ref events on the list; they'll
4652 * get cleaned up by free_event() -- they'll also still have their
4653 * ref on the rb and will free it whenever they are done with it.
4654 *
4655 * Aside from that, this buffer is 'fully' detached and unmapped,
4656 * undo the VM accounting.
4657 */
4658
4659 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
4660 vma->vm_mm->pinned_vm -= mmap_locked;
4661 free_uid(mmap_user);
4662
b69cf536 4663out_put:
9bb5d40c 4664 ring_buffer_put(rb); /* could be last */
37d81828
PM
4665}
4666
f0f37e2f 4667static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea8 4668 .open = perf_mmap_open,
45bfb2e5 4669 .close = perf_mmap_close, /* non mergable */
43a21ea8
PZ
4670 .fault = perf_mmap_fault,
4671 .page_mkwrite = perf_mmap_fault,
37d81828
PM
4672};
4673
4674static int perf_mmap(struct file *file, struct vm_area_struct *vma)
4675{
cdd6c482 4676 struct perf_event *event = file->private_data;
22a4f650 4677 unsigned long user_locked, user_lock_limit;
789f90fc 4678 struct user_struct *user = current_user();
22a4f650 4679 unsigned long locked, lock_limit;
45bfb2e5 4680 struct ring_buffer *rb = NULL;
7b732a75
PZ
4681 unsigned long vma_size;
4682 unsigned long nr_pages;
45bfb2e5 4683 long user_extra = 0, extra = 0;
d57e34fd 4684 int ret = 0, flags = 0;
37d81828 4685
c7920614
PZ
4686 /*
4687 * Don't allow mmap() of inherited per-task counters. This would
4688 * create a performance issue due to all children writing to the
76369139 4689 * same rb.
c7920614
PZ
4690 */
4691 if (event->cpu == -1 && event->attr.inherit)
4692 return -EINVAL;
4693
43a21ea8 4694 if (!(vma->vm_flags & VM_SHARED))
37d81828 4695 return -EINVAL;
7b732a75
PZ
4696
4697 vma_size = vma->vm_end - vma->vm_start;
45bfb2e5
PZ
4698
4699 if (vma->vm_pgoff == 0) {
4700 nr_pages = (vma_size / PAGE_SIZE) - 1;
4701 } else {
4702 /*
4703 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
4704 * mapped, all subsequent mappings should have the same size
4705 * and offset. Must be above the normal perf buffer.
4706 */
4707 u64 aux_offset, aux_size;
4708
4709 if (!event->rb)
4710 return -EINVAL;
4711
4712 nr_pages = vma_size / PAGE_SIZE;
4713
4714 mutex_lock(&event->mmap_mutex);
4715 ret = -EINVAL;
4716
4717 rb = event->rb;
4718 if (!rb)
4719 goto aux_unlock;
4720
4721 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
4722 aux_size = ACCESS_ONCE(rb->user_page->aux_size);
4723
4724 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
4725 goto aux_unlock;
4726
4727 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
4728 goto aux_unlock;
4729
4730 /* already mapped with a different offset */
4731 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
4732 goto aux_unlock;
4733
4734 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
4735 goto aux_unlock;
4736
4737 /* already mapped with a different size */
4738 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
4739 goto aux_unlock;
4740
4741 if (!is_power_of_2(nr_pages))
4742 goto aux_unlock;
4743
4744 if (!atomic_inc_not_zero(&rb->mmap_count))
4745 goto aux_unlock;
4746
4747 if (rb_has_aux(rb)) {
4748 atomic_inc(&rb->aux_mmap_count);
4749 ret = 0;
4750 goto unlock;
4751 }
4752
4753 atomic_set(&rb->aux_mmap_count, 1);
4754 user_extra = nr_pages;
4755
4756 goto accounting;
4757 }
7b732a75 4758
7730d865 4759 /*
76369139 4760 * If we have rb pages ensure they're a power-of-two number, so we
7730d865
PZ
4761 * can do bitmasks instead of modulo.
4762 */
2ed11312 4763 if (nr_pages != 0 && !is_power_of_2(nr_pages))
37d81828
PM
4764 return -EINVAL;
4765
7b732a75 4766 if (vma_size != PAGE_SIZE * (1 + nr_pages))
37d81828
PM
4767 return -EINVAL;
4768
cdd6c482 4769 WARN_ON_ONCE(event->ctx->parent_ctx);
9bb5d40c 4770again:
cdd6c482 4771 mutex_lock(&event->mmap_mutex);
76369139 4772 if (event->rb) {
9bb5d40c 4773 if (event->rb->nr_pages != nr_pages) {
ebb3c4c4 4774 ret = -EINVAL;
9bb5d40c
PZ
4775 goto unlock;
4776 }
4777
4778 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
4779 /*
4780 * Raced against perf_mmap_close() through
4781 * perf_event_set_output(). Try again, hope for better
4782 * luck.
4783 */
4784 mutex_unlock(&event->mmap_mutex);
4785 goto again;
4786 }
4787
ebb3c4c4
PZ
4788 goto unlock;
4789 }
4790
789f90fc 4791 user_extra = nr_pages + 1;
45bfb2e5
PZ
4792
4793accounting:
cdd6c482 4794 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
a3862d3f
IM
4795
4796 /*
4797 * Increase the limit linearly with more CPUs:
4798 */
4799 user_lock_limit *= num_online_cpus();
4800
789f90fc 4801 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
c5078f78 4802
789f90fc
PZ
4803 if (user_locked > user_lock_limit)
4804 extra = user_locked - user_lock_limit;
7b732a75 4805
78d7d407 4806 lock_limit = rlimit(RLIMIT_MEMLOCK);
7b732a75 4807 lock_limit >>= PAGE_SHIFT;
bc3e53f6 4808 locked = vma->vm_mm->pinned_vm + extra;
7b732a75 4809
459ec28a
IM
4810 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
4811 !capable(CAP_IPC_LOCK)) {
ebb3c4c4
PZ
4812 ret = -EPERM;
4813 goto unlock;
4814 }
7b732a75 4815
45bfb2e5 4816 WARN_ON(!rb && event->rb);
906010b2 4817
d57e34fd 4818 if (vma->vm_flags & VM_WRITE)
76369139 4819 flags |= RING_BUFFER_WRITABLE;
d57e34fd 4820
76369139 4821 if (!rb) {
45bfb2e5
PZ
4822 rb = rb_alloc(nr_pages,
4823 event->attr.watermark ? event->attr.wakeup_watermark : 0,
4824 event->cpu, flags);
26cb63ad 4825
45bfb2e5
PZ
4826 if (!rb) {
4827 ret = -ENOMEM;
4828 goto unlock;
4829 }
43a21ea8 4830
45bfb2e5
PZ
4831 atomic_set(&rb->mmap_count, 1);
4832 rb->mmap_user = get_current_user();
4833 rb->mmap_locked = extra;
26cb63ad 4834
45bfb2e5 4835 ring_buffer_attach(event, rb);
ac9721f3 4836
45bfb2e5
PZ
4837 perf_event_init_userpage(event);
4838 perf_event_update_userpage(event);
4839 } else {
1a594131
AS
4840 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
4841 event->attr.aux_watermark, flags);
45bfb2e5
PZ
4842 if (!ret)
4843 rb->aux_mmap_locked = extra;
4844 }
9a0f05cb 4845
ebb3c4c4 4846unlock:
45bfb2e5
PZ
4847 if (!ret) {
4848 atomic_long_add(user_extra, &user->locked_vm);
4849 vma->vm_mm->pinned_vm += extra;
4850
ac9721f3 4851 atomic_inc(&event->mmap_count);
45bfb2e5
PZ
4852 } else if (rb) {
4853 atomic_dec(&rb->mmap_count);
4854 }
4855aux_unlock:
cdd6c482 4856 mutex_unlock(&event->mmap_mutex);
37d81828 4857
9bb5d40c
PZ
4858 /*
4859 * Since pinned accounting is per vm we cannot allow fork() to copy our
4860 * vma.
4861 */
26cb63ad 4862 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
37d81828 4863 vma->vm_ops = &perf_mmap_vmops;
7b732a75 4864
1e0fb9ec
AL
4865 if (event->pmu->event_mapped)
4866 event->pmu->event_mapped(event);
4867
7b732a75 4868 return ret;
37d81828
PM
4869}
4870
3c446b3d
PZ
4871static int perf_fasync(int fd, struct file *filp, int on)
4872{
496ad9aa 4873 struct inode *inode = file_inode(filp);
cdd6c482 4874 struct perf_event *event = filp->private_data;
3c446b3d
PZ
4875 int retval;
4876
5955102c 4877 inode_lock(inode);
cdd6c482 4878 retval = fasync_helper(fd, filp, on, &event->fasync);
5955102c 4879 inode_unlock(inode);
3c446b3d
PZ
4880
4881 if (retval < 0)
4882 return retval;
4883
4884 return 0;
4885}
4886
0793a61d 4887static const struct file_operations perf_fops = {
3326c1ce 4888 .llseek = no_llseek,
0793a61d
TG
4889 .release = perf_release,
4890 .read = perf_read,
4891 .poll = perf_poll,
d859e29f 4892 .unlocked_ioctl = perf_ioctl,
b3f20785 4893 .compat_ioctl = perf_compat_ioctl,
37d81828 4894 .mmap = perf_mmap,
3c446b3d 4895 .fasync = perf_fasync,
0793a61d
TG
4896};
4897
925d519a 4898/*
cdd6c482 4899 * Perf event wakeup
925d519a
PZ
4900 *
4901 * If there's data, ensure we set the poll() state and publish everything
4902 * to user-space before waking everybody up.
4903 */
4904
fed66e2c
PZ
4905static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
4906{
4907 /* only the parent has fasync state */
4908 if (event->parent)
4909 event = event->parent;
4910 return &event->fasync;
4911}
4912
cdd6c482 4913void perf_event_wakeup(struct perf_event *event)
925d519a 4914{
10c6db11 4915 ring_buffer_wakeup(event);
4c9e2542 4916
cdd6c482 4917 if (event->pending_kill) {
fed66e2c 4918 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
cdd6c482 4919 event->pending_kill = 0;
4c9e2542 4920 }
925d519a
PZ
4921}
4922
e360adbe 4923static void perf_pending_event(struct irq_work *entry)
79f14641 4924{
cdd6c482
IM
4925 struct perf_event *event = container_of(entry,
4926 struct perf_event, pending);
d525211f
PZ
4927 int rctx;
4928
4929 rctx = perf_swevent_get_recursion_context();
4930 /*
4931 * If we 'fail' here, that's OK, it means recursion is already disabled
4932 * and we won't recurse 'further'.
4933 */
79f14641 4934
cdd6c482
IM
4935 if (event->pending_disable) {
4936 event->pending_disable = 0;
fae3fde6 4937 perf_event_disable_local(event);
79f14641
PZ
4938 }
4939
cdd6c482
IM
4940 if (event->pending_wakeup) {
4941 event->pending_wakeup = 0;
4942 perf_event_wakeup(event);
79f14641 4943 }
d525211f
PZ
4944
4945 if (rctx >= 0)
4946 perf_swevent_put_recursion_context(rctx);
79f14641
PZ
4947}
4948
39447b38
ZY
4949/*
4950 * We assume there is only KVM supporting the callbacks.
4951 * Later on, we might change it to a list if there is
4952 * another virtualization implementation supporting the callbacks.
4953 */
4954struct perf_guest_info_callbacks *perf_guest_cbs;
4955
4956int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4957{
4958 perf_guest_cbs = cbs;
4959 return 0;
4960}
4961EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
4962
4963int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4964{
4965 perf_guest_cbs = NULL;
4966 return 0;
4967}
4968EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
4969
4018994f
JO
4970static void
4971perf_output_sample_regs(struct perf_output_handle *handle,
4972 struct pt_regs *regs, u64 mask)
4973{
4974 int bit;
4975
4976 for_each_set_bit(bit, (const unsigned long *) &mask,
4977 sizeof(mask) * BITS_PER_BYTE) {
4978 u64 val;
4979
4980 val = perf_reg_value(regs, bit);
4981 perf_output_put(handle, val);
4982 }
4983}
4984
60e2364e 4985static void perf_sample_regs_user(struct perf_regs *regs_user,
88a7c26a
AL
4986 struct pt_regs *regs,
4987 struct pt_regs *regs_user_copy)
4018994f 4988{
88a7c26a
AL
4989 if (user_mode(regs)) {
4990 regs_user->abi = perf_reg_abi(current);
2565711f 4991 regs_user->regs = regs;
88a7c26a
AL
4992 } else if (current->mm) {
4993 perf_get_regs_user(regs_user, regs, regs_user_copy);
2565711f
PZ
4994 } else {
4995 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
4996 regs_user->regs = NULL;
4018994f
JO
4997 }
4998}
4999
60e2364e
SE
5000static void perf_sample_regs_intr(struct perf_regs *regs_intr,
5001 struct pt_regs *regs)
5002{
5003 regs_intr->regs = regs;
5004 regs_intr->abi = perf_reg_abi(current);
5005}
5006
5007
c5ebcedb
JO
5008/*
5009 * Get remaining task size from user stack pointer.
5010 *
5011 * It'd be better to take stack vma map and limit this more
5012 * precisly, but there's no way to get it safely under interrupt,
5013 * so using TASK_SIZE as limit.
5014 */
5015static u64 perf_ustack_task_size(struct pt_regs *regs)
5016{
5017 unsigned long addr = perf_user_stack_pointer(regs);
5018
5019 if (!addr || addr >= TASK_SIZE)
5020 return 0;
5021
5022 return TASK_SIZE - addr;
5023}
5024
5025static u16
5026perf_sample_ustack_size(u16 stack_size, u16 header_size,
5027 struct pt_regs *regs)
5028{
5029 u64 task_size;
5030
5031 /* No regs, no stack pointer, no dump. */
5032 if (!regs)
5033 return 0;
5034
5035 /*
5036 * Check if we fit in with the requested stack size into the:
5037 * - TASK_SIZE
5038 * If we don't, we limit the size to the TASK_SIZE.
5039 *
5040 * - remaining sample size
5041 * If we don't, we customize the stack size to
5042 * fit in to the remaining sample size.
5043 */
5044
5045 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
5046 stack_size = min(stack_size, (u16) task_size);
5047
5048 /* Current header size plus static size and dynamic size. */
5049 header_size += 2 * sizeof(u64);
5050
5051 /* Do we fit in with the current stack dump size? */
5052 if ((u16) (header_size + stack_size) < header_size) {
5053 /*
5054 * If we overflow the maximum size for the sample,
5055 * we customize the stack dump size to fit in.
5056 */
5057 stack_size = USHRT_MAX - header_size - sizeof(u64);
5058 stack_size = round_up(stack_size, sizeof(u64));
5059 }
5060
5061 return stack_size;
5062}
5063
5064static void
5065perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5066 struct pt_regs *regs)
5067{
5068 /* Case of a kernel thread, nothing to dump */
5069 if (!regs) {
5070 u64 size = 0;
5071 perf_output_put(handle, size);
5072 } else {
5073 unsigned long sp;
5074 unsigned int rem;
5075 u64 dyn_size;
5076
5077 /*
5078 * We dump:
5079 * static size
5080 * - the size requested by user or the best one we can fit
5081 * in to the sample max size
5082 * data
5083 * - user stack dump data
5084 * dynamic size
5085 * - the actual dumped size
5086 */
5087
5088 /* Static size. */
5089 perf_output_put(handle, dump_size);
5090
5091 /* Data. */
5092 sp = perf_user_stack_pointer(regs);
5093 rem = __output_copy_user(handle, (void *) sp, dump_size);
5094 dyn_size = dump_size - rem;
5095
5096 perf_output_skip(handle, rem);
5097
5098 /* Dynamic size. */
5099 perf_output_put(handle, dyn_size);
5100 }
5101}
5102
c980d109
ACM
5103static void __perf_event_header__init_id(struct perf_event_header *header,
5104 struct perf_sample_data *data,
5105 struct perf_event *event)
6844c09d
ACM
5106{
5107 u64 sample_type = event->attr.sample_type;
5108
5109 data->type = sample_type;
5110 header->size += event->id_header_size;
5111
5112 if (sample_type & PERF_SAMPLE_TID) {
5113 /* namespace issues */
5114 data->tid_entry.pid = perf_event_pid(event, current);
5115 data->tid_entry.tid = perf_event_tid(event, current);
5116 }
5117
5118 if (sample_type & PERF_SAMPLE_TIME)
34f43927 5119 data->time = perf_event_clock(event);
6844c09d 5120
ff3d527c 5121 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
6844c09d
ACM
5122 data->id = primary_event_id(event);
5123
5124 if (sample_type & PERF_SAMPLE_STREAM_ID)
5125 data->stream_id = event->id;
5126
5127 if (sample_type & PERF_SAMPLE_CPU) {
5128 data->cpu_entry.cpu = raw_smp_processor_id();
5129 data->cpu_entry.reserved = 0;
5130 }
5131}
5132
76369139
FW
5133void perf_event_header__init_id(struct perf_event_header *header,
5134 struct perf_sample_data *data,
5135 struct perf_event *event)
c980d109
ACM
5136{
5137 if (event->attr.sample_id_all)
5138 __perf_event_header__init_id(header, data, event);
5139}
5140
5141static void __perf_event__output_id_sample(struct perf_output_handle *handle,
5142 struct perf_sample_data *data)
5143{
5144 u64 sample_type = data->type;
5145
5146 if (sample_type & PERF_SAMPLE_TID)
5147 perf_output_put(handle, data->tid_entry);
5148
5149 if (sample_type & PERF_SAMPLE_TIME)
5150 perf_output_put(handle, data->time);
5151
5152 if (sample_type & PERF_SAMPLE_ID)
5153 perf_output_put(handle, data->id);
5154
5155 if (sample_type & PERF_SAMPLE_STREAM_ID)
5156 perf_output_put(handle, data->stream_id);
5157
5158 if (sample_type & PERF_SAMPLE_CPU)
5159 perf_output_put(handle, data->cpu_entry);
ff3d527c
AH
5160
5161 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5162 perf_output_put(handle, data->id);
c980d109
ACM
5163}
5164
76369139
FW
5165void perf_event__output_id_sample(struct perf_event *event,
5166 struct perf_output_handle *handle,
5167 struct perf_sample_data *sample)
c980d109
ACM
5168{
5169 if (event->attr.sample_id_all)
5170 __perf_event__output_id_sample(handle, sample);
5171}
5172
3dab77fb 5173static void perf_output_read_one(struct perf_output_handle *handle,
eed01528
SE
5174 struct perf_event *event,
5175 u64 enabled, u64 running)
3dab77fb 5176{
cdd6c482 5177 u64 read_format = event->attr.read_format;
3dab77fb
PZ
5178 u64 values[4];
5179 int n = 0;
5180
b5e58793 5181 values[n++] = perf_event_count(event);
3dab77fb 5182 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
eed01528 5183 values[n++] = enabled +
cdd6c482 5184 atomic64_read(&event->child_total_time_enabled);
3dab77fb
PZ
5185 }
5186 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
eed01528 5187 values[n++] = running +
cdd6c482 5188 atomic64_read(&event->child_total_time_running);
3dab77fb
PZ
5189 }
5190 if (read_format & PERF_FORMAT_ID)
cdd6c482 5191 values[n++] = primary_event_id(event);
3dab77fb 5192
76369139 5193 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
5194}
5195
5196/*
cdd6c482 5197 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3dab77fb
PZ
5198 */
5199static void perf_output_read_group(struct perf_output_handle *handle,
eed01528
SE
5200 struct perf_event *event,
5201 u64 enabled, u64 running)
3dab77fb 5202{
cdd6c482
IM
5203 struct perf_event *leader = event->group_leader, *sub;
5204 u64 read_format = event->attr.read_format;
3dab77fb
PZ
5205 u64 values[5];
5206 int n = 0;
5207
5208 values[n++] = 1 + leader->nr_siblings;
5209
5210 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
eed01528 5211 values[n++] = enabled;
3dab77fb
PZ
5212
5213 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
eed01528 5214 values[n++] = running;
3dab77fb 5215
cdd6c482 5216 if (leader != event)
3dab77fb
PZ
5217 leader->pmu->read(leader);
5218
b5e58793 5219 values[n++] = perf_event_count(leader);
3dab77fb 5220 if (read_format & PERF_FORMAT_ID)
cdd6c482 5221 values[n++] = primary_event_id(leader);
3dab77fb 5222
76369139 5223 __output_copy(handle, values, n * sizeof(u64));
3dab77fb 5224
65abc865 5225 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3dab77fb
PZ
5226 n = 0;
5227
6f5ab001
JO
5228 if ((sub != event) &&
5229 (sub->state == PERF_EVENT_STATE_ACTIVE))
3dab77fb
PZ
5230 sub->pmu->read(sub);
5231
b5e58793 5232 values[n++] = perf_event_count(sub);
3dab77fb 5233 if (read_format & PERF_FORMAT_ID)
cdd6c482 5234 values[n++] = primary_event_id(sub);
3dab77fb 5235
76369139 5236 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
5237 }
5238}
5239
eed01528
SE
5240#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
5241 PERF_FORMAT_TOTAL_TIME_RUNNING)
5242
3dab77fb 5243static void perf_output_read(struct perf_output_handle *handle,
cdd6c482 5244 struct perf_event *event)
3dab77fb 5245{
e3f3541c 5246 u64 enabled = 0, running = 0, now;
eed01528
SE
5247 u64 read_format = event->attr.read_format;
5248
5249 /*
5250 * compute total_time_enabled, total_time_running
5251 * based on snapshot values taken when the event
5252 * was last scheduled in.
5253 *
5254 * we cannot simply called update_context_time()
5255 * because of locking issue as we are called in
5256 * NMI context
5257 */
c4794295 5258 if (read_format & PERF_FORMAT_TOTAL_TIMES)
e3f3541c 5259 calc_timer_values(event, &now, &enabled, &running);
eed01528 5260
cdd6c482 5261 if (event->attr.read_format & PERF_FORMAT_GROUP)
eed01528 5262 perf_output_read_group(handle, event, enabled, running);
3dab77fb 5263 else
eed01528 5264 perf_output_read_one(handle, event, enabled, running);
3dab77fb
PZ
5265}
5266
5622f295
MM
5267void perf_output_sample(struct perf_output_handle *handle,
5268 struct perf_event_header *header,
5269 struct perf_sample_data *data,
cdd6c482 5270 struct perf_event *event)
5622f295
MM
5271{
5272 u64 sample_type = data->type;
5273
5274 perf_output_put(handle, *header);
5275
ff3d527c
AH
5276 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5277 perf_output_put(handle, data->id);
5278
5622f295
MM
5279 if (sample_type & PERF_SAMPLE_IP)
5280 perf_output_put(handle, data->ip);
5281
5282 if (sample_type & PERF_SAMPLE_TID)
5283 perf_output_put(handle, data->tid_entry);
5284
5285 if (sample_type & PERF_SAMPLE_TIME)
5286 perf_output_put(handle, data->time);
5287
5288 if (sample_type & PERF_SAMPLE_ADDR)
5289 perf_output_put(handle, data->addr);
5290
5291 if (sample_type & PERF_SAMPLE_ID)
5292 perf_output_put(handle, data->id);
5293
5294 if (sample_type & PERF_SAMPLE_STREAM_ID)
5295 perf_output_put(handle, data->stream_id);
5296
5297 if (sample_type & PERF_SAMPLE_CPU)
5298 perf_output_put(handle, data->cpu_entry);
5299
5300 if (sample_type & PERF_SAMPLE_PERIOD)
5301 perf_output_put(handle, data->period);
5302
5303 if (sample_type & PERF_SAMPLE_READ)
cdd6c482 5304 perf_output_read(handle, event);
5622f295
MM
5305
5306 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5307 if (data->callchain) {
5308 int size = 1;
5309
5310 if (data->callchain)
5311 size += data->callchain->nr;
5312
5313 size *= sizeof(u64);
5314
76369139 5315 __output_copy(handle, data->callchain, size);
5622f295
MM
5316 } else {
5317 u64 nr = 0;
5318 perf_output_put(handle, nr);
5319 }
5320 }
5321
5322 if (sample_type & PERF_SAMPLE_RAW) {
5323 if (data->raw) {
fa128e6a
AS
5324 u32 raw_size = data->raw->size;
5325 u32 real_size = round_up(raw_size + sizeof(u32),
5326 sizeof(u64)) - sizeof(u32);
5327 u64 zero = 0;
5328
5329 perf_output_put(handle, real_size);
5330 __output_copy(handle, data->raw->data, raw_size);
5331 if (real_size - raw_size)
5332 __output_copy(handle, &zero, real_size - raw_size);
5622f295
MM
5333 } else {
5334 struct {
5335 u32 size;
5336 u32 data;
5337 } raw = {
5338 .size = sizeof(u32),
5339 .data = 0,
5340 };
5341 perf_output_put(handle, raw);
5342 }
5343 }
a7ac67ea 5344
bce38cd5
SE
5345 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5346 if (data->br_stack) {
5347 size_t size;
5348
5349 size = data->br_stack->nr
5350 * sizeof(struct perf_branch_entry);
5351
5352 perf_output_put(handle, data->br_stack->nr);
5353 perf_output_copy(handle, data->br_stack->entries, size);
5354 } else {
5355 /*
5356 * we always store at least the value of nr
5357 */
5358 u64 nr = 0;
5359 perf_output_put(handle, nr);
5360 }
5361 }
4018994f
JO
5362
5363 if (sample_type & PERF_SAMPLE_REGS_USER) {
5364 u64 abi = data->regs_user.abi;
5365
5366 /*
5367 * If there are no regs to dump, notice it through
5368 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5369 */
5370 perf_output_put(handle, abi);
5371
5372 if (abi) {
5373 u64 mask = event->attr.sample_regs_user;
5374 perf_output_sample_regs(handle,
5375 data->regs_user.regs,
5376 mask);
5377 }
5378 }
c5ebcedb 5379
a5cdd40c 5380 if (sample_type & PERF_SAMPLE_STACK_USER) {
c5ebcedb
JO
5381 perf_output_sample_ustack(handle,
5382 data->stack_user_size,
5383 data->regs_user.regs);
a5cdd40c 5384 }
c3feedf2
AK
5385
5386 if (sample_type & PERF_SAMPLE_WEIGHT)
5387 perf_output_put(handle, data->weight);
d6be9ad6
SE
5388
5389 if (sample_type & PERF_SAMPLE_DATA_SRC)
5390 perf_output_put(handle, data->data_src.val);
a5cdd40c 5391
fdfbbd07
AK
5392 if (sample_type & PERF_SAMPLE_TRANSACTION)
5393 perf_output_put(handle, data->txn);
5394
60e2364e
SE
5395 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5396 u64 abi = data->regs_intr.abi;
5397 /*
5398 * If there are no regs to dump, notice it through
5399 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5400 */
5401 perf_output_put(handle, abi);
5402
5403 if (abi) {
5404 u64 mask = event->attr.sample_regs_intr;
5405
5406 perf_output_sample_regs(handle,
5407 data->regs_intr.regs,
5408 mask);
5409 }
5410 }
5411
a5cdd40c
PZ
5412 if (!event->attr.watermark) {
5413 int wakeup_events = event->attr.wakeup_events;
5414
5415 if (wakeup_events) {
5416 struct ring_buffer *rb = handle->rb;
5417 int events = local_inc_return(&rb->events);
5418
5419 if (events >= wakeup_events) {
5420 local_sub(wakeup_events, &rb->events);
5421 local_inc(&rb->wakeup);
5422 }
5423 }
5424 }
5622f295
MM
5425}
5426
5427void perf_prepare_sample(struct perf_event_header *header,
5428 struct perf_sample_data *data,
cdd6c482 5429 struct perf_event *event,
5622f295 5430 struct pt_regs *regs)
7b732a75 5431{
cdd6c482 5432 u64 sample_type = event->attr.sample_type;
7b732a75 5433
cdd6c482 5434 header->type = PERF_RECORD_SAMPLE;
c320c7b7 5435 header->size = sizeof(*header) + event->header_size;
5622f295
MM
5436
5437 header->misc = 0;
5438 header->misc |= perf_misc_flags(regs);
6fab0192 5439
c980d109 5440 __perf_event_header__init_id(header, data, event);
6844c09d 5441
c320c7b7 5442 if (sample_type & PERF_SAMPLE_IP)
5622f295
MM
5443 data->ip = perf_instruction_pointer(regs);
5444
b23f3325 5445 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5622f295 5446 int size = 1;
394ee076 5447
e6dab5ff 5448 data->callchain = perf_callchain(event, regs);
5622f295
MM
5449
5450 if (data->callchain)
5451 size += data->callchain->nr;
5452
5453 header->size += size * sizeof(u64);
394ee076
PZ
5454 }
5455
3a43ce68 5456 if (sample_type & PERF_SAMPLE_RAW) {
a044560c
PZ
5457 int size = sizeof(u32);
5458
5459 if (data->raw)
5460 size += data->raw->size;
5461 else
5462 size += sizeof(u32);
5463
fa128e6a 5464 header->size += round_up(size, sizeof(u64));
7f453c24 5465 }
bce38cd5
SE
5466
5467 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5468 int size = sizeof(u64); /* nr */
5469 if (data->br_stack) {
5470 size += data->br_stack->nr
5471 * sizeof(struct perf_branch_entry);
5472 }
5473 header->size += size;
5474 }
4018994f 5475
2565711f 5476 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
88a7c26a
AL
5477 perf_sample_regs_user(&data->regs_user, regs,
5478 &data->regs_user_copy);
2565711f 5479
4018994f
JO
5480 if (sample_type & PERF_SAMPLE_REGS_USER) {
5481 /* regs dump ABI info */
5482 int size = sizeof(u64);
5483
4018994f
JO
5484 if (data->regs_user.regs) {
5485 u64 mask = event->attr.sample_regs_user;
5486 size += hweight64(mask) * sizeof(u64);
5487 }
5488
5489 header->size += size;
5490 }
c5ebcedb
JO
5491
5492 if (sample_type & PERF_SAMPLE_STACK_USER) {
5493 /*
5494 * Either we need PERF_SAMPLE_STACK_USER bit to be allways
5495 * processed as the last one or have additional check added
5496 * in case new sample type is added, because we could eat
5497 * up the rest of the sample size.
5498 */
c5ebcedb
JO
5499 u16 stack_size = event->attr.sample_stack_user;
5500 u16 size = sizeof(u64);
5501
c5ebcedb 5502 stack_size = perf_sample_ustack_size(stack_size, header->size,
2565711f 5503 data->regs_user.regs);
c5ebcedb
JO
5504
5505 /*
5506 * If there is something to dump, add space for the dump
5507 * itself and for the field that tells the dynamic size,
5508 * which is how many have been actually dumped.
5509 */
5510 if (stack_size)
5511 size += sizeof(u64) + stack_size;
5512
5513 data->stack_user_size = stack_size;
5514 header->size += size;
5515 }
60e2364e
SE
5516
5517 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5518 /* regs dump ABI info */
5519 int size = sizeof(u64);
5520
5521 perf_sample_regs_intr(&data->regs_intr, regs);
5522
5523 if (data->regs_intr.regs) {
5524 u64 mask = event->attr.sample_regs_intr;
5525
5526 size += hweight64(mask) * sizeof(u64);
5527 }
5528
5529 header->size += size;
5530 }
5622f295 5531}
7f453c24 5532
21509084
YZ
5533void perf_event_output(struct perf_event *event,
5534 struct perf_sample_data *data,
5535 struct pt_regs *regs)
5622f295
MM
5536{
5537 struct perf_output_handle handle;
5538 struct perf_event_header header;
689802b2 5539
927c7a9e
FW
5540 /* protect the callchain buffers */
5541 rcu_read_lock();
5542
cdd6c482 5543 perf_prepare_sample(&header, data, event, regs);
5c148194 5544
a7ac67ea 5545 if (perf_output_begin(&handle, event, header.size))
927c7a9e 5546 goto exit;
0322cd6e 5547
cdd6c482 5548 perf_output_sample(&handle, &header, data, event);
f413cdb8 5549
8a057d84 5550 perf_output_end(&handle);
927c7a9e
FW
5551
5552exit:
5553 rcu_read_unlock();
0322cd6e
PZ
5554}
5555
38b200d6 5556/*
cdd6c482 5557 * read event_id
38b200d6
PZ
5558 */
5559
5560struct perf_read_event {
5561 struct perf_event_header header;
5562
5563 u32 pid;
5564 u32 tid;
38b200d6
PZ
5565};
5566
5567static void
cdd6c482 5568perf_event_read_event(struct perf_event *event,
38b200d6
PZ
5569 struct task_struct *task)
5570{
5571 struct perf_output_handle handle;
c980d109 5572 struct perf_sample_data sample;
dfc65094 5573 struct perf_read_event read_event = {
38b200d6 5574 .header = {
cdd6c482 5575 .type = PERF_RECORD_READ,
38b200d6 5576 .misc = 0,
c320c7b7 5577 .size = sizeof(read_event) + event->read_size,
38b200d6 5578 },
cdd6c482
IM
5579 .pid = perf_event_pid(event, task),
5580 .tid = perf_event_tid(event, task),
38b200d6 5581 };
3dab77fb 5582 int ret;
38b200d6 5583
c980d109 5584 perf_event_header__init_id(&read_event.header, &sample, event);
a7ac67ea 5585 ret = perf_output_begin(&handle, event, read_event.header.size);
38b200d6
PZ
5586 if (ret)
5587 return;
5588
dfc65094 5589 perf_output_put(&handle, read_event);
cdd6c482 5590 perf_output_read(&handle, event);
c980d109 5591 perf_event__output_id_sample(event, &handle, &sample);
3dab77fb 5592
38b200d6
PZ
5593 perf_output_end(&handle);
5594}
5595
52d857a8
JO
5596typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
5597
5598static void
5599perf_event_aux_ctx(struct perf_event_context *ctx,
52d857a8
JO
5600 perf_event_aux_output_cb output,
5601 void *data)
5602{
5603 struct perf_event *event;
5604
5605 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5606 if (event->state < PERF_EVENT_STATE_INACTIVE)
5607 continue;
5608 if (!event_filter_match(event))
5609 continue;
67516844 5610 output(event, data);
52d857a8
JO
5611 }
5612}
5613
4e93ad60
JO
5614static void
5615perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
5616 struct perf_event_context *task_ctx)
5617{
5618 rcu_read_lock();
5619 preempt_disable();
5620 perf_event_aux_ctx(task_ctx, output, data);
5621 preempt_enable();
5622 rcu_read_unlock();
5623}
5624
52d857a8 5625static void
67516844 5626perf_event_aux(perf_event_aux_output_cb output, void *data,
52d857a8
JO
5627 struct perf_event_context *task_ctx)
5628{
5629 struct perf_cpu_context *cpuctx;
5630 struct perf_event_context *ctx;
5631 struct pmu *pmu;
5632 int ctxn;
5633
4e93ad60
JO
5634 /*
5635 * If we have task_ctx != NULL we only notify
5636 * the task context itself. The task_ctx is set
5637 * only for EXIT events before releasing task
5638 * context.
5639 */
5640 if (task_ctx) {
5641 perf_event_aux_task_ctx(output, data, task_ctx);
5642 return;
5643 }
5644
52d857a8
JO
5645 rcu_read_lock();
5646 list_for_each_entry_rcu(pmu, &pmus, entry) {
5647 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
5648 if (cpuctx->unique_pmu != pmu)
5649 goto next;
67516844 5650 perf_event_aux_ctx(&cpuctx->ctx, output, data);
52d857a8
JO
5651 ctxn = pmu->task_ctx_nr;
5652 if (ctxn < 0)
5653 goto next;
5654 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
5655 if (ctx)
67516844 5656 perf_event_aux_ctx(ctx, output, data);
52d857a8
JO
5657next:
5658 put_cpu_ptr(pmu->pmu_cpu_context);
5659 }
52d857a8
JO
5660 rcu_read_unlock();
5661}
5662
60313ebe 5663/*
9f498cc5
PZ
5664 * task tracking -- fork/exit
5665 *
13d7a241 5666 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
60313ebe
PZ
5667 */
5668
9f498cc5 5669struct perf_task_event {
3a80b4a3 5670 struct task_struct *task;
cdd6c482 5671 struct perf_event_context *task_ctx;
60313ebe
PZ
5672
5673 struct {
5674 struct perf_event_header header;
5675
5676 u32 pid;
5677 u32 ppid;
9f498cc5
PZ
5678 u32 tid;
5679 u32 ptid;
393b2ad8 5680 u64 time;
cdd6c482 5681 } event_id;
60313ebe
PZ
5682};
5683
67516844
JO
5684static int perf_event_task_match(struct perf_event *event)
5685{
13d7a241
SE
5686 return event->attr.comm || event->attr.mmap ||
5687 event->attr.mmap2 || event->attr.mmap_data ||
5688 event->attr.task;
67516844
JO
5689}
5690
cdd6c482 5691static void perf_event_task_output(struct perf_event *event,
52d857a8 5692 void *data)
60313ebe 5693{
52d857a8 5694 struct perf_task_event *task_event = data;
60313ebe 5695 struct perf_output_handle handle;
c980d109 5696 struct perf_sample_data sample;
9f498cc5 5697 struct task_struct *task = task_event->task;
c980d109 5698 int ret, size = task_event->event_id.header.size;
8bb39f9a 5699
67516844
JO
5700 if (!perf_event_task_match(event))
5701 return;
5702
c980d109 5703 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
60313ebe 5704
c980d109 5705 ret = perf_output_begin(&handle, event,
a7ac67ea 5706 task_event->event_id.header.size);
ef60777c 5707 if (ret)
c980d109 5708 goto out;
60313ebe 5709
cdd6c482
IM
5710 task_event->event_id.pid = perf_event_pid(event, task);
5711 task_event->event_id.ppid = perf_event_pid(event, current);
60313ebe 5712
cdd6c482
IM
5713 task_event->event_id.tid = perf_event_tid(event, task);
5714 task_event->event_id.ptid = perf_event_tid(event, current);
9f498cc5 5715
34f43927
PZ
5716 task_event->event_id.time = perf_event_clock(event);
5717
cdd6c482 5718 perf_output_put(&handle, task_event->event_id);
393b2ad8 5719
c980d109
ACM
5720 perf_event__output_id_sample(event, &handle, &sample);
5721
60313ebe 5722 perf_output_end(&handle);
c980d109
ACM
5723out:
5724 task_event->event_id.header.size = size;
60313ebe
PZ
5725}
5726
cdd6c482
IM
5727static void perf_event_task(struct task_struct *task,
5728 struct perf_event_context *task_ctx,
3a80b4a3 5729 int new)
60313ebe 5730{
9f498cc5 5731 struct perf_task_event task_event;
60313ebe 5732
cdd6c482
IM
5733 if (!atomic_read(&nr_comm_events) &&
5734 !atomic_read(&nr_mmap_events) &&
5735 !atomic_read(&nr_task_events))
60313ebe
PZ
5736 return;
5737
9f498cc5 5738 task_event = (struct perf_task_event){
3a80b4a3
PZ
5739 .task = task,
5740 .task_ctx = task_ctx,
cdd6c482 5741 .event_id = {
60313ebe 5742 .header = {
cdd6c482 5743 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
573402db 5744 .misc = 0,
cdd6c482 5745 .size = sizeof(task_event.event_id),
60313ebe 5746 },
573402db
PZ
5747 /* .pid */
5748 /* .ppid */
9f498cc5
PZ
5749 /* .tid */
5750 /* .ptid */
34f43927 5751 /* .time */
60313ebe
PZ
5752 },
5753 };
5754
67516844 5755 perf_event_aux(perf_event_task_output,
52d857a8
JO
5756 &task_event,
5757 task_ctx);
9f498cc5
PZ
5758}
5759
cdd6c482 5760void perf_event_fork(struct task_struct *task)
9f498cc5 5761{
cdd6c482 5762 perf_event_task(task, NULL, 1);
60313ebe
PZ
5763}
5764
8d1b2d93
PZ
5765/*
5766 * comm tracking
5767 */
5768
5769struct perf_comm_event {
22a4f650
IM
5770 struct task_struct *task;
5771 char *comm;
8d1b2d93
PZ
5772 int comm_size;
5773
5774 struct {
5775 struct perf_event_header header;
5776
5777 u32 pid;
5778 u32 tid;
cdd6c482 5779 } event_id;
8d1b2d93
PZ
5780};
5781
67516844
JO
5782static int perf_event_comm_match(struct perf_event *event)
5783{
5784 return event->attr.comm;
5785}
5786
cdd6c482 5787static void perf_event_comm_output(struct perf_event *event,
52d857a8 5788 void *data)
8d1b2d93 5789{
52d857a8 5790 struct perf_comm_event *comm_event = data;
8d1b2d93 5791 struct perf_output_handle handle;
c980d109 5792 struct perf_sample_data sample;
cdd6c482 5793 int size = comm_event->event_id.header.size;
c980d109
ACM
5794 int ret;
5795
67516844
JO
5796 if (!perf_event_comm_match(event))
5797 return;
5798
c980d109
ACM
5799 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
5800 ret = perf_output_begin(&handle, event,
a7ac67ea 5801 comm_event->event_id.header.size);
8d1b2d93
PZ
5802
5803 if (ret)
c980d109 5804 goto out;
8d1b2d93 5805
cdd6c482
IM
5806 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
5807 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
709e50cf 5808
cdd6c482 5809 perf_output_put(&handle, comm_event->event_id);
76369139 5810 __output_copy(&handle, comm_event->comm,
8d1b2d93 5811 comm_event->comm_size);
c980d109
ACM
5812
5813 perf_event__output_id_sample(event, &handle, &sample);
5814
8d1b2d93 5815 perf_output_end(&handle);
c980d109
ACM
5816out:
5817 comm_event->event_id.header.size = size;
8d1b2d93
PZ
5818}
5819
cdd6c482 5820static void perf_event_comm_event(struct perf_comm_event *comm_event)
8d1b2d93 5821{
413ee3b4 5822 char comm[TASK_COMM_LEN];
8d1b2d93 5823 unsigned int size;
8d1b2d93 5824
413ee3b4 5825 memset(comm, 0, sizeof(comm));
96b02d78 5826 strlcpy(comm, comm_event->task->comm, sizeof(comm));
888fcee0 5827 size = ALIGN(strlen(comm)+1, sizeof(u64));
8d1b2d93
PZ
5828
5829 comm_event->comm = comm;
5830 comm_event->comm_size = size;
5831
cdd6c482 5832 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
8dc85d54 5833
67516844 5834 perf_event_aux(perf_event_comm_output,
52d857a8
JO
5835 comm_event,
5836 NULL);
8d1b2d93
PZ
5837}
5838
82b89778 5839void perf_event_comm(struct task_struct *task, bool exec)
8d1b2d93 5840{
9ee318a7
PZ
5841 struct perf_comm_event comm_event;
5842
cdd6c482 5843 if (!atomic_read(&nr_comm_events))
9ee318a7 5844 return;
a63eaf34 5845
9ee318a7 5846 comm_event = (struct perf_comm_event){
8d1b2d93 5847 .task = task,
573402db
PZ
5848 /* .comm */
5849 /* .comm_size */
cdd6c482 5850 .event_id = {
573402db 5851 .header = {
cdd6c482 5852 .type = PERF_RECORD_COMM,
82b89778 5853 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
573402db
PZ
5854 /* .size */
5855 },
5856 /* .pid */
5857 /* .tid */
8d1b2d93
PZ
5858 },
5859 };
5860
cdd6c482 5861 perf_event_comm_event(&comm_event);
8d1b2d93
PZ
5862}
5863
0a4a9391
PZ
5864/*
5865 * mmap tracking
5866 */
5867
5868struct perf_mmap_event {
089dd79d
PZ
5869 struct vm_area_struct *vma;
5870
5871 const char *file_name;
5872 int file_size;
13d7a241
SE
5873 int maj, min;
5874 u64 ino;
5875 u64 ino_generation;
f972eb63 5876 u32 prot, flags;
0a4a9391
PZ
5877
5878 struct {
5879 struct perf_event_header header;
5880
5881 u32 pid;
5882 u32 tid;
5883 u64 start;
5884 u64 len;
5885 u64 pgoff;
cdd6c482 5886 } event_id;
0a4a9391
PZ
5887};
5888
67516844
JO
5889static int perf_event_mmap_match(struct perf_event *event,
5890 void *data)
5891{
5892 struct perf_mmap_event *mmap_event = data;
5893 struct vm_area_struct *vma = mmap_event->vma;
5894 int executable = vma->vm_flags & VM_EXEC;
5895
5896 return (!executable && event->attr.mmap_data) ||
13d7a241 5897 (executable && (event->attr.mmap || event->attr.mmap2));
67516844
JO
5898}
5899
cdd6c482 5900static void perf_event_mmap_output(struct perf_event *event,
52d857a8 5901 void *data)
0a4a9391 5902{
52d857a8 5903 struct perf_mmap_event *mmap_event = data;
0a4a9391 5904 struct perf_output_handle handle;
c980d109 5905 struct perf_sample_data sample;
cdd6c482 5906 int size = mmap_event->event_id.header.size;
c980d109 5907 int ret;
0a4a9391 5908
67516844
JO
5909 if (!perf_event_mmap_match(event, data))
5910 return;
5911
13d7a241
SE
5912 if (event->attr.mmap2) {
5913 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
5914 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
5915 mmap_event->event_id.header.size += sizeof(mmap_event->min);
5916 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
d008d525 5917 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
f972eb63
PZ
5918 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
5919 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
13d7a241
SE
5920 }
5921
c980d109
ACM
5922 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
5923 ret = perf_output_begin(&handle, event,
a7ac67ea 5924 mmap_event->event_id.header.size);
0a4a9391 5925 if (ret)
c980d109 5926 goto out;
0a4a9391 5927
cdd6c482
IM
5928 mmap_event->event_id.pid = perf_event_pid(event, current);
5929 mmap_event->event_id.tid = perf_event_tid(event, current);
709e50cf 5930
cdd6c482 5931 perf_output_put(&handle, mmap_event->event_id);
13d7a241
SE
5932
5933 if (event->attr.mmap2) {
5934 perf_output_put(&handle, mmap_event->maj);
5935 perf_output_put(&handle, mmap_event->min);
5936 perf_output_put(&handle, mmap_event->ino);
5937 perf_output_put(&handle, mmap_event->ino_generation);
f972eb63
PZ
5938 perf_output_put(&handle, mmap_event->prot);
5939 perf_output_put(&handle, mmap_event->flags);
13d7a241
SE
5940 }
5941
76369139 5942 __output_copy(&handle, mmap_event->file_name,
0a4a9391 5943 mmap_event->file_size);
c980d109
ACM
5944
5945 perf_event__output_id_sample(event, &handle, &sample);
5946
78d613eb 5947 perf_output_end(&handle);
c980d109
ACM
5948out:
5949 mmap_event->event_id.header.size = size;
0a4a9391
PZ
5950}
5951
cdd6c482 5952static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
0a4a9391 5953{
089dd79d
PZ
5954 struct vm_area_struct *vma = mmap_event->vma;
5955 struct file *file = vma->vm_file;
13d7a241
SE
5956 int maj = 0, min = 0;
5957 u64 ino = 0, gen = 0;
f972eb63 5958 u32 prot = 0, flags = 0;
0a4a9391
PZ
5959 unsigned int size;
5960 char tmp[16];
5961 char *buf = NULL;
2c42cfbf 5962 char *name;
413ee3b4 5963
0a4a9391 5964 if (file) {
13d7a241
SE
5965 struct inode *inode;
5966 dev_t dev;
3ea2f2b9 5967
2c42cfbf 5968 buf = kmalloc(PATH_MAX, GFP_KERNEL);
0a4a9391 5969 if (!buf) {
c7e548b4
ON
5970 name = "//enomem";
5971 goto cpy_name;
0a4a9391 5972 }
413ee3b4 5973 /*
3ea2f2b9 5974 * d_path() works from the end of the rb backwards, so we
413ee3b4
AB
5975 * need to add enough zero bytes after the string to handle
5976 * the 64bit alignment we do later.
5977 */
9bf39ab2 5978 name = file_path(file, buf, PATH_MAX - sizeof(u64));
0a4a9391 5979 if (IS_ERR(name)) {
c7e548b4
ON
5980 name = "//toolong";
5981 goto cpy_name;
0a4a9391 5982 }
13d7a241
SE
5983 inode = file_inode(vma->vm_file);
5984 dev = inode->i_sb->s_dev;
5985 ino = inode->i_ino;
5986 gen = inode->i_generation;
5987 maj = MAJOR(dev);
5988 min = MINOR(dev);
f972eb63
PZ
5989
5990 if (vma->vm_flags & VM_READ)
5991 prot |= PROT_READ;
5992 if (vma->vm_flags & VM_WRITE)
5993 prot |= PROT_WRITE;
5994 if (vma->vm_flags & VM_EXEC)
5995 prot |= PROT_EXEC;
5996
5997 if (vma->vm_flags & VM_MAYSHARE)
5998 flags = MAP_SHARED;
5999 else
6000 flags = MAP_PRIVATE;
6001
6002 if (vma->vm_flags & VM_DENYWRITE)
6003 flags |= MAP_DENYWRITE;
6004 if (vma->vm_flags & VM_MAYEXEC)
6005 flags |= MAP_EXECUTABLE;
6006 if (vma->vm_flags & VM_LOCKED)
6007 flags |= MAP_LOCKED;
6008 if (vma->vm_flags & VM_HUGETLB)
6009 flags |= MAP_HUGETLB;
6010
c7e548b4 6011 goto got_name;
0a4a9391 6012 } else {
fbe26abe
JO
6013 if (vma->vm_ops && vma->vm_ops->name) {
6014 name = (char *) vma->vm_ops->name(vma);
6015 if (name)
6016 goto cpy_name;
6017 }
6018
2c42cfbf 6019 name = (char *)arch_vma_name(vma);
c7e548b4
ON
6020 if (name)
6021 goto cpy_name;
089dd79d 6022
32c5fb7e 6023 if (vma->vm_start <= vma->vm_mm->start_brk &&
3af9e859 6024 vma->vm_end >= vma->vm_mm->brk) {
c7e548b4
ON
6025 name = "[heap]";
6026 goto cpy_name;
32c5fb7e
ON
6027 }
6028 if (vma->vm_start <= vma->vm_mm->start_stack &&
3af9e859 6029 vma->vm_end >= vma->vm_mm->start_stack) {
c7e548b4
ON
6030 name = "[stack]";
6031 goto cpy_name;
089dd79d
PZ
6032 }
6033
c7e548b4
ON
6034 name = "//anon";
6035 goto cpy_name;
0a4a9391
PZ
6036 }
6037
c7e548b4
ON
6038cpy_name:
6039 strlcpy(tmp, name, sizeof(tmp));
6040 name = tmp;
0a4a9391 6041got_name:
2c42cfbf
PZ
6042 /*
6043 * Since our buffer works in 8 byte units we need to align our string
6044 * size to a multiple of 8. However, we must guarantee the tail end is
6045 * zero'd out to avoid leaking random bits to userspace.
6046 */
6047 size = strlen(name)+1;
6048 while (!IS_ALIGNED(size, sizeof(u64)))
6049 name[size++] = '\0';
0a4a9391
PZ
6050
6051 mmap_event->file_name = name;
6052 mmap_event->file_size = size;
13d7a241
SE
6053 mmap_event->maj = maj;
6054 mmap_event->min = min;
6055 mmap_event->ino = ino;
6056 mmap_event->ino_generation = gen;
f972eb63
PZ
6057 mmap_event->prot = prot;
6058 mmap_event->flags = flags;
0a4a9391 6059
2fe85427
SE
6060 if (!(vma->vm_flags & VM_EXEC))
6061 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
6062
cdd6c482 6063 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
0a4a9391 6064
67516844 6065 perf_event_aux(perf_event_mmap_output,
52d857a8
JO
6066 mmap_event,
6067 NULL);
665c2142 6068
0a4a9391
PZ
6069 kfree(buf);
6070}
6071
3af9e859 6072void perf_event_mmap(struct vm_area_struct *vma)
0a4a9391 6073{
9ee318a7
PZ
6074 struct perf_mmap_event mmap_event;
6075
cdd6c482 6076 if (!atomic_read(&nr_mmap_events))
9ee318a7
PZ
6077 return;
6078
6079 mmap_event = (struct perf_mmap_event){
089dd79d 6080 .vma = vma,
573402db
PZ
6081 /* .file_name */
6082 /* .file_size */
cdd6c482 6083 .event_id = {
573402db 6084 .header = {
cdd6c482 6085 .type = PERF_RECORD_MMAP,
39447b38 6086 .misc = PERF_RECORD_MISC_USER,
573402db
PZ
6087 /* .size */
6088 },
6089 /* .pid */
6090 /* .tid */
089dd79d
PZ
6091 .start = vma->vm_start,
6092 .len = vma->vm_end - vma->vm_start,
3a0304e9 6093 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
0a4a9391 6094 },
13d7a241
SE
6095 /* .maj (attr_mmap2 only) */
6096 /* .min (attr_mmap2 only) */
6097 /* .ino (attr_mmap2 only) */
6098 /* .ino_generation (attr_mmap2 only) */
f972eb63
PZ
6099 /* .prot (attr_mmap2 only) */
6100 /* .flags (attr_mmap2 only) */
0a4a9391
PZ
6101 };
6102
cdd6c482 6103 perf_event_mmap_event(&mmap_event);
0a4a9391
PZ
6104}
6105
68db7e98
AS
6106void perf_event_aux_event(struct perf_event *event, unsigned long head,
6107 unsigned long size, u64 flags)
6108{
6109 struct perf_output_handle handle;
6110 struct perf_sample_data sample;
6111 struct perf_aux_event {
6112 struct perf_event_header header;
6113 u64 offset;
6114 u64 size;
6115 u64 flags;
6116 } rec = {
6117 .header = {
6118 .type = PERF_RECORD_AUX,
6119 .misc = 0,
6120 .size = sizeof(rec),
6121 },
6122 .offset = head,
6123 .size = size,
6124 .flags = flags,
6125 };
6126 int ret;
6127
6128 perf_event_header__init_id(&rec.header, &sample, event);
6129 ret = perf_output_begin(&handle, event, rec.header.size);
6130
6131 if (ret)
6132 return;
6133
6134 perf_output_put(&handle, rec);
6135 perf_event__output_id_sample(event, &handle, &sample);
6136
6137 perf_output_end(&handle);
6138}
6139
f38b0dbb
KL
6140/*
6141 * Lost/dropped samples logging
6142 */
6143void perf_log_lost_samples(struct perf_event *event, u64 lost)
6144{
6145 struct perf_output_handle handle;
6146 struct perf_sample_data sample;
6147 int ret;
6148
6149 struct {
6150 struct perf_event_header header;
6151 u64 lost;
6152 } lost_samples_event = {
6153 .header = {
6154 .type = PERF_RECORD_LOST_SAMPLES,
6155 .misc = 0,
6156 .size = sizeof(lost_samples_event),
6157 },
6158 .lost = lost,
6159 };
6160
6161 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
6162
6163 ret = perf_output_begin(&handle, event,
6164 lost_samples_event.header.size);
6165 if (ret)
6166 return;
6167
6168 perf_output_put(&handle, lost_samples_event);
6169 perf_event__output_id_sample(event, &handle, &sample);
6170 perf_output_end(&handle);
6171}
6172
45ac1403
AH
6173/*
6174 * context_switch tracking
6175 */
6176
6177struct perf_switch_event {
6178 struct task_struct *task;
6179 struct task_struct *next_prev;
6180
6181 struct {
6182 struct perf_event_header header;
6183 u32 next_prev_pid;
6184 u32 next_prev_tid;
6185 } event_id;
6186};
6187
6188static int perf_event_switch_match(struct perf_event *event)
6189{
6190 return event->attr.context_switch;
6191}
6192
6193static void perf_event_switch_output(struct perf_event *event, void *data)
6194{
6195 struct perf_switch_event *se = data;
6196 struct perf_output_handle handle;
6197 struct perf_sample_data sample;
6198 int ret;
6199
6200 if (!perf_event_switch_match(event))
6201 return;
6202
6203 /* Only CPU-wide events are allowed to see next/prev pid/tid */
6204 if (event->ctx->task) {
6205 se->event_id.header.type = PERF_RECORD_SWITCH;
6206 se->event_id.header.size = sizeof(se->event_id.header);
6207 } else {
6208 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
6209 se->event_id.header.size = sizeof(se->event_id);
6210 se->event_id.next_prev_pid =
6211 perf_event_pid(event, se->next_prev);
6212 se->event_id.next_prev_tid =
6213 perf_event_tid(event, se->next_prev);
6214 }
6215
6216 perf_event_header__init_id(&se->event_id.header, &sample, event);
6217
6218 ret = perf_output_begin(&handle, event, se->event_id.header.size);
6219 if (ret)
6220 return;
6221
6222 if (event->ctx->task)
6223 perf_output_put(&handle, se->event_id.header);
6224 else
6225 perf_output_put(&handle, se->event_id);
6226
6227 perf_event__output_id_sample(event, &handle, &sample);
6228
6229 perf_output_end(&handle);
6230}
6231
6232static void perf_event_switch(struct task_struct *task,
6233 struct task_struct *next_prev, bool sched_in)
6234{
6235 struct perf_switch_event switch_event;
6236
6237 /* N.B. caller checks nr_switch_events != 0 */
6238
6239 switch_event = (struct perf_switch_event){
6240 .task = task,
6241 .next_prev = next_prev,
6242 .event_id = {
6243 .header = {
6244 /* .type */
6245 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
6246 /* .size */
6247 },
6248 /* .next_prev_pid */
6249 /* .next_prev_tid */
6250 },
6251 };
6252
6253 perf_event_aux(perf_event_switch_output,
6254 &switch_event,
6255 NULL);
6256}
6257
a78ac325
PZ
6258/*
6259 * IRQ throttle logging
6260 */
6261
cdd6c482 6262static void perf_log_throttle(struct perf_event *event, int enable)
a78ac325
PZ
6263{
6264 struct perf_output_handle handle;
c980d109 6265 struct perf_sample_data sample;
a78ac325
PZ
6266 int ret;
6267
6268 struct {
6269 struct perf_event_header header;
6270 u64 time;
cca3f454 6271 u64 id;
7f453c24 6272 u64 stream_id;
a78ac325
PZ
6273 } throttle_event = {
6274 .header = {
cdd6c482 6275 .type = PERF_RECORD_THROTTLE,
a78ac325
PZ
6276 .misc = 0,
6277 .size = sizeof(throttle_event),
6278 },
34f43927 6279 .time = perf_event_clock(event),
cdd6c482
IM
6280 .id = primary_event_id(event),
6281 .stream_id = event->id,
a78ac325
PZ
6282 };
6283
966ee4d6 6284 if (enable)
cdd6c482 6285 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
966ee4d6 6286
c980d109
ACM
6287 perf_event_header__init_id(&throttle_event.header, &sample, event);
6288
6289 ret = perf_output_begin(&handle, event,
a7ac67ea 6290 throttle_event.header.size);
a78ac325
PZ
6291 if (ret)
6292 return;
6293
6294 perf_output_put(&handle, throttle_event);
c980d109 6295 perf_event__output_id_sample(event, &handle, &sample);
a78ac325
PZ
6296 perf_output_end(&handle);
6297}
6298
ec0d7729
AS
6299static void perf_log_itrace_start(struct perf_event *event)
6300{
6301 struct perf_output_handle handle;
6302 struct perf_sample_data sample;
6303 struct perf_aux_event {
6304 struct perf_event_header header;
6305 u32 pid;
6306 u32 tid;
6307 } rec;
6308 int ret;
6309
6310 if (event->parent)
6311 event = event->parent;
6312
6313 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
6314 event->hw.itrace_started)
6315 return;
6316
ec0d7729
AS
6317 rec.header.type = PERF_RECORD_ITRACE_START;
6318 rec.header.misc = 0;
6319 rec.header.size = sizeof(rec);
6320 rec.pid = perf_event_pid(event, current);
6321 rec.tid = perf_event_tid(event, current);
6322
6323 perf_event_header__init_id(&rec.header, &sample, event);
6324 ret = perf_output_begin(&handle, event, rec.header.size);
6325
6326 if (ret)
6327 return;
6328
6329 perf_output_put(&handle, rec);
6330 perf_event__output_id_sample(event, &handle, &sample);
6331
6332 perf_output_end(&handle);
6333}
6334
f6c7d5fe 6335/*
cdd6c482 6336 * Generic event overflow handling, sampling.
f6c7d5fe
PZ
6337 */
6338
a8b0ca17 6339static int __perf_event_overflow(struct perf_event *event,
5622f295
MM
6340 int throttle, struct perf_sample_data *data,
6341 struct pt_regs *regs)
f6c7d5fe 6342{
cdd6c482
IM
6343 int events = atomic_read(&event->event_limit);
6344 struct hw_perf_event *hwc = &event->hw;
e050e3f0 6345 u64 seq;
79f14641
PZ
6346 int ret = 0;
6347
96398826
PZ
6348 /*
6349 * Non-sampling counters might still use the PMI to fold short
6350 * hardware counters, ignore those.
6351 */
6352 if (unlikely(!is_sampling_event(event)))
6353 return 0;
6354
e050e3f0
SE
6355 seq = __this_cpu_read(perf_throttled_seq);
6356 if (seq != hwc->interrupts_seq) {
6357 hwc->interrupts_seq = seq;
6358 hwc->interrupts = 1;
6359 } else {
6360 hwc->interrupts++;
6361 if (unlikely(throttle
6362 && hwc->interrupts >= max_samples_per_tick)) {
6363 __this_cpu_inc(perf_throttled_count);
163ec435
PZ
6364 hwc->interrupts = MAX_INTERRUPTS;
6365 perf_log_throttle(event, 0);
d84153d6 6366 tick_nohz_full_kick();
a78ac325
PZ
6367 ret = 1;
6368 }
e050e3f0 6369 }
60db5e09 6370
cdd6c482 6371 if (event->attr.freq) {
def0a9b2 6372 u64 now = perf_clock();
abd50713 6373 s64 delta = now - hwc->freq_time_stamp;
bd2b5b12 6374
abd50713 6375 hwc->freq_time_stamp = now;
bd2b5b12 6376
abd50713 6377 if (delta > 0 && delta < 2*TICK_NSEC)
f39d47ff 6378 perf_adjust_period(event, delta, hwc->last_period, true);
bd2b5b12
PZ
6379 }
6380
2023b359
PZ
6381 /*
6382 * XXX event_limit might not quite work as expected on inherited
cdd6c482 6383 * events
2023b359
PZ
6384 */
6385
cdd6c482
IM
6386 event->pending_kill = POLL_IN;
6387 if (events && atomic_dec_and_test(&event->event_limit)) {
79f14641 6388 ret = 1;
cdd6c482 6389 event->pending_kill = POLL_HUP;
a8b0ca17
PZ
6390 event->pending_disable = 1;
6391 irq_work_queue(&event->pending);
79f14641
PZ
6392 }
6393
453f19ee 6394 if (event->overflow_handler)
a8b0ca17 6395 event->overflow_handler(event, data, regs);
453f19ee 6396 else
a8b0ca17 6397 perf_event_output(event, data, regs);
453f19ee 6398
fed66e2c 6399 if (*perf_event_fasync(event) && event->pending_kill) {
a8b0ca17
PZ
6400 event->pending_wakeup = 1;
6401 irq_work_queue(&event->pending);
f506b3dc
PZ
6402 }
6403
79f14641 6404 return ret;
f6c7d5fe
PZ
6405}
6406
a8b0ca17 6407int perf_event_overflow(struct perf_event *event,
5622f295
MM
6408 struct perf_sample_data *data,
6409 struct pt_regs *regs)
850bc73f 6410{
a8b0ca17 6411 return __perf_event_overflow(event, 1, data, regs);
850bc73f
PZ
6412}
6413
15dbf27c 6414/*
cdd6c482 6415 * Generic software event infrastructure
15dbf27c
PZ
6416 */
6417
b28ab83c
PZ
6418struct swevent_htable {
6419 struct swevent_hlist *swevent_hlist;
6420 struct mutex hlist_mutex;
6421 int hlist_refcount;
6422
6423 /* Recursion avoidance in each contexts */
6424 int recursion[PERF_NR_CONTEXTS];
6425};
6426
6427static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
6428
7b4b6658 6429/*
cdd6c482
IM
6430 * We directly increment event->count and keep a second value in
6431 * event->hw.period_left to count intervals. This period event
7b4b6658
PZ
6432 * is kept in the range [-sample_period, 0] so that we can use the
6433 * sign as trigger.
6434 */
6435
ab573844 6436u64 perf_swevent_set_period(struct perf_event *event)
15dbf27c 6437{
cdd6c482 6438 struct hw_perf_event *hwc = &event->hw;
7b4b6658
PZ
6439 u64 period = hwc->last_period;
6440 u64 nr, offset;
6441 s64 old, val;
6442
6443 hwc->last_period = hwc->sample_period;
15dbf27c
PZ
6444
6445again:
e7850595 6446 old = val = local64_read(&hwc->period_left);
7b4b6658
PZ
6447 if (val < 0)
6448 return 0;
15dbf27c 6449
7b4b6658
PZ
6450 nr = div64_u64(period + val, period);
6451 offset = nr * period;
6452 val -= offset;
e7850595 6453 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7b4b6658 6454 goto again;
15dbf27c 6455
7b4b6658 6456 return nr;
15dbf27c
PZ
6457}
6458
0cff784a 6459static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
a8b0ca17 6460 struct perf_sample_data *data,
5622f295 6461 struct pt_regs *regs)
15dbf27c 6462{
cdd6c482 6463 struct hw_perf_event *hwc = &event->hw;
850bc73f 6464 int throttle = 0;
15dbf27c 6465
0cff784a
PZ
6466 if (!overflow)
6467 overflow = perf_swevent_set_period(event);
15dbf27c 6468
7b4b6658
PZ
6469 if (hwc->interrupts == MAX_INTERRUPTS)
6470 return;
15dbf27c 6471
7b4b6658 6472 for (; overflow; overflow--) {
a8b0ca17 6473 if (__perf_event_overflow(event, throttle,
5622f295 6474 data, regs)) {
7b4b6658
PZ
6475 /*
6476 * We inhibit the overflow from happening when
6477 * hwc->interrupts == MAX_INTERRUPTS.
6478 */
6479 break;
6480 }
cf450a73 6481 throttle = 1;
7b4b6658 6482 }
15dbf27c
PZ
6483}
6484
a4eaf7f1 6485static void perf_swevent_event(struct perf_event *event, u64 nr,
a8b0ca17 6486 struct perf_sample_data *data,
5622f295 6487 struct pt_regs *regs)
7b4b6658 6488{
cdd6c482 6489 struct hw_perf_event *hwc = &event->hw;
d6d020e9 6490
e7850595 6491 local64_add(nr, &event->count);
d6d020e9 6492
0cff784a
PZ
6493 if (!regs)
6494 return;
6495
6c7e550f 6496 if (!is_sampling_event(event))
7b4b6658 6497 return;
d6d020e9 6498
5d81e5cf
AV
6499 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
6500 data->period = nr;
6501 return perf_swevent_overflow(event, 1, data, regs);
6502 } else
6503 data->period = event->hw.last_period;
6504
0cff784a 6505 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
a8b0ca17 6506 return perf_swevent_overflow(event, 1, data, regs);
0cff784a 6507
e7850595 6508 if (local64_add_negative(nr, &hwc->period_left))
7b4b6658 6509 return;
df1a132b 6510
a8b0ca17 6511 perf_swevent_overflow(event, 0, data, regs);
d6d020e9
PZ
6512}
6513
f5ffe02e
FW
6514static int perf_exclude_event(struct perf_event *event,
6515 struct pt_regs *regs)
6516{
a4eaf7f1 6517 if (event->hw.state & PERF_HES_STOPPED)
91b2f482 6518 return 1;
a4eaf7f1 6519
f5ffe02e
FW
6520 if (regs) {
6521 if (event->attr.exclude_user && user_mode(regs))
6522 return 1;
6523
6524 if (event->attr.exclude_kernel && !user_mode(regs))
6525 return 1;
6526 }
6527
6528 return 0;
6529}
6530
cdd6c482 6531static int perf_swevent_match(struct perf_event *event,
1c432d89 6532 enum perf_type_id type,
6fb2915d
LZ
6533 u32 event_id,
6534 struct perf_sample_data *data,
6535 struct pt_regs *regs)
15dbf27c 6536{
cdd6c482 6537 if (event->attr.type != type)
a21ca2ca 6538 return 0;
f5ffe02e 6539
cdd6c482 6540 if (event->attr.config != event_id)
15dbf27c
PZ
6541 return 0;
6542
f5ffe02e
FW
6543 if (perf_exclude_event(event, regs))
6544 return 0;
15dbf27c
PZ
6545
6546 return 1;
6547}
6548
76e1d904
FW
6549static inline u64 swevent_hash(u64 type, u32 event_id)
6550{
6551 u64 val = event_id | (type << 32);
6552
6553 return hash_64(val, SWEVENT_HLIST_BITS);
6554}
6555
49f135ed
FW
6556static inline struct hlist_head *
6557__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
76e1d904 6558{
49f135ed
FW
6559 u64 hash = swevent_hash(type, event_id);
6560
6561 return &hlist->heads[hash];
6562}
76e1d904 6563
49f135ed
FW
6564/* For the read side: events when they trigger */
6565static inline struct hlist_head *
b28ab83c 6566find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
49f135ed
FW
6567{
6568 struct swevent_hlist *hlist;
76e1d904 6569
b28ab83c 6570 hlist = rcu_dereference(swhash->swevent_hlist);
76e1d904
FW
6571 if (!hlist)
6572 return NULL;
6573
49f135ed
FW
6574 return __find_swevent_head(hlist, type, event_id);
6575}
6576
6577/* For the event head insertion and removal in the hlist */
6578static inline struct hlist_head *
b28ab83c 6579find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
49f135ed
FW
6580{
6581 struct swevent_hlist *hlist;
6582 u32 event_id = event->attr.config;
6583 u64 type = event->attr.type;
6584
6585 /*
6586 * Event scheduling is always serialized against hlist allocation
6587 * and release. Which makes the protected version suitable here.
6588 * The context lock guarantees that.
6589 */
b28ab83c 6590 hlist = rcu_dereference_protected(swhash->swevent_hlist,
49f135ed
FW
6591 lockdep_is_held(&event->ctx->lock));
6592 if (!hlist)
6593 return NULL;
6594
6595 return __find_swevent_head(hlist, type, event_id);
76e1d904
FW
6596}
6597
6598static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
a8b0ca17 6599 u64 nr,
76e1d904
FW
6600 struct perf_sample_data *data,
6601 struct pt_regs *regs)
15dbf27c 6602{
4a32fea9 6603 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 6604 struct perf_event *event;
76e1d904 6605 struct hlist_head *head;
15dbf27c 6606
76e1d904 6607 rcu_read_lock();
b28ab83c 6608 head = find_swevent_head_rcu(swhash, type, event_id);
76e1d904
FW
6609 if (!head)
6610 goto end;
6611
b67bfe0d 6612 hlist_for_each_entry_rcu(event, head, hlist_entry) {
6fb2915d 6613 if (perf_swevent_match(event, type, event_id, data, regs))
a8b0ca17 6614 perf_swevent_event(event, nr, data, regs);
15dbf27c 6615 }
76e1d904
FW
6616end:
6617 rcu_read_unlock();
15dbf27c
PZ
6618}
6619
86038c5e
PZI
6620DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
6621
4ed7c92d 6622int perf_swevent_get_recursion_context(void)
96f6d444 6623{
4a32fea9 6624 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
96f6d444 6625
b28ab83c 6626 return get_recursion_context(swhash->recursion);
96f6d444 6627}
645e8cc0 6628EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
96f6d444 6629
fa9f90be 6630inline void perf_swevent_put_recursion_context(int rctx)
15dbf27c 6631{
4a32fea9 6632 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
927c7a9e 6633
b28ab83c 6634 put_recursion_context(swhash->recursion, rctx);
ce71b9df 6635}
15dbf27c 6636
86038c5e 6637void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
b8e83514 6638{
a4234bfc 6639 struct perf_sample_data data;
4ed7c92d 6640
86038c5e 6641 if (WARN_ON_ONCE(!regs))
4ed7c92d 6642 return;
a4234bfc 6643
fd0d000b 6644 perf_sample_data_init(&data, addr, 0);
a8b0ca17 6645 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
86038c5e
PZI
6646}
6647
6648void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
6649{
6650 int rctx;
6651
6652 preempt_disable_notrace();
6653 rctx = perf_swevent_get_recursion_context();
6654 if (unlikely(rctx < 0))
6655 goto fail;
6656
6657 ___perf_sw_event(event_id, nr, regs, addr);
4ed7c92d
PZ
6658
6659 perf_swevent_put_recursion_context(rctx);
86038c5e 6660fail:
1c024eca 6661 preempt_enable_notrace();
b8e83514
PZ
6662}
6663
cdd6c482 6664static void perf_swevent_read(struct perf_event *event)
15dbf27c 6665{
15dbf27c
PZ
6666}
6667
a4eaf7f1 6668static int perf_swevent_add(struct perf_event *event, int flags)
15dbf27c 6669{
4a32fea9 6670 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 6671 struct hw_perf_event *hwc = &event->hw;
76e1d904
FW
6672 struct hlist_head *head;
6673
6c7e550f 6674 if (is_sampling_event(event)) {
7b4b6658 6675 hwc->last_period = hwc->sample_period;
cdd6c482 6676 perf_swevent_set_period(event);
7b4b6658 6677 }
76e1d904 6678
a4eaf7f1
PZ
6679 hwc->state = !(flags & PERF_EF_START);
6680
b28ab83c 6681 head = find_swevent_head(swhash, event);
12ca6ad2 6682 if (WARN_ON_ONCE(!head))
76e1d904
FW
6683 return -EINVAL;
6684
6685 hlist_add_head_rcu(&event->hlist_entry, head);
6a694a60 6686 perf_event_update_userpage(event);
76e1d904 6687
15dbf27c
PZ
6688 return 0;
6689}
6690
a4eaf7f1 6691static void perf_swevent_del(struct perf_event *event, int flags)
15dbf27c 6692{
76e1d904 6693 hlist_del_rcu(&event->hlist_entry);
15dbf27c
PZ
6694}
6695
a4eaf7f1 6696static void perf_swevent_start(struct perf_event *event, int flags)
5c92d124 6697{
a4eaf7f1 6698 event->hw.state = 0;
d6d020e9 6699}
aa9c4c0f 6700
a4eaf7f1 6701static void perf_swevent_stop(struct perf_event *event, int flags)
d6d020e9 6702{
a4eaf7f1 6703 event->hw.state = PERF_HES_STOPPED;
bae43c99
IM
6704}
6705
49f135ed
FW
6706/* Deref the hlist from the update side */
6707static inline struct swevent_hlist *
b28ab83c 6708swevent_hlist_deref(struct swevent_htable *swhash)
49f135ed 6709{
b28ab83c
PZ
6710 return rcu_dereference_protected(swhash->swevent_hlist,
6711 lockdep_is_held(&swhash->hlist_mutex));
49f135ed
FW
6712}
6713
b28ab83c 6714static void swevent_hlist_release(struct swevent_htable *swhash)
76e1d904 6715{
b28ab83c 6716 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
76e1d904 6717
49f135ed 6718 if (!hlist)
76e1d904
FW
6719 return;
6720
70691d4a 6721 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
fa4bbc4c 6722 kfree_rcu(hlist, rcu_head);
76e1d904
FW
6723}
6724
6725static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
6726{
b28ab83c 6727 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904 6728
b28ab83c 6729 mutex_lock(&swhash->hlist_mutex);
76e1d904 6730
b28ab83c
PZ
6731 if (!--swhash->hlist_refcount)
6732 swevent_hlist_release(swhash);
76e1d904 6733
b28ab83c 6734 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
6735}
6736
6737static void swevent_hlist_put(struct perf_event *event)
6738{
6739 int cpu;
6740
76e1d904
FW
6741 for_each_possible_cpu(cpu)
6742 swevent_hlist_put_cpu(event, cpu);
6743}
6744
6745static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
6746{
b28ab83c 6747 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904
FW
6748 int err = 0;
6749
b28ab83c 6750 mutex_lock(&swhash->hlist_mutex);
b28ab83c 6751 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
76e1d904
FW
6752 struct swevent_hlist *hlist;
6753
6754 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
6755 if (!hlist) {
6756 err = -ENOMEM;
6757 goto exit;
6758 }
b28ab83c 6759 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 6760 }
b28ab83c 6761 swhash->hlist_refcount++;
9ed6060d 6762exit:
b28ab83c 6763 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
6764
6765 return err;
6766}
6767
6768static int swevent_hlist_get(struct perf_event *event)
6769{
6770 int err;
6771 int cpu, failed_cpu;
6772
76e1d904
FW
6773 get_online_cpus();
6774 for_each_possible_cpu(cpu) {
6775 err = swevent_hlist_get_cpu(event, cpu);
6776 if (err) {
6777 failed_cpu = cpu;
6778 goto fail;
6779 }
6780 }
6781 put_online_cpus();
6782
6783 return 0;
9ed6060d 6784fail:
76e1d904
FW
6785 for_each_possible_cpu(cpu) {
6786 if (cpu == failed_cpu)
6787 break;
6788 swevent_hlist_put_cpu(event, cpu);
6789 }
6790
6791 put_online_cpus();
6792 return err;
6793}
6794
c5905afb 6795struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
95476b64 6796
b0a873eb
PZ
6797static void sw_perf_event_destroy(struct perf_event *event)
6798{
6799 u64 event_id = event->attr.config;
95476b64 6800
b0a873eb
PZ
6801 WARN_ON(event->parent);
6802
c5905afb 6803 static_key_slow_dec(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
6804 swevent_hlist_put(event);
6805}
6806
6807static int perf_swevent_init(struct perf_event *event)
6808{
8176cced 6809 u64 event_id = event->attr.config;
b0a873eb
PZ
6810
6811 if (event->attr.type != PERF_TYPE_SOFTWARE)
6812 return -ENOENT;
6813
2481c5fa
SE
6814 /*
6815 * no branch sampling for software events
6816 */
6817 if (has_branch_stack(event))
6818 return -EOPNOTSUPP;
6819
b0a873eb
PZ
6820 switch (event_id) {
6821 case PERF_COUNT_SW_CPU_CLOCK:
6822 case PERF_COUNT_SW_TASK_CLOCK:
6823 return -ENOENT;
6824
6825 default:
6826 break;
6827 }
6828
ce677831 6829 if (event_id >= PERF_COUNT_SW_MAX)
b0a873eb
PZ
6830 return -ENOENT;
6831
6832 if (!event->parent) {
6833 int err;
6834
6835 err = swevent_hlist_get(event);
6836 if (err)
6837 return err;
6838
c5905afb 6839 static_key_slow_inc(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
6840 event->destroy = sw_perf_event_destroy;
6841 }
6842
6843 return 0;
6844}
6845
6846static struct pmu perf_swevent = {
89a1e187 6847 .task_ctx_nr = perf_sw_context,
95476b64 6848
34f43927
PZ
6849 .capabilities = PERF_PMU_CAP_NO_NMI,
6850
b0a873eb 6851 .event_init = perf_swevent_init,
a4eaf7f1
PZ
6852 .add = perf_swevent_add,
6853 .del = perf_swevent_del,
6854 .start = perf_swevent_start,
6855 .stop = perf_swevent_stop,
1c024eca 6856 .read = perf_swevent_read,
1c024eca
PZ
6857};
6858
b0a873eb
PZ
6859#ifdef CONFIG_EVENT_TRACING
6860
1c024eca
PZ
6861static int perf_tp_filter_match(struct perf_event *event,
6862 struct perf_sample_data *data)
6863{
6864 void *record = data->raw->data;
6865
b71b437e
PZ
6866 /* only top level events have filters set */
6867 if (event->parent)
6868 event = event->parent;
6869
1c024eca
PZ
6870 if (likely(!event->filter) || filter_match_preds(event->filter, record))
6871 return 1;
6872 return 0;
6873}
6874
6875static int perf_tp_event_match(struct perf_event *event,
6876 struct perf_sample_data *data,
6877 struct pt_regs *regs)
6878{
a0f7d0f7
FW
6879 if (event->hw.state & PERF_HES_STOPPED)
6880 return 0;
580d607c
PZ
6881 /*
6882 * All tracepoints are from kernel-space.
6883 */
6884 if (event->attr.exclude_kernel)
1c024eca
PZ
6885 return 0;
6886
6887 if (!perf_tp_filter_match(event, data))
6888 return 0;
6889
6890 return 1;
6891}
6892
6893void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
e6dab5ff
AV
6894 struct pt_regs *regs, struct hlist_head *head, int rctx,
6895 struct task_struct *task)
95476b64
FW
6896{
6897 struct perf_sample_data data;
1c024eca 6898 struct perf_event *event;
1c024eca 6899
95476b64
FW
6900 struct perf_raw_record raw = {
6901 .size = entry_size,
6902 .data = record,
6903 };
6904
fd0d000b 6905 perf_sample_data_init(&data, addr, 0);
95476b64
FW
6906 data.raw = &raw;
6907
b67bfe0d 6908 hlist_for_each_entry_rcu(event, head, hlist_entry) {
1c024eca 6909 if (perf_tp_event_match(event, &data, regs))
a8b0ca17 6910 perf_swevent_event(event, count, &data, regs);
4f41c013 6911 }
ecc55f84 6912
e6dab5ff
AV
6913 /*
6914 * If we got specified a target task, also iterate its context and
6915 * deliver this event there too.
6916 */
6917 if (task && task != current) {
6918 struct perf_event_context *ctx;
6919 struct trace_entry *entry = record;
6920
6921 rcu_read_lock();
6922 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
6923 if (!ctx)
6924 goto unlock;
6925
6926 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
6927 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6928 continue;
6929 if (event->attr.config != entry->type)
6930 continue;
6931 if (perf_tp_event_match(event, &data, regs))
6932 perf_swevent_event(event, count, &data, regs);
6933 }
6934unlock:
6935 rcu_read_unlock();
6936 }
6937
ecc55f84 6938 perf_swevent_put_recursion_context(rctx);
95476b64
FW
6939}
6940EXPORT_SYMBOL_GPL(perf_tp_event);
6941
cdd6c482 6942static void tp_perf_event_destroy(struct perf_event *event)
e077df4f 6943{
1c024eca 6944 perf_trace_destroy(event);
e077df4f
PZ
6945}
6946
b0a873eb 6947static int perf_tp_event_init(struct perf_event *event)
e077df4f 6948{
76e1d904
FW
6949 int err;
6950
b0a873eb
PZ
6951 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6952 return -ENOENT;
6953
2481c5fa
SE
6954 /*
6955 * no branch sampling for tracepoint events
6956 */
6957 if (has_branch_stack(event))
6958 return -EOPNOTSUPP;
6959
1c024eca
PZ
6960 err = perf_trace_init(event);
6961 if (err)
b0a873eb 6962 return err;
e077df4f 6963
cdd6c482 6964 event->destroy = tp_perf_event_destroy;
e077df4f 6965
b0a873eb
PZ
6966 return 0;
6967}
6968
6969static struct pmu perf_tracepoint = {
89a1e187
PZ
6970 .task_ctx_nr = perf_sw_context,
6971
b0a873eb 6972 .event_init = perf_tp_event_init,
a4eaf7f1
PZ
6973 .add = perf_trace_add,
6974 .del = perf_trace_del,
6975 .start = perf_swevent_start,
6976 .stop = perf_swevent_stop,
b0a873eb 6977 .read = perf_swevent_read,
b0a873eb
PZ
6978};
6979
6980static inline void perf_tp_register(void)
6981{
2e80a82a 6982 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
e077df4f 6983}
6fb2915d
LZ
6984
6985static int perf_event_set_filter(struct perf_event *event, void __user *arg)
6986{
6987 char *filter_str;
6988 int ret;
6989
6990 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6991 return -EINVAL;
6992
6993 filter_str = strndup_user(arg, PAGE_SIZE);
6994 if (IS_ERR(filter_str))
6995 return PTR_ERR(filter_str);
6996
6997 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
6998
6999 kfree(filter_str);
7000 return ret;
7001}
7002
7003static void perf_event_free_filter(struct perf_event *event)
7004{
7005 ftrace_profile_free_filter(event);
7006}
7007
2541517c
AS
7008static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7009{
7010 struct bpf_prog *prog;
7011
7012 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7013 return -EINVAL;
7014
7015 if (event->tp_event->prog)
7016 return -EEXIST;
7017
04a22fae
WN
7018 if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE))
7019 /* bpf programs can only be attached to u/kprobes */
2541517c
AS
7020 return -EINVAL;
7021
7022 prog = bpf_prog_get(prog_fd);
7023 if (IS_ERR(prog))
7024 return PTR_ERR(prog);
7025
6c373ca8 7026 if (prog->type != BPF_PROG_TYPE_KPROBE) {
2541517c
AS
7027 /* valid fd, but invalid bpf program type */
7028 bpf_prog_put(prog);
7029 return -EINVAL;
7030 }
7031
7032 event->tp_event->prog = prog;
7033
7034 return 0;
7035}
7036
7037static void perf_event_free_bpf_prog(struct perf_event *event)
7038{
7039 struct bpf_prog *prog;
7040
7041 if (!event->tp_event)
7042 return;
7043
7044 prog = event->tp_event->prog;
7045 if (prog) {
7046 event->tp_event->prog = NULL;
7047 bpf_prog_put(prog);
7048 }
7049}
7050
e077df4f 7051#else
6fb2915d 7052
b0a873eb 7053static inline void perf_tp_register(void)
e077df4f 7054{
e077df4f 7055}
6fb2915d
LZ
7056
7057static int perf_event_set_filter(struct perf_event *event, void __user *arg)
7058{
7059 return -ENOENT;
7060}
7061
7062static void perf_event_free_filter(struct perf_event *event)
7063{
7064}
7065
2541517c
AS
7066static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7067{
7068 return -ENOENT;
7069}
7070
7071static void perf_event_free_bpf_prog(struct perf_event *event)
7072{
7073}
07b139c8 7074#endif /* CONFIG_EVENT_TRACING */
e077df4f 7075
24f1e32c 7076#ifdef CONFIG_HAVE_HW_BREAKPOINT
f5ffe02e 7077void perf_bp_event(struct perf_event *bp, void *data)
24f1e32c 7078{
f5ffe02e
FW
7079 struct perf_sample_data sample;
7080 struct pt_regs *regs = data;
7081
fd0d000b 7082 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
f5ffe02e 7083
a4eaf7f1 7084 if (!bp->hw.state && !perf_exclude_event(bp, regs))
a8b0ca17 7085 perf_swevent_event(bp, 1, &sample, regs);
24f1e32c
FW
7086}
7087#endif
7088
b0a873eb
PZ
7089/*
7090 * hrtimer based swevent callback
7091 */
f29ac756 7092
b0a873eb 7093static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
f29ac756 7094{
b0a873eb
PZ
7095 enum hrtimer_restart ret = HRTIMER_RESTART;
7096 struct perf_sample_data data;
7097 struct pt_regs *regs;
7098 struct perf_event *event;
7099 u64 period;
f29ac756 7100
b0a873eb 7101 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
ba3dd36c
PZ
7102
7103 if (event->state != PERF_EVENT_STATE_ACTIVE)
7104 return HRTIMER_NORESTART;
7105
b0a873eb 7106 event->pmu->read(event);
f344011c 7107
fd0d000b 7108 perf_sample_data_init(&data, 0, event->hw.last_period);
b0a873eb
PZ
7109 regs = get_irq_regs();
7110
7111 if (regs && !perf_exclude_event(event, regs)) {
77aeeebd 7112 if (!(event->attr.exclude_idle && is_idle_task(current)))
33b07b8b 7113 if (__perf_event_overflow(event, 1, &data, regs))
b0a873eb
PZ
7114 ret = HRTIMER_NORESTART;
7115 }
24f1e32c 7116
b0a873eb
PZ
7117 period = max_t(u64, 10000, event->hw.sample_period);
7118 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
24f1e32c 7119
b0a873eb 7120 return ret;
f29ac756
PZ
7121}
7122
b0a873eb 7123static void perf_swevent_start_hrtimer(struct perf_event *event)
5c92d124 7124{
b0a873eb 7125 struct hw_perf_event *hwc = &event->hw;
5d508e82
FBH
7126 s64 period;
7127
7128 if (!is_sampling_event(event))
7129 return;
f5ffe02e 7130
5d508e82
FBH
7131 period = local64_read(&hwc->period_left);
7132 if (period) {
7133 if (period < 0)
7134 period = 10000;
fa407f35 7135
5d508e82
FBH
7136 local64_set(&hwc->period_left, 0);
7137 } else {
7138 period = max_t(u64, 10000, hwc->sample_period);
7139 }
3497d206
TG
7140 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
7141 HRTIMER_MODE_REL_PINNED);
24f1e32c 7142}
b0a873eb
PZ
7143
7144static void perf_swevent_cancel_hrtimer(struct perf_event *event)
24f1e32c 7145{
b0a873eb
PZ
7146 struct hw_perf_event *hwc = &event->hw;
7147
6c7e550f 7148 if (is_sampling_event(event)) {
b0a873eb 7149 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
fa407f35 7150 local64_set(&hwc->period_left, ktime_to_ns(remaining));
b0a873eb
PZ
7151
7152 hrtimer_cancel(&hwc->hrtimer);
7153 }
24f1e32c
FW
7154}
7155
ba3dd36c
PZ
7156static void perf_swevent_init_hrtimer(struct perf_event *event)
7157{
7158 struct hw_perf_event *hwc = &event->hw;
7159
7160 if (!is_sampling_event(event))
7161 return;
7162
7163 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7164 hwc->hrtimer.function = perf_swevent_hrtimer;
7165
7166 /*
7167 * Since hrtimers have a fixed rate, we can do a static freq->period
7168 * mapping and avoid the whole period adjust feedback stuff.
7169 */
7170 if (event->attr.freq) {
7171 long freq = event->attr.sample_freq;
7172
7173 event->attr.sample_period = NSEC_PER_SEC / freq;
7174 hwc->sample_period = event->attr.sample_period;
7175 local64_set(&hwc->period_left, hwc->sample_period);
778141e3 7176 hwc->last_period = hwc->sample_period;
ba3dd36c
PZ
7177 event->attr.freq = 0;
7178 }
7179}
7180
b0a873eb
PZ
7181/*
7182 * Software event: cpu wall time clock
7183 */
7184
7185static void cpu_clock_event_update(struct perf_event *event)
24f1e32c 7186{
b0a873eb
PZ
7187 s64 prev;
7188 u64 now;
7189
a4eaf7f1 7190 now = local_clock();
b0a873eb
PZ
7191 prev = local64_xchg(&event->hw.prev_count, now);
7192 local64_add(now - prev, &event->count);
24f1e32c 7193}
24f1e32c 7194
a4eaf7f1 7195static void cpu_clock_event_start(struct perf_event *event, int flags)
b0a873eb 7196{
a4eaf7f1 7197 local64_set(&event->hw.prev_count, local_clock());
b0a873eb 7198 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
7199}
7200
a4eaf7f1 7201static void cpu_clock_event_stop(struct perf_event *event, int flags)
f29ac756 7202{
b0a873eb
PZ
7203 perf_swevent_cancel_hrtimer(event);
7204 cpu_clock_event_update(event);
7205}
f29ac756 7206
a4eaf7f1
PZ
7207static int cpu_clock_event_add(struct perf_event *event, int flags)
7208{
7209 if (flags & PERF_EF_START)
7210 cpu_clock_event_start(event, flags);
6a694a60 7211 perf_event_update_userpage(event);
a4eaf7f1
PZ
7212
7213 return 0;
7214}
7215
7216static void cpu_clock_event_del(struct perf_event *event, int flags)
7217{
7218 cpu_clock_event_stop(event, flags);
7219}
7220
b0a873eb
PZ
7221static void cpu_clock_event_read(struct perf_event *event)
7222{
7223 cpu_clock_event_update(event);
7224}
f344011c 7225
b0a873eb
PZ
7226static int cpu_clock_event_init(struct perf_event *event)
7227{
7228 if (event->attr.type != PERF_TYPE_SOFTWARE)
7229 return -ENOENT;
7230
7231 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
7232 return -ENOENT;
7233
2481c5fa
SE
7234 /*
7235 * no branch sampling for software events
7236 */
7237 if (has_branch_stack(event))
7238 return -EOPNOTSUPP;
7239
ba3dd36c
PZ
7240 perf_swevent_init_hrtimer(event);
7241
b0a873eb 7242 return 0;
f29ac756
PZ
7243}
7244
b0a873eb 7245static struct pmu perf_cpu_clock = {
89a1e187
PZ
7246 .task_ctx_nr = perf_sw_context,
7247
34f43927
PZ
7248 .capabilities = PERF_PMU_CAP_NO_NMI,
7249
b0a873eb 7250 .event_init = cpu_clock_event_init,
a4eaf7f1
PZ
7251 .add = cpu_clock_event_add,
7252 .del = cpu_clock_event_del,
7253 .start = cpu_clock_event_start,
7254 .stop = cpu_clock_event_stop,
b0a873eb
PZ
7255 .read = cpu_clock_event_read,
7256};
7257
7258/*
7259 * Software event: task time clock
7260 */
7261
7262static void task_clock_event_update(struct perf_event *event, u64 now)
5c92d124 7263{
b0a873eb
PZ
7264 u64 prev;
7265 s64 delta;
5c92d124 7266
b0a873eb
PZ
7267 prev = local64_xchg(&event->hw.prev_count, now);
7268 delta = now - prev;
7269 local64_add(delta, &event->count);
7270}
5c92d124 7271
a4eaf7f1 7272static void task_clock_event_start(struct perf_event *event, int flags)
b0a873eb 7273{
a4eaf7f1 7274 local64_set(&event->hw.prev_count, event->ctx->time);
b0a873eb 7275 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
7276}
7277
a4eaf7f1 7278static void task_clock_event_stop(struct perf_event *event, int flags)
b0a873eb
PZ
7279{
7280 perf_swevent_cancel_hrtimer(event);
7281 task_clock_event_update(event, event->ctx->time);
a4eaf7f1
PZ
7282}
7283
7284static int task_clock_event_add(struct perf_event *event, int flags)
7285{
7286 if (flags & PERF_EF_START)
7287 task_clock_event_start(event, flags);
6a694a60 7288 perf_event_update_userpage(event);
b0a873eb 7289
a4eaf7f1
PZ
7290 return 0;
7291}
7292
7293static void task_clock_event_del(struct perf_event *event, int flags)
7294{
7295 task_clock_event_stop(event, PERF_EF_UPDATE);
b0a873eb
PZ
7296}
7297
7298static void task_clock_event_read(struct perf_event *event)
7299{
768a06e2
PZ
7300 u64 now = perf_clock();
7301 u64 delta = now - event->ctx->timestamp;
7302 u64 time = event->ctx->time + delta;
b0a873eb
PZ
7303
7304 task_clock_event_update(event, time);
7305}
7306
7307static int task_clock_event_init(struct perf_event *event)
6fb2915d 7308{
b0a873eb
PZ
7309 if (event->attr.type != PERF_TYPE_SOFTWARE)
7310 return -ENOENT;
7311
7312 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
7313 return -ENOENT;
7314
2481c5fa
SE
7315 /*
7316 * no branch sampling for software events
7317 */
7318 if (has_branch_stack(event))
7319 return -EOPNOTSUPP;
7320
ba3dd36c
PZ
7321 perf_swevent_init_hrtimer(event);
7322
b0a873eb 7323 return 0;
6fb2915d
LZ
7324}
7325
b0a873eb 7326static struct pmu perf_task_clock = {
89a1e187
PZ
7327 .task_ctx_nr = perf_sw_context,
7328
34f43927
PZ
7329 .capabilities = PERF_PMU_CAP_NO_NMI,
7330
b0a873eb 7331 .event_init = task_clock_event_init,
a4eaf7f1
PZ
7332 .add = task_clock_event_add,
7333 .del = task_clock_event_del,
7334 .start = task_clock_event_start,
7335 .stop = task_clock_event_stop,
b0a873eb
PZ
7336 .read = task_clock_event_read,
7337};
6fb2915d 7338
ad5133b7 7339static void perf_pmu_nop_void(struct pmu *pmu)
e077df4f 7340{
e077df4f 7341}
6fb2915d 7342
fbbe0701
SB
7343static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
7344{
7345}
7346
ad5133b7 7347static int perf_pmu_nop_int(struct pmu *pmu)
6fb2915d 7348{
ad5133b7 7349 return 0;
6fb2915d
LZ
7350}
7351
18ab2cd3 7352static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
fbbe0701
SB
7353
7354static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
6fb2915d 7355{
fbbe0701
SB
7356 __this_cpu_write(nop_txn_flags, flags);
7357
7358 if (flags & ~PERF_PMU_TXN_ADD)
7359 return;
7360
ad5133b7 7361 perf_pmu_disable(pmu);
6fb2915d
LZ
7362}
7363
ad5133b7
PZ
7364static int perf_pmu_commit_txn(struct pmu *pmu)
7365{
fbbe0701
SB
7366 unsigned int flags = __this_cpu_read(nop_txn_flags);
7367
7368 __this_cpu_write(nop_txn_flags, 0);
7369
7370 if (flags & ~PERF_PMU_TXN_ADD)
7371 return 0;
7372
ad5133b7
PZ
7373 perf_pmu_enable(pmu);
7374 return 0;
7375}
e077df4f 7376
ad5133b7 7377static void perf_pmu_cancel_txn(struct pmu *pmu)
24f1e32c 7378{
fbbe0701
SB
7379 unsigned int flags = __this_cpu_read(nop_txn_flags);
7380
7381 __this_cpu_write(nop_txn_flags, 0);
7382
7383 if (flags & ~PERF_PMU_TXN_ADD)
7384 return;
7385
ad5133b7 7386 perf_pmu_enable(pmu);
24f1e32c
FW
7387}
7388
35edc2a5
PZ
7389static int perf_event_idx_default(struct perf_event *event)
7390{
c719f560 7391 return 0;
35edc2a5
PZ
7392}
7393
8dc85d54
PZ
7394/*
7395 * Ensures all contexts with the same task_ctx_nr have the same
7396 * pmu_cpu_context too.
7397 */
9e317041 7398static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
24f1e32c 7399{
8dc85d54 7400 struct pmu *pmu;
b326e956 7401
8dc85d54
PZ
7402 if (ctxn < 0)
7403 return NULL;
24f1e32c 7404
8dc85d54
PZ
7405 list_for_each_entry(pmu, &pmus, entry) {
7406 if (pmu->task_ctx_nr == ctxn)
7407 return pmu->pmu_cpu_context;
7408 }
24f1e32c 7409
8dc85d54 7410 return NULL;
24f1e32c
FW
7411}
7412
51676957 7413static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
24f1e32c 7414{
51676957
PZ
7415 int cpu;
7416
7417 for_each_possible_cpu(cpu) {
7418 struct perf_cpu_context *cpuctx;
7419
7420 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
7421
3f1f3320
PZ
7422 if (cpuctx->unique_pmu == old_pmu)
7423 cpuctx->unique_pmu = pmu;
51676957
PZ
7424 }
7425}
7426
7427static void free_pmu_context(struct pmu *pmu)
7428{
7429 struct pmu *i;
f5ffe02e 7430
8dc85d54 7431 mutex_lock(&pmus_lock);
0475f9ea 7432 /*
8dc85d54 7433 * Like a real lame refcount.
0475f9ea 7434 */
51676957
PZ
7435 list_for_each_entry(i, &pmus, entry) {
7436 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
7437 update_pmu_context(i, pmu);
8dc85d54 7438 goto out;
51676957 7439 }
8dc85d54 7440 }
d6d020e9 7441
51676957 7442 free_percpu(pmu->pmu_cpu_context);
8dc85d54
PZ
7443out:
7444 mutex_unlock(&pmus_lock);
24f1e32c 7445}
2e80a82a 7446static struct idr pmu_idr;
d6d020e9 7447
abe43400
PZ
7448static ssize_t
7449type_show(struct device *dev, struct device_attribute *attr, char *page)
7450{
7451 struct pmu *pmu = dev_get_drvdata(dev);
7452
7453 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
7454}
90826ca7 7455static DEVICE_ATTR_RO(type);
abe43400 7456
62b85639
SE
7457static ssize_t
7458perf_event_mux_interval_ms_show(struct device *dev,
7459 struct device_attribute *attr,
7460 char *page)
7461{
7462 struct pmu *pmu = dev_get_drvdata(dev);
7463
7464 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
7465}
7466
272325c4
PZ
7467static DEFINE_MUTEX(mux_interval_mutex);
7468
62b85639
SE
7469static ssize_t
7470perf_event_mux_interval_ms_store(struct device *dev,
7471 struct device_attribute *attr,
7472 const char *buf, size_t count)
7473{
7474 struct pmu *pmu = dev_get_drvdata(dev);
7475 int timer, cpu, ret;
7476
7477 ret = kstrtoint(buf, 0, &timer);
7478 if (ret)
7479 return ret;
7480
7481 if (timer < 1)
7482 return -EINVAL;
7483
7484 /* same value, noting to do */
7485 if (timer == pmu->hrtimer_interval_ms)
7486 return count;
7487
272325c4 7488 mutex_lock(&mux_interval_mutex);
62b85639
SE
7489 pmu->hrtimer_interval_ms = timer;
7490
7491 /* update all cpuctx for this PMU */
272325c4
PZ
7492 get_online_cpus();
7493 for_each_online_cpu(cpu) {
62b85639
SE
7494 struct perf_cpu_context *cpuctx;
7495 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
7496 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
7497
272325c4
PZ
7498 cpu_function_call(cpu,
7499 (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
62b85639 7500 }
272325c4
PZ
7501 put_online_cpus();
7502 mutex_unlock(&mux_interval_mutex);
62b85639
SE
7503
7504 return count;
7505}
90826ca7 7506static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
62b85639 7507
90826ca7
GKH
7508static struct attribute *pmu_dev_attrs[] = {
7509 &dev_attr_type.attr,
7510 &dev_attr_perf_event_mux_interval_ms.attr,
7511 NULL,
abe43400 7512};
90826ca7 7513ATTRIBUTE_GROUPS(pmu_dev);
abe43400
PZ
7514
7515static int pmu_bus_running;
7516static struct bus_type pmu_bus = {
7517 .name = "event_source",
90826ca7 7518 .dev_groups = pmu_dev_groups,
abe43400
PZ
7519};
7520
7521static void pmu_dev_release(struct device *dev)
7522{
7523 kfree(dev);
7524}
7525
7526static int pmu_dev_alloc(struct pmu *pmu)
7527{
7528 int ret = -ENOMEM;
7529
7530 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
7531 if (!pmu->dev)
7532 goto out;
7533
0c9d42ed 7534 pmu->dev->groups = pmu->attr_groups;
abe43400
PZ
7535 device_initialize(pmu->dev);
7536 ret = dev_set_name(pmu->dev, "%s", pmu->name);
7537 if (ret)
7538 goto free_dev;
7539
7540 dev_set_drvdata(pmu->dev, pmu);
7541 pmu->dev->bus = &pmu_bus;
7542 pmu->dev->release = pmu_dev_release;
7543 ret = device_add(pmu->dev);
7544 if (ret)
7545 goto free_dev;
7546
7547out:
7548 return ret;
7549
7550free_dev:
7551 put_device(pmu->dev);
7552 goto out;
7553}
7554
547e9fd7 7555static struct lock_class_key cpuctx_mutex;
facc4307 7556static struct lock_class_key cpuctx_lock;
547e9fd7 7557
03d8e80b 7558int perf_pmu_register(struct pmu *pmu, const char *name, int type)
24f1e32c 7559{
108b02cf 7560 int cpu, ret;
24f1e32c 7561
b0a873eb 7562 mutex_lock(&pmus_lock);
33696fc0
PZ
7563 ret = -ENOMEM;
7564 pmu->pmu_disable_count = alloc_percpu(int);
7565 if (!pmu->pmu_disable_count)
7566 goto unlock;
f29ac756 7567
2e80a82a
PZ
7568 pmu->type = -1;
7569 if (!name)
7570 goto skip_type;
7571 pmu->name = name;
7572
7573 if (type < 0) {
0e9c3be2
TH
7574 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
7575 if (type < 0) {
7576 ret = type;
2e80a82a
PZ
7577 goto free_pdc;
7578 }
7579 }
7580 pmu->type = type;
7581
abe43400
PZ
7582 if (pmu_bus_running) {
7583 ret = pmu_dev_alloc(pmu);
7584 if (ret)
7585 goto free_idr;
7586 }
7587
2e80a82a 7588skip_type:
8dc85d54
PZ
7589 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
7590 if (pmu->pmu_cpu_context)
7591 goto got_cpu_context;
f29ac756 7592
c4814202 7593 ret = -ENOMEM;
108b02cf
PZ
7594 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
7595 if (!pmu->pmu_cpu_context)
abe43400 7596 goto free_dev;
f344011c 7597
108b02cf
PZ
7598 for_each_possible_cpu(cpu) {
7599 struct perf_cpu_context *cpuctx;
7600
7601 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
eb184479 7602 __perf_event_init_context(&cpuctx->ctx);
547e9fd7 7603 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
facc4307 7604 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
108b02cf 7605 cpuctx->ctx.pmu = pmu;
9e630205 7606
272325c4 7607 __perf_mux_hrtimer_init(cpuctx, cpu);
9e630205 7608
3f1f3320 7609 cpuctx->unique_pmu = pmu;
108b02cf 7610 }
76e1d904 7611
8dc85d54 7612got_cpu_context:
ad5133b7
PZ
7613 if (!pmu->start_txn) {
7614 if (pmu->pmu_enable) {
7615 /*
7616 * If we have pmu_enable/pmu_disable calls, install
7617 * transaction stubs that use that to try and batch
7618 * hardware accesses.
7619 */
7620 pmu->start_txn = perf_pmu_start_txn;
7621 pmu->commit_txn = perf_pmu_commit_txn;
7622 pmu->cancel_txn = perf_pmu_cancel_txn;
7623 } else {
fbbe0701 7624 pmu->start_txn = perf_pmu_nop_txn;
ad5133b7
PZ
7625 pmu->commit_txn = perf_pmu_nop_int;
7626 pmu->cancel_txn = perf_pmu_nop_void;
f344011c 7627 }
5c92d124 7628 }
15dbf27c 7629
ad5133b7
PZ
7630 if (!pmu->pmu_enable) {
7631 pmu->pmu_enable = perf_pmu_nop_void;
7632 pmu->pmu_disable = perf_pmu_nop_void;
7633 }
7634
35edc2a5
PZ
7635 if (!pmu->event_idx)
7636 pmu->event_idx = perf_event_idx_default;
7637
b0a873eb 7638 list_add_rcu(&pmu->entry, &pmus);
bed5b25a 7639 atomic_set(&pmu->exclusive_cnt, 0);
33696fc0
PZ
7640 ret = 0;
7641unlock:
b0a873eb
PZ
7642 mutex_unlock(&pmus_lock);
7643
33696fc0 7644 return ret;
108b02cf 7645
abe43400
PZ
7646free_dev:
7647 device_del(pmu->dev);
7648 put_device(pmu->dev);
7649
2e80a82a
PZ
7650free_idr:
7651 if (pmu->type >= PERF_TYPE_MAX)
7652 idr_remove(&pmu_idr, pmu->type);
7653
108b02cf
PZ
7654free_pdc:
7655 free_percpu(pmu->pmu_disable_count);
7656 goto unlock;
f29ac756 7657}
c464c76e 7658EXPORT_SYMBOL_GPL(perf_pmu_register);
f29ac756 7659
b0a873eb 7660void perf_pmu_unregister(struct pmu *pmu)
5c92d124 7661{
b0a873eb
PZ
7662 mutex_lock(&pmus_lock);
7663 list_del_rcu(&pmu->entry);
7664 mutex_unlock(&pmus_lock);
5c92d124 7665
0475f9ea 7666 /*
cde8e884
PZ
7667 * We dereference the pmu list under both SRCU and regular RCU, so
7668 * synchronize against both of those.
0475f9ea 7669 */
b0a873eb 7670 synchronize_srcu(&pmus_srcu);
cde8e884 7671 synchronize_rcu();
d6d020e9 7672
33696fc0 7673 free_percpu(pmu->pmu_disable_count);
2e80a82a
PZ
7674 if (pmu->type >= PERF_TYPE_MAX)
7675 idr_remove(&pmu_idr, pmu->type);
abe43400
PZ
7676 device_del(pmu->dev);
7677 put_device(pmu->dev);
51676957 7678 free_pmu_context(pmu);
b0a873eb 7679}
c464c76e 7680EXPORT_SYMBOL_GPL(perf_pmu_unregister);
d6d020e9 7681
cc34b98b
MR
7682static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
7683{
ccd41c86 7684 struct perf_event_context *ctx = NULL;
cc34b98b
MR
7685 int ret;
7686
7687 if (!try_module_get(pmu->module))
7688 return -ENODEV;
ccd41c86
PZ
7689
7690 if (event->group_leader != event) {
8b10c5e2
PZ
7691 /*
7692 * This ctx->mutex can nest when we're called through
7693 * inheritance. See the perf_event_ctx_lock_nested() comment.
7694 */
7695 ctx = perf_event_ctx_lock_nested(event->group_leader,
7696 SINGLE_DEPTH_NESTING);
ccd41c86
PZ
7697 BUG_ON(!ctx);
7698 }
7699
cc34b98b
MR
7700 event->pmu = pmu;
7701 ret = pmu->event_init(event);
ccd41c86
PZ
7702
7703 if (ctx)
7704 perf_event_ctx_unlock(event->group_leader, ctx);
7705
cc34b98b
MR
7706 if (ret)
7707 module_put(pmu->module);
7708
7709 return ret;
7710}
7711
18ab2cd3 7712static struct pmu *perf_init_event(struct perf_event *event)
b0a873eb
PZ
7713{
7714 struct pmu *pmu = NULL;
7715 int idx;
940c5b29 7716 int ret;
b0a873eb
PZ
7717
7718 idx = srcu_read_lock(&pmus_srcu);
2e80a82a
PZ
7719
7720 rcu_read_lock();
7721 pmu = idr_find(&pmu_idr, event->attr.type);
7722 rcu_read_unlock();
940c5b29 7723 if (pmu) {
cc34b98b 7724 ret = perf_try_init_event(pmu, event);
940c5b29
LM
7725 if (ret)
7726 pmu = ERR_PTR(ret);
2e80a82a 7727 goto unlock;
940c5b29 7728 }
2e80a82a 7729
b0a873eb 7730 list_for_each_entry_rcu(pmu, &pmus, entry) {
cc34b98b 7731 ret = perf_try_init_event(pmu, event);
b0a873eb 7732 if (!ret)
e5f4d339 7733 goto unlock;
76e1d904 7734
b0a873eb
PZ
7735 if (ret != -ENOENT) {
7736 pmu = ERR_PTR(ret);
e5f4d339 7737 goto unlock;
f344011c 7738 }
5c92d124 7739 }
e5f4d339
PZ
7740 pmu = ERR_PTR(-ENOENT);
7741unlock:
b0a873eb 7742 srcu_read_unlock(&pmus_srcu, idx);
15dbf27c 7743
4aeb0b42 7744 return pmu;
5c92d124
IM
7745}
7746
4beb31f3
FW
7747static void account_event_cpu(struct perf_event *event, int cpu)
7748{
7749 if (event->parent)
7750 return;
7751
4beb31f3
FW
7752 if (is_cgroup_event(event))
7753 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
7754}
7755
766d6c07
FW
7756static void account_event(struct perf_event *event)
7757{
25432ae9
PZ
7758 bool inc = false;
7759
4beb31f3
FW
7760 if (event->parent)
7761 return;
7762
766d6c07 7763 if (event->attach_state & PERF_ATTACH_TASK)
25432ae9 7764 inc = true;
766d6c07
FW
7765 if (event->attr.mmap || event->attr.mmap_data)
7766 atomic_inc(&nr_mmap_events);
7767 if (event->attr.comm)
7768 atomic_inc(&nr_comm_events);
7769 if (event->attr.task)
7770 atomic_inc(&nr_task_events);
948b26b6
FW
7771 if (event->attr.freq) {
7772 if (atomic_inc_return(&nr_freq_events) == 1)
7773 tick_nohz_full_kick_all();
7774 }
45ac1403
AH
7775 if (event->attr.context_switch) {
7776 atomic_inc(&nr_switch_events);
25432ae9 7777 inc = true;
45ac1403 7778 }
4beb31f3 7779 if (has_branch_stack(event))
25432ae9 7780 inc = true;
4beb31f3 7781 if (is_cgroup_event(event))
25432ae9
PZ
7782 inc = true;
7783
7784 if (inc)
766d6c07 7785 static_key_slow_inc(&perf_sched_events.key);
4beb31f3
FW
7786
7787 account_event_cpu(event, event->cpu);
766d6c07
FW
7788}
7789
0793a61d 7790/*
cdd6c482 7791 * Allocate and initialize a event structure
0793a61d 7792 */
cdd6c482 7793static struct perf_event *
c3f00c70 7794perf_event_alloc(struct perf_event_attr *attr, int cpu,
d580ff86
PZ
7795 struct task_struct *task,
7796 struct perf_event *group_leader,
7797 struct perf_event *parent_event,
4dc0da86 7798 perf_overflow_handler_t overflow_handler,
79dff51e 7799 void *context, int cgroup_fd)
0793a61d 7800{
51b0fe39 7801 struct pmu *pmu;
cdd6c482
IM
7802 struct perf_event *event;
7803 struct hw_perf_event *hwc;
90983b16 7804 long err = -EINVAL;
0793a61d 7805
66832eb4
ON
7806 if ((unsigned)cpu >= nr_cpu_ids) {
7807 if (!task || cpu != -1)
7808 return ERR_PTR(-EINVAL);
7809 }
7810
c3f00c70 7811 event = kzalloc(sizeof(*event), GFP_KERNEL);
cdd6c482 7812 if (!event)
d5d2bc0d 7813 return ERR_PTR(-ENOMEM);
0793a61d 7814
04289bb9 7815 /*
cdd6c482 7816 * Single events are their own group leaders, with an
04289bb9
IM
7817 * empty sibling list:
7818 */
7819 if (!group_leader)
cdd6c482 7820 group_leader = event;
04289bb9 7821
cdd6c482
IM
7822 mutex_init(&event->child_mutex);
7823 INIT_LIST_HEAD(&event->child_list);
fccc714b 7824
cdd6c482
IM
7825 INIT_LIST_HEAD(&event->group_entry);
7826 INIT_LIST_HEAD(&event->event_entry);
7827 INIT_LIST_HEAD(&event->sibling_list);
10c6db11 7828 INIT_LIST_HEAD(&event->rb_entry);
71ad88ef 7829 INIT_LIST_HEAD(&event->active_entry);
f3ae75de
SE
7830 INIT_HLIST_NODE(&event->hlist_entry);
7831
10c6db11 7832
cdd6c482 7833 init_waitqueue_head(&event->waitq);
e360adbe 7834 init_irq_work(&event->pending, perf_pending_event);
0793a61d 7835
cdd6c482 7836 mutex_init(&event->mmap_mutex);
7b732a75 7837
a6fa941d 7838 atomic_long_set(&event->refcount, 1);
cdd6c482
IM
7839 event->cpu = cpu;
7840 event->attr = *attr;
7841 event->group_leader = group_leader;
7842 event->pmu = NULL;
cdd6c482 7843 event->oncpu = -1;
a96bbc16 7844
cdd6c482 7845 event->parent = parent_event;
b84fbc9f 7846
17cf22c3 7847 event->ns = get_pid_ns(task_active_pid_ns(current));
cdd6c482 7848 event->id = atomic64_inc_return(&perf_event_id);
a96bbc16 7849
cdd6c482 7850 event->state = PERF_EVENT_STATE_INACTIVE;
329d876d 7851
d580ff86
PZ
7852 if (task) {
7853 event->attach_state = PERF_ATTACH_TASK;
d580ff86 7854 /*
50f16a8b
PZ
7855 * XXX pmu::event_init needs to know what task to account to
7856 * and we cannot use the ctx information because we need the
7857 * pmu before we get a ctx.
d580ff86 7858 */
50f16a8b 7859 event->hw.target = task;
d580ff86
PZ
7860 }
7861
34f43927
PZ
7862 event->clock = &local_clock;
7863 if (parent_event)
7864 event->clock = parent_event->clock;
7865
4dc0da86 7866 if (!overflow_handler && parent_event) {
b326e956 7867 overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
7868 context = parent_event->overflow_handler_context;
7869 }
66832eb4 7870
b326e956 7871 event->overflow_handler = overflow_handler;
4dc0da86 7872 event->overflow_handler_context = context;
97eaf530 7873
0231bb53 7874 perf_event__state_init(event);
a86ed508 7875
4aeb0b42 7876 pmu = NULL;
b8e83514 7877
cdd6c482 7878 hwc = &event->hw;
bd2b5b12 7879 hwc->sample_period = attr->sample_period;
0d48696f 7880 if (attr->freq && attr->sample_freq)
bd2b5b12 7881 hwc->sample_period = 1;
eced1dfc 7882 hwc->last_period = hwc->sample_period;
bd2b5b12 7883
e7850595 7884 local64_set(&hwc->period_left, hwc->sample_period);
60db5e09 7885
2023b359 7886 /*
cdd6c482 7887 * we currently do not support PERF_FORMAT_GROUP on inherited events
2023b359 7888 */
3dab77fb 7889 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
90983b16 7890 goto err_ns;
a46a2300
YZ
7891
7892 if (!has_branch_stack(event))
7893 event->attr.branch_sample_type = 0;
2023b359 7894
79dff51e
MF
7895 if (cgroup_fd != -1) {
7896 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
7897 if (err)
7898 goto err_ns;
7899 }
7900
b0a873eb 7901 pmu = perf_init_event(event);
4aeb0b42 7902 if (!pmu)
90983b16
FW
7903 goto err_ns;
7904 else if (IS_ERR(pmu)) {
4aeb0b42 7905 err = PTR_ERR(pmu);
90983b16 7906 goto err_ns;
621a01ea 7907 }
d5d2bc0d 7908
bed5b25a
AS
7909 err = exclusive_event_init(event);
7910 if (err)
7911 goto err_pmu;
7912
cdd6c482 7913 if (!event->parent) {
927c7a9e
FW
7914 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
7915 err = get_callchain_buffers();
90983b16 7916 if (err)
bed5b25a 7917 goto err_per_task;
d010b332 7918 }
f344011c 7919 }
9ee318a7 7920
cdd6c482 7921 return event;
90983b16 7922
bed5b25a
AS
7923err_per_task:
7924 exclusive_event_destroy(event);
7925
90983b16
FW
7926err_pmu:
7927 if (event->destroy)
7928 event->destroy(event);
c464c76e 7929 module_put(pmu->module);
90983b16 7930err_ns:
79dff51e
MF
7931 if (is_cgroup_event(event))
7932 perf_detach_cgroup(event);
90983b16
FW
7933 if (event->ns)
7934 put_pid_ns(event->ns);
7935 kfree(event);
7936
7937 return ERR_PTR(err);
0793a61d
TG
7938}
7939
cdd6c482
IM
7940static int perf_copy_attr(struct perf_event_attr __user *uattr,
7941 struct perf_event_attr *attr)
974802ea 7942{
974802ea 7943 u32 size;
cdf8073d 7944 int ret;
974802ea
PZ
7945
7946 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
7947 return -EFAULT;
7948
7949 /*
7950 * zero the full structure, so that a short copy will be nice.
7951 */
7952 memset(attr, 0, sizeof(*attr));
7953
7954 ret = get_user(size, &uattr->size);
7955 if (ret)
7956 return ret;
7957
7958 if (size > PAGE_SIZE) /* silly large */
7959 goto err_size;
7960
7961 if (!size) /* abi compat */
7962 size = PERF_ATTR_SIZE_VER0;
7963
7964 if (size < PERF_ATTR_SIZE_VER0)
7965 goto err_size;
7966
7967 /*
7968 * If we're handed a bigger struct than we know of,
cdf8073d
IS
7969 * ensure all the unknown bits are 0 - i.e. new
7970 * user-space does not rely on any kernel feature
7971 * extensions we dont know about yet.
974802ea
PZ
7972 */
7973 if (size > sizeof(*attr)) {
cdf8073d
IS
7974 unsigned char __user *addr;
7975 unsigned char __user *end;
7976 unsigned char val;
974802ea 7977
cdf8073d
IS
7978 addr = (void __user *)uattr + sizeof(*attr);
7979 end = (void __user *)uattr + size;
974802ea 7980
cdf8073d 7981 for (; addr < end; addr++) {
974802ea
PZ
7982 ret = get_user(val, addr);
7983 if (ret)
7984 return ret;
7985 if (val)
7986 goto err_size;
7987 }
b3e62e35 7988 size = sizeof(*attr);
974802ea
PZ
7989 }
7990
7991 ret = copy_from_user(attr, uattr, size);
7992 if (ret)
7993 return -EFAULT;
7994
cd757645 7995 if (attr->__reserved_1)
974802ea
PZ
7996 return -EINVAL;
7997
7998 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
7999 return -EINVAL;
8000
8001 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
8002 return -EINVAL;
8003
bce38cd5
SE
8004 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
8005 u64 mask = attr->branch_sample_type;
8006
8007 /* only using defined bits */
8008 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
8009 return -EINVAL;
8010
8011 /* at least one branch bit must be set */
8012 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
8013 return -EINVAL;
8014
bce38cd5
SE
8015 /* propagate priv level, when not set for branch */
8016 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
8017
8018 /* exclude_kernel checked on syscall entry */
8019 if (!attr->exclude_kernel)
8020 mask |= PERF_SAMPLE_BRANCH_KERNEL;
8021
8022 if (!attr->exclude_user)
8023 mask |= PERF_SAMPLE_BRANCH_USER;
8024
8025 if (!attr->exclude_hv)
8026 mask |= PERF_SAMPLE_BRANCH_HV;
8027 /*
8028 * adjust user setting (for HW filter setup)
8029 */
8030 attr->branch_sample_type = mask;
8031 }
e712209a
SE
8032 /* privileged levels capture (kernel, hv): check permissions */
8033 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
2b923c8f
SE
8034 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
8035 return -EACCES;
bce38cd5 8036 }
4018994f 8037
c5ebcedb 8038 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
4018994f 8039 ret = perf_reg_validate(attr->sample_regs_user);
c5ebcedb
JO
8040 if (ret)
8041 return ret;
8042 }
8043
8044 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
8045 if (!arch_perf_have_user_stack_dump())
8046 return -ENOSYS;
8047
8048 /*
8049 * We have __u32 type for the size, but so far
8050 * we can only use __u16 as maximum due to the
8051 * __u16 sample size limit.
8052 */
8053 if (attr->sample_stack_user >= USHRT_MAX)
8054 ret = -EINVAL;
8055 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
8056 ret = -EINVAL;
8057 }
4018994f 8058
60e2364e
SE
8059 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
8060 ret = perf_reg_validate(attr->sample_regs_intr);
974802ea
PZ
8061out:
8062 return ret;
8063
8064err_size:
8065 put_user(sizeof(*attr), &uattr->size);
8066 ret = -E2BIG;
8067 goto out;
8068}
8069
ac9721f3
PZ
8070static int
8071perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
a4be7c27 8072{
b69cf536 8073 struct ring_buffer *rb = NULL;
a4be7c27
PZ
8074 int ret = -EINVAL;
8075
ac9721f3 8076 if (!output_event)
a4be7c27
PZ
8077 goto set;
8078
ac9721f3
PZ
8079 /* don't allow circular references */
8080 if (event == output_event)
a4be7c27
PZ
8081 goto out;
8082
0f139300
PZ
8083 /*
8084 * Don't allow cross-cpu buffers
8085 */
8086 if (output_event->cpu != event->cpu)
8087 goto out;
8088
8089 /*
76369139 8090 * If its not a per-cpu rb, it must be the same task.
0f139300
PZ
8091 */
8092 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
8093 goto out;
8094
34f43927
PZ
8095 /*
8096 * Mixing clocks in the same buffer is trouble you don't need.
8097 */
8098 if (output_event->clock != event->clock)
8099 goto out;
8100
45bfb2e5
PZ
8101 /*
8102 * If both events generate aux data, they must be on the same PMU
8103 */
8104 if (has_aux(event) && has_aux(output_event) &&
8105 event->pmu != output_event->pmu)
8106 goto out;
8107
a4be7c27 8108set:
cdd6c482 8109 mutex_lock(&event->mmap_mutex);
ac9721f3
PZ
8110 /* Can't redirect output if we've got an active mmap() */
8111 if (atomic_read(&event->mmap_count))
8112 goto unlock;
a4be7c27 8113
ac9721f3 8114 if (output_event) {
76369139
FW
8115 /* get the rb we want to redirect to */
8116 rb = ring_buffer_get(output_event);
8117 if (!rb)
ac9721f3 8118 goto unlock;
a4be7c27
PZ
8119 }
8120
b69cf536 8121 ring_buffer_attach(event, rb);
9bb5d40c 8122
a4be7c27 8123 ret = 0;
ac9721f3
PZ
8124unlock:
8125 mutex_unlock(&event->mmap_mutex);
8126
a4be7c27 8127out:
a4be7c27
PZ
8128 return ret;
8129}
8130
f63a8daa
PZ
8131static void mutex_lock_double(struct mutex *a, struct mutex *b)
8132{
8133 if (b < a)
8134 swap(a, b);
8135
8136 mutex_lock(a);
8137 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
8138}
8139
34f43927
PZ
8140static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
8141{
8142 bool nmi_safe = false;
8143
8144 switch (clk_id) {
8145 case CLOCK_MONOTONIC:
8146 event->clock = &ktime_get_mono_fast_ns;
8147 nmi_safe = true;
8148 break;
8149
8150 case CLOCK_MONOTONIC_RAW:
8151 event->clock = &ktime_get_raw_fast_ns;
8152 nmi_safe = true;
8153 break;
8154
8155 case CLOCK_REALTIME:
8156 event->clock = &ktime_get_real_ns;
8157 break;
8158
8159 case CLOCK_BOOTTIME:
8160 event->clock = &ktime_get_boot_ns;
8161 break;
8162
8163 case CLOCK_TAI:
8164 event->clock = &ktime_get_tai_ns;
8165 break;
8166
8167 default:
8168 return -EINVAL;
8169 }
8170
8171 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
8172 return -EINVAL;
8173
8174 return 0;
8175}
8176
0793a61d 8177/**
cdd6c482 8178 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9f66a381 8179 *
cdd6c482 8180 * @attr_uptr: event_id type attributes for monitoring/sampling
0793a61d 8181 * @pid: target pid
9f66a381 8182 * @cpu: target cpu
cdd6c482 8183 * @group_fd: group leader event fd
0793a61d 8184 */
cdd6c482
IM
8185SYSCALL_DEFINE5(perf_event_open,
8186 struct perf_event_attr __user *, attr_uptr,
2743a5b0 8187 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
0793a61d 8188{
b04243ef
PZ
8189 struct perf_event *group_leader = NULL, *output_event = NULL;
8190 struct perf_event *event, *sibling;
cdd6c482 8191 struct perf_event_attr attr;
f63a8daa 8192 struct perf_event_context *ctx, *uninitialized_var(gctx);
cdd6c482 8193 struct file *event_file = NULL;
2903ff01 8194 struct fd group = {NULL, 0};
38a81da2 8195 struct task_struct *task = NULL;
89a1e187 8196 struct pmu *pmu;
ea635c64 8197 int event_fd;
b04243ef 8198 int move_group = 0;
dc86cabe 8199 int err;
a21b0b35 8200 int f_flags = O_RDWR;
79dff51e 8201 int cgroup_fd = -1;
0793a61d 8202
2743a5b0 8203 /* for future expandability... */
e5d1367f 8204 if (flags & ~PERF_FLAG_ALL)
2743a5b0
PM
8205 return -EINVAL;
8206
dc86cabe
IM
8207 err = perf_copy_attr(attr_uptr, &attr);
8208 if (err)
8209 return err;
eab656ae 8210
0764771d
PZ
8211 if (!attr.exclude_kernel) {
8212 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
8213 return -EACCES;
8214 }
8215
df58ab24 8216 if (attr.freq) {
cdd6c482 8217 if (attr.sample_freq > sysctl_perf_event_sample_rate)
df58ab24 8218 return -EINVAL;
0819b2e3
PZ
8219 } else {
8220 if (attr.sample_period & (1ULL << 63))
8221 return -EINVAL;
df58ab24
PZ
8222 }
8223
e5d1367f
SE
8224 /*
8225 * In cgroup mode, the pid argument is used to pass the fd
8226 * opened to the cgroup directory in cgroupfs. The cpu argument
8227 * designates the cpu on which to monitor threads from that
8228 * cgroup.
8229 */
8230 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
8231 return -EINVAL;
8232
a21b0b35
YD
8233 if (flags & PERF_FLAG_FD_CLOEXEC)
8234 f_flags |= O_CLOEXEC;
8235
8236 event_fd = get_unused_fd_flags(f_flags);
ea635c64
AV
8237 if (event_fd < 0)
8238 return event_fd;
8239
ac9721f3 8240 if (group_fd != -1) {
2903ff01
AV
8241 err = perf_fget_light(group_fd, &group);
8242 if (err)
d14b12d7 8243 goto err_fd;
2903ff01 8244 group_leader = group.file->private_data;
ac9721f3
PZ
8245 if (flags & PERF_FLAG_FD_OUTPUT)
8246 output_event = group_leader;
8247 if (flags & PERF_FLAG_FD_NO_GROUP)
8248 group_leader = NULL;
8249 }
8250
e5d1367f 8251 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
c6be5a5c
PZ
8252 task = find_lively_task_by_vpid(pid);
8253 if (IS_ERR(task)) {
8254 err = PTR_ERR(task);
8255 goto err_group_fd;
8256 }
8257 }
8258
1f4ee503
PZ
8259 if (task && group_leader &&
8260 group_leader->attr.inherit != attr.inherit) {
8261 err = -EINVAL;
8262 goto err_task;
8263 }
8264
fbfc623f
YZ
8265 get_online_cpus();
8266
79dff51e
MF
8267 if (flags & PERF_FLAG_PID_CGROUP)
8268 cgroup_fd = pid;
8269
4dc0da86 8270 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
79dff51e 8271 NULL, NULL, cgroup_fd);
d14b12d7
SE
8272 if (IS_ERR(event)) {
8273 err = PTR_ERR(event);
1f4ee503 8274 goto err_cpus;
d14b12d7
SE
8275 }
8276
53b25335
VW
8277 if (is_sampling_event(event)) {
8278 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
8279 err = -ENOTSUPP;
8280 goto err_alloc;
8281 }
8282 }
8283
766d6c07
FW
8284 account_event(event);
8285
89a1e187
PZ
8286 /*
8287 * Special case software events and allow them to be part of
8288 * any hardware group.
8289 */
8290 pmu = event->pmu;
b04243ef 8291
34f43927
PZ
8292 if (attr.use_clockid) {
8293 err = perf_event_set_clock(event, attr.clockid);
8294 if (err)
8295 goto err_alloc;
8296 }
8297
b04243ef
PZ
8298 if (group_leader &&
8299 (is_software_event(event) != is_software_event(group_leader))) {
8300 if (is_software_event(event)) {
8301 /*
8302 * If event and group_leader are not both a software
8303 * event, and event is, then group leader is not.
8304 *
8305 * Allow the addition of software events to !software
8306 * groups, this is safe because software events never
8307 * fail to schedule.
8308 */
8309 pmu = group_leader->pmu;
8310 } else if (is_software_event(group_leader) &&
8311 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
8312 /*
8313 * In case the group is a pure software group, and we
8314 * try to add a hardware event, move the whole group to
8315 * the hardware context.
8316 */
8317 move_group = 1;
8318 }
8319 }
89a1e187
PZ
8320
8321 /*
8322 * Get the target context (task or percpu):
8323 */
4af57ef2 8324 ctx = find_get_context(pmu, task, event);
89a1e187
PZ
8325 if (IS_ERR(ctx)) {
8326 err = PTR_ERR(ctx);
c6be5a5c 8327 goto err_alloc;
89a1e187
PZ
8328 }
8329
bed5b25a
AS
8330 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
8331 err = -EBUSY;
8332 goto err_context;
8333 }
8334
fd1edb3a
PZ
8335 if (task) {
8336 put_task_struct(task);
8337 task = NULL;
8338 }
8339
ccff286d 8340 /*
cdd6c482 8341 * Look up the group leader (we will attach this event to it):
04289bb9 8342 */
ac9721f3 8343 if (group_leader) {
dc86cabe 8344 err = -EINVAL;
04289bb9 8345
04289bb9 8346 /*
ccff286d
IM
8347 * Do not allow a recursive hierarchy (this new sibling
8348 * becoming part of another group-sibling):
8349 */
8350 if (group_leader->group_leader != group_leader)
c3f00c70 8351 goto err_context;
34f43927
PZ
8352
8353 /* All events in a group should have the same clock */
8354 if (group_leader->clock != event->clock)
8355 goto err_context;
8356
ccff286d
IM
8357 /*
8358 * Do not allow to attach to a group in a different
8359 * task or CPU context:
04289bb9 8360 */
b04243ef 8361 if (move_group) {
c3c87e77
PZ
8362 /*
8363 * Make sure we're both on the same task, or both
8364 * per-cpu events.
8365 */
8366 if (group_leader->ctx->task != ctx->task)
8367 goto err_context;
8368
8369 /*
8370 * Make sure we're both events for the same CPU;
8371 * grouping events for different CPUs is broken; since
8372 * you can never concurrently schedule them anyhow.
8373 */
8374 if (group_leader->cpu != event->cpu)
b04243ef
PZ
8375 goto err_context;
8376 } else {
8377 if (group_leader->ctx != ctx)
8378 goto err_context;
8379 }
8380
3b6f9e5c
PM
8381 /*
8382 * Only a group leader can be exclusive or pinned
8383 */
0d48696f 8384 if (attr.exclusive || attr.pinned)
c3f00c70 8385 goto err_context;
ac9721f3
PZ
8386 }
8387
8388 if (output_event) {
8389 err = perf_event_set_output(event, output_event);
8390 if (err)
c3f00c70 8391 goto err_context;
ac9721f3 8392 }
0793a61d 8393
a21b0b35
YD
8394 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
8395 f_flags);
ea635c64
AV
8396 if (IS_ERR(event_file)) {
8397 err = PTR_ERR(event_file);
c3f00c70 8398 goto err_context;
ea635c64 8399 }
9b51f66d 8400
b04243ef 8401 if (move_group) {
f63a8daa 8402 gctx = group_leader->ctx;
f55fc2a5 8403 mutex_lock_double(&gctx->mutex, &ctx->mutex);
84c4e620
PZ
8404 if (gctx->task == TASK_TOMBSTONE) {
8405 err = -ESRCH;
8406 goto err_locked;
8407 }
f55fc2a5
PZ
8408 } else {
8409 mutex_lock(&ctx->mutex);
8410 }
8411
84c4e620
PZ
8412 if (ctx->task == TASK_TOMBSTONE) {
8413 err = -ESRCH;
8414 goto err_locked;
8415 }
8416
a723968c
PZ
8417 if (!perf_event_validate_size(event)) {
8418 err = -E2BIG;
8419 goto err_locked;
8420 }
8421
f55fc2a5
PZ
8422 /*
8423 * Must be under the same ctx::mutex as perf_install_in_context(),
8424 * because we need to serialize with concurrent event creation.
8425 */
8426 if (!exclusive_event_installable(event, ctx)) {
8427 /* exclusive and group stuff are assumed mutually exclusive */
8428 WARN_ON_ONCE(move_group);
f63a8daa 8429
f55fc2a5
PZ
8430 err = -EBUSY;
8431 goto err_locked;
8432 }
f63a8daa 8433
f55fc2a5
PZ
8434 WARN_ON_ONCE(ctx->parent_ctx);
8435
8436 if (move_group) {
f63a8daa
PZ
8437 /*
8438 * See perf_event_ctx_lock() for comments on the details
8439 * of swizzling perf_event::ctx.
8440 */
45a0e07a 8441 perf_remove_from_context(group_leader, 0);
0231bb53 8442
b04243ef
PZ
8443 list_for_each_entry(sibling, &group_leader->sibling_list,
8444 group_entry) {
45a0e07a 8445 perf_remove_from_context(sibling, 0);
b04243ef
PZ
8446 put_ctx(gctx);
8447 }
b04243ef 8448
f63a8daa
PZ
8449 /*
8450 * Wait for everybody to stop referencing the events through
8451 * the old lists, before installing it on new lists.
8452 */
0cda4c02 8453 synchronize_rcu();
f63a8daa 8454
8f95b435
PZI
8455 /*
8456 * Install the group siblings before the group leader.
8457 *
8458 * Because a group leader will try and install the entire group
8459 * (through the sibling list, which is still in-tact), we can
8460 * end up with siblings installed in the wrong context.
8461 *
8462 * By installing siblings first we NO-OP because they're not
8463 * reachable through the group lists.
8464 */
b04243ef
PZ
8465 list_for_each_entry(sibling, &group_leader->sibling_list,
8466 group_entry) {
8f95b435 8467 perf_event__state_init(sibling);
9fc81d87 8468 perf_install_in_context(ctx, sibling, sibling->cpu);
b04243ef
PZ
8469 get_ctx(ctx);
8470 }
8f95b435
PZI
8471
8472 /*
8473 * Removing from the context ends up with disabled
8474 * event. What we want here is event in the initial
8475 * startup state, ready to be add into new context.
8476 */
8477 perf_event__state_init(group_leader);
8478 perf_install_in_context(ctx, group_leader, group_leader->cpu);
8479 get_ctx(ctx);
b04243ef 8480
f55fc2a5
PZ
8481 /*
8482 * Now that all events are installed in @ctx, nothing
8483 * references @gctx anymore, so drop the last reference we have
8484 * on it.
8485 */
8486 put_ctx(gctx);
bed5b25a
AS
8487 }
8488
f73e22ab
PZ
8489 /*
8490 * Precalculate sample_data sizes; do while holding ctx::mutex such
8491 * that we're serialized against further additions and before
8492 * perf_install_in_context() which is the point the event is active and
8493 * can use these values.
8494 */
8495 perf_event__header_size(event);
8496 perf_event__id_header_size(event);
8497
78cd2c74
PZ
8498 event->owner = current;
8499
e2d37cd2 8500 perf_install_in_context(ctx, event, event->cpu);
fe4b04fa 8501 perf_unpin_context(ctx);
f63a8daa 8502
f55fc2a5 8503 if (move_group)
f63a8daa 8504 mutex_unlock(&gctx->mutex);
d859e29f 8505 mutex_unlock(&ctx->mutex);
9b51f66d 8506
fbfc623f
YZ
8507 put_online_cpus();
8508
cdd6c482
IM
8509 mutex_lock(&current->perf_event_mutex);
8510 list_add_tail(&event->owner_entry, &current->perf_event_list);
8511 mutex_unlock(&current->perf_event_mutex);
082ff5a2 8512
8a49542c
PZ
8513 /*
8514 * Drop the reference on the group_event after placing the
8515 * new event on the sibling_list. This ensures destruction
8516 * of the group leader will find the pointer to itself in
8517 * perf_group_detach().
8518 */
2903ff01 8519 fdput(group);
ea635c64
AV
8520 fd_install(event_fd, event_file);
8521 return event_fd;
0793a61d 8522
f55fc2a5
PZ
8523err_locked:
8524 if (move_group)
8525 mutex_unlock(&gctx->mutex);
8526 mutex_unlock(&ctx->mutex);
8527/* err_file: */
8528 fput(event_file);
c3f00c70 8529err_context:
fe4b04fa 8530 perf_unpin_context(ctx);
ea635c64 8531 put_ctx(ctx);
c6be5a5c 8532err_alloc:
13005627
PZ
8533 /*
8534 * If event_file is set, the fput() above will have called ->release()
8535 * and that will take care of freeing the event.
8536 */
8537 if (!event_file)
8538 free_event(event);
1f4ee503 8539err_cpus:
fbfc623f 8540 put_online_cpus();
1f4ee503 8541err_task:
e7d0bc04
PZ
8542 if (task)
8543 put_task_struct(task);
89a1e187 8544err_group_fd:
2903ff01 8545 fdput(group);
ea635c64
AV
8546err_fd:
8547 put_unused_fd(event_fd);
dc86cabe 8548 return err;
0793a61d
TG
8549}
8550
fb0459d7
AV
8551/**
8552 * perf_event_create_kernel_counter
8553 *
8554 * @attr: attributes of the counter to create
8555 * @cpu: cpu in which the counter is bound
38a81da2 8556 * @task: task to profile (NULL for percpu)
fb0459d7
AV
8557 */
8558struct perf_event *
8559perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
38a81da2 8560 struct task_struct *task,
4dc0da86
AK
8561 perf_overflow_handler_t overflow_handler,
8562 void *context)
fb0459d7 8563{
fb0459d7 8564 struct perf_event_context *ctx;
c3f00c70 8565 struct perf_event *event;
fb0459d7 8566 int err;
d859e29f 8567
fb0459d7
AV
8568 /*
8569 * Get the target context (task or percpu):
8570 */
d859e29f 8571
4dc0da86 8572 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
79dff51e 8573 overflow_handler, context, -1);
c3f00c70
PZ
8574 if (IS_ERR(event)) {
8575 err = PTR_ERR(event);
8576 goto err;
8577 }
d859e29f 8578
f8697762 8579 /* Mark owner so we could distinguish it from user events. */
63b6da39 8580 event->owner = TASK_TOMBSTONE;
f8697762 8581
766d6c07
FW
8582 account_event(event);
8583
4af57ef2 8584 ctx = find_get_context(event->pmu, task, event);
c6567f64
FW
8585 if (IS_ERR(ctx)) {
8586 err = PTR_ERR(ctx);
c3f00c70 8587 goto err_free;
d859e29f 8588 }
fb0459d7 8589
fb0459d7
AV
8590 WARN_ON_ONCE(ctx->parent_ctx);
8591 mutex_lock(&ctx->mutex);
84c4e620
PZ
8592 if (ctx->task == TASK_TOMBSTONE) {
8593 err = -ESRCH;
8594 goto err_unlock;
8595 }
8596
bed5b25a 8597 if (!exclusive_event_installable(event, ctx)) {
bed5b25a 8598 err = -EBUSY;
84c4e620 8599 goto err_unlock;
bed5b25a
AS
8600 }
8601
fb0459d7 8602 perf_install_in_context(ctx, event, cpu);
fe4b04fa 8603 perf_unpin_context(ctx);
fb0459d7
AV
8604 mutex_unlock(&ctx->mutex);
8605
fb0459d7
AV
8606 return event;
8607
84c4e620
PZ
8608err_unlock:
8609 mutex_unlock(&ctx->mutex);
8610 perf_unpin_context(ctx);
8611 put_ctx(ctx);
c3f00c70
PZ
8612err_free:
8613 free_event(event);
8614err:
c6567f64 8615 return ERR_PTR(err);
9b51f66d 8616}
fb0459d7 8617EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
9b51f66d 8618
0cda4c02
YZ
8619void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
8620{
8621 struct perf_event_context *src_ctx;
8622 struct perf_event_context *dst_ctx;
8623 struct perf_event *event, *tmp;
8624 LIST_HEAD(events);
8625
8626 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
8627 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
8628
f63a8daa
PZ
8629 /*
8630 * See perf_event_ctx_lock() for comments on the details
8631 * of swizzling perf_event::ctx.
8632 */
8633 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
0cda4c02
YZ
8634 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
8635 event_entry) {
45a0e07a 8636 perf_remove_from_context(event, 0);
9a545de0 8637 unaccount_event_cpu(event, src_cpu);
0cda4c02 8638 put_ctx(src_ctx);
9886167d 8639 list_add(&event->migrate_entry, &events);
0cda4c02 8640 }
0cda4c02 8641
8f95b435
PZI
8642 /*
8643 * Wait for the events to quiesce before re-instating them.
8644 */
0cda4c02
YZ
8645 synchronize_rcu();
8646
8f95b435
PZI
8647 /*
8648 * Re-instate events in 2 passes.
8649 *
8650 * Skip over group leaders and only install siblings on this first
8651 * pass, siblings will not get enabled without a leader, however a
8652 * leader will enable its siblings, even if those are still on the old
8653 * context.
8654 */
8655 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
8656 if (event->group_leader == event)
8657 continue;
8658
8659 list_del(&event->migrate_entry);
8660 if (event->state >= PERF_EVENT_STATE_OFF)
8661 event->state = PERF_EVENT_STATE_INACTIVE;
8662 account_event_cpu(event, dst_cpu);
8663 perf_install_in_context(dst_ctx, event, dst_cpu);
8664 get_ctx(dst_ctx);
8665 }
8666
8667 /*
8668 * Once all the siblings are setup properly, install the group leaders
8669 * to make it go.
8670 */
9886167d
PZ
8671 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
8672 list_del(&event->migrate_entry);
0cda4c02
YZ
8673 if (event->state >= PERF_EVENT_STATE_OFF)
8674 event->state = PERF_EVENT_STATE_INACTIVE;
9a545de0 8675 account_event_cpu(event, dst_cpu);
0cda4c02
YZ
8676 perf_install_in_context(dst_ctx, event, dst_cpu);
8677 get_ctx(dst_ctx);
8678 }
8679 mutex_unlock(&dst_ctx->mutex);
f63a8daa 8680 mutex_unlock(&src_ctx->mutex);
0cda4c02
YZ
8681}
8682EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
8683
cdd6c482 8684static void sync_child_event(struct perf_event *child_event,
38b200d6 8685 struct task_struct *child)
d859e29f 8686{
cdd6c482 8687 struct perf_event *parent_event = child_event->parent;
8bc20959 8688 u64 child_val;
d859e29f 8689
cdd6c482
IM
8690 if (child_event->attr.inherit_stat)
8691 perf_event_read_event(child_event, child);
38b200d6 8692
b5e58793 8693 child_val = perf_event_count(child_event);
d859e29f
PM
8694
8695 /*
8696 * Add back the child's count to the parent's count:
8697 */
a6e6dea6 8698 atomic64_add(child_val, &parent_event->child_count);
cdd6c482
IM
8699 atomic64_add(child_event->total_time_enabled,
8700 &parent_event->child_total_time_enabled);
8701 atomic64_add(child_event->total_time_running,
8702 &parent_event->child_total_time_running);
d859e29f
PM
8703}
8704
9b51f66d 8705static void
8ba289b8
PZ
8706perf_event_exit_event(struct perf_event *child_event,
8707 struct perf_event_context *child_ctx,
8708 struct task_struct *child)
9b51f66d 8709{
8ba289b8
PZ
8710 struct perf_event *parent_event = child_event->parent;
8711
1903d50c
PZ
8712 /*
8713 * Do not destroy the 'original' grouping; because of the context
8714 * switch optimization the original events could've ended up in a
8715 * random child task.
8716 *
8717 * If we were to destroy the original group, all group related
8718 * operations would cease to function properly after this random
8719 * child dies.
8720 *
8721 * Do destroy all inherited groups, we don't care about those
8722 * and being thorough is better.
8723 */
32132a3d
PZ
8724 raw_spin_lock_irq(&child_ctx->lock);
8725 WARN_ON_ONCE(child_ctx->is_active);
8726
8ba289b8 8727 if (parent_event)
32132a3d
PZ
8728 perf_group_detach(child_event);
8729 list_del_event(child_event, child_ctx);
c6e5b732 8730 child_event->state = PERF_EVENT_STATE_EXIT; /* see perf_event_release_kernel() */
32132a3d 8731 raw_spin_unlock_irq(&child_ctx->lock);
0cc0c027 8732
9b51f66d 8733 /*
8ba289b8 8734 * Parent events are governed by their filedesc, retain them.
9b51f66d 8735 */
8ba289b8 8736 if (!parent_event) {
179033b3 8737 perf_event_wakeup(child_event);
8ba289b8 8738 return;
4bcf349a 8739 }
8ba289b8
PZ
8740 /*
8741 * Child events can be cleaned up.
8742 */
8743
8744 sync_child_event(child_event, child);
8745
8746 /*
8747 * Remove this event from the parent's list
8748 */
8749 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
8750 mutex_lock(&parent_event->child_mutex);
8751 list_del_init(&child_event->child_list);
8752 mutex_unlock(&parent_event->child_mutex);
8753
8754 /*
8755 * Kick perf_poll() for is_event_hup().
8756 */
8757 perf_event_wakeup(parent_event);
8758 free_event(child_event);
8759 put_event(parent_event);
9b51f66d
IM
8760}
8761
8dc85d54 8762static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9b51f66d 8763{
211de6eb 8764 struct perf_event_context *child_ctx, *clone_ctx = NULL;
63b6da39 8765 struct perf_event *child_event, *next;
63b6da39
PZ
8766
8767 WARN_ON_ONCE(child != current);
9b51f66d 8768
6a3351b6 8769 child_ctx = perf_pin_task_context(child, ctxn);
63b6da39 8770 if (!child_ctx)
9b51f66d
IM
8771 return;
8772
ad3a37de 8773 /*
6a3351b6
PZ
8774 * In order to reduce the amount of tricky in ctx tear-down, we hold
8775 * ctx::mutex over the entire thing. This serializes against almost
8776 * everything that wants to access the ctx.
8777 *
8778 * The exception is sys_perf_event_open() /
8779 * perf_event_create_kernel_count() which does find_get_context()
8780 * without ctx::mutex (it cannot because of the move_group double mutex
8781 * lock thing). See the comments in perf_install_in_context().
ad3a37de 8782 */
6a3351b6 8783 mutex_lock(&child_ctx->mutex);
c93f7669
PM
8784
8785 /*
6a3351b6
PZ
8786 * In a single ctx::lock section, de-schedule the events and detach the
8787 * context from the task such that we cannot ever get it scheduled back
8788 * in.
c93f7669 8789 */
6a3351b6 8790 raw_spin_lock_irq(&child_ctx->lock);
63b6da39 8791 task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx);
4a1c0f26 8792
71a851b4 8793 /*
63b6da39
PZ
8794 * Now that the context is inactive, destroy the task <-> ctx relation
8795 * and mark the context dead.
71a851b4 8796 */
63b6da39
PZ
8797 RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
8798 put_ctx(child_ctx); /* cannot be last */
8799 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
8800 put_task_struct(current); /* cannot be last */
4a1c0f26 8801
211de6eb 8802 clone_ctx = unclone_ctx(child_ctx);
6a3351b6 8803 raw_spin_unlock_irq(&child_ctx->lock);
9f498cc5 8804
211de6eb
PZ
8805 if (clone_ctx)
8806 put_ctx(clone_ctx);
4a1c0f26 8807
9f498cc5 8808 /*
cdd6c482
IM
8809 * Report the task dead after unscheduling the events so that we
8810 * won't get any samples after PERF_RECORD_EXIT. We can however still
8811 * get a few PERF_RECORD_READ events.
9f498cc5 8812 */
cdd6c482 8813 perf_event_task(child, child_ctx, 0);
a63eaf34 8814
ebf905fc 8815 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
8ba289b8 8816 perf_event_exit_event(child_event, child_ctx, child);
8bc20959 8817
a63eaf34
PM
8818 mutex_unlock(&child_ctx->mutex);
8819
8820 put_ctx(child_ctx);
9b51f66d
IM
8821}
8822
8dc85d54
PZ
8823/*
8824 * When a child task exits, feed back event values to parent events.
8825 */
8826void perf_event_exit_task(struct task_struct *child)
8827{
8882135b 8828 struct perf_event *event, *tmp;
8dc85d54
PZ
8829 int ctxn;
8830
8882135b
PZ
8831 mutex_lock(&child->perf_event_mutex);
8832 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
8833 owner_entry) {
8834 list_del_init(&event->owner_entry);
8835
8836 /*
8837 * Ensure the list deletion is visible before we clear
8838 * the owner, closes a race against perf_release() where
8839 * we need to serialize on the owner->perf_event_mutex.
8840 */
f47c02c0 8841 smp_store_release(&event->owner, NULL);
8882135b
PZ
8842 }
8843 mutex_unlock(&child->perf_event_mutex);
8844
8dc85d54
PZ
8845 for_each_task_context_nr(ctxn)
8846 perf_event_exit_task_context(child, ctxn);
4e93ad60
JO
8847
8848 /*
8849 * The perf_event_exit_task_context calls perf_event_task
8850 * with child's task_ctx, which generates EXIT events for
8851 * child contexts and sets child->perf_event_ctxp[] to NULL.
8852 * At this point we need to send EXIT events to cpu contexts.
8853 */
8854 perf_event_task(child, NULL, 0);
8dc85d54
PZ
8855}
8856
889ff015
FW
8857static void perf_free_event(struct perf_event *event,
8858 struct perf_event_context *ctx)
8859{
8860 struct perf_event *parent = event->parent;
8861
8862 if (WARN_ON_ONCE(!parent))
8863 return;
8864
8865 mutex_lock(&parent->child_mutex);
8866 list_del_init(&event->child_list);
8867 mutex_unlock(&parent->child_mutex);
8868
a6fa941d 8869 put_event(parent);
889ff015 8870
652884fe 8871 raw_spin_lock_irq(&ctx->lock);
8a49542c 8872 perf_group_detach(event);
889ff015 8873 list_del_event(event, ctx);
652884fe 8874 raw_spin_unlock_irq(&ctx->lock);
889ff015
FW
8875 free_event(event);
8876}
8877
bbbee908 8878/*
652884fe 8879 * Free an unexposed, unused context as created by inheritance by
8dc85d54 8880 * perf_event_init_task below, used by fork() in case of fail.
652884fe
PZ
8881 *
8882 * Not all locks are strictly required, but take them anyway to be nice and
8883 * help out with the lockdep assertions.
bbbee908 8884 */
cdd6c482 8885void perf_event_free_task(struct task_struct *task)
bbbee908 8886{
8dc85d54 8887 struct perf_event_context *ctx;
cdd6c482 8888 struct perf_event *event, *tmp;
8dc85d54 8889 int ctxn;
bbbee908 8890
8dc85d54
PZ
8891 for_each_task_context_nr(ctxn) {
8892 ctx = task->perf_event_ctxp[ctxn];
8893 if (!ctx)
8894 continue;
bbbee908 8895
8dc85d54 8896 mutex_lock(&ctx->mutex);
bbbee908 8897again:
8dc85d54
PZ
8898 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
8899 group_entry)
8900 perf_free_event(event, ctx);
bbbee908 8901
8dc85d54
PZ
8902 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
8903 group_entry)
8904 perf_free_event(event, ctx);
bbbee908 8905
8dc85d54
PZ
8906 if (!list_empty(&ctx->pinned_groups) ||
8907 !list_empty(&ctx->flexible_groups))
8908 goto again;
bbbee908 8909
8dc85d54 8910 mutex_unlock(&ctx->mutex);
bbbee908 8911
8dc85d54
PZ
8912 put_ctx(ctx);
8913 }
889ff015
FW
8914}
8915
4e231c79
PZ
8916void perf_event_delayed_put(struct task_struct *task)
8917{
8918 int ctxn;
8919
8920 for_each_task_context_nr(ctxn)
8921 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
8922}
8923
e03e7ee3 8924struct file *perf_event_get(unsigned int fd)
ffe8690c 8925{
e03e7ee3 8926 struct file *file;
ffe8690c 8927
e03e7ee3
AS
8928 file = fget_raw(fd);
8929 if (!file)
8930 return ERR_PTR(-EBADF);
ffe8690c 8931
e03e7ee3
AS
8932 if (file->f_op != &perf_fops) {
8933 fput(file);
8934 return ERR_PTR(-EBADF);
8935 }
ffe8690c 8936
e03e7ee3 8937 return file;
ffe8690c
KX
8938}
8939
8940const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
8941{
8942 if (!event)
8943 return ERR_PTR(-EINVAL);
8944
8945 return &event->attr;
8946}
8947
97dee4f3
PZ
8948/*
8949 * inherit a event from parent task to child task:
8950 */
8951static struct perf_event *
8952inherit_event(struct perf_event *parent_event,
8953 struct task_struct *parent,
8954 struct perf_event_context *parent_ctx,
8955 struct task_struct *child,
8956 struct perf_event *group_leader,
8957 struct perf_event_context *child_ctx)
8958{
1929def9 8959 enum perf_event_active_state parent_state = parent_event->state;
97dee4f3 8960 struct perf_event *child_event;
cee010ec 8961 unsigned long flags;
97dee4f3
PZ
8962
8963 /*
8964 * Instead of creating recursive hierarchies of events,
8965 * we link inherited events back to the original parent,
8966 * which has a filp for sure, which we use as the reference
8967 * count:
8968 */
8969 if (parent_event->parent)
8970 parent_event = parent_event->parent;
8971
8972 child_event = perf_event_alloc(&parent_event->attr,
8973 parent_event->cpu,
d580ff86 8974 child,
97dee4f3 8975 group_leader, parent_event,
79dff51e 8976 NULL, NULL, -1);
97dee4f3
PZ
8977 if (IS_ERR(child_event))
8978 return child_event;
a6fa941d 8979
c6e5b732
PZ
8980 /*
8981 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
8982 * must be under the same lock in order to serialize against
8983 * perf_event_release_kernel(), such that either we must observe
8984 * is_orphaned_event() or they will observe us on the child_list.
8985 */
8986 mutex_lock(&parent_event->child_mutex);
fadfe7be
JO
8987 if (is_orphaned_event(parent_event) ||
8988 !atomic_long_inc_not_zero(&parent_event->refcount)) {
c6e5b732 8989 mutex_unlock(&parent_event->child_mutex);
a6fa941d
AV
8990 free_event(child_event);
8991 return NULL;
8992 }
8993
97dee4f3
PZ
8994 get_ctx(child_ctx);
8995
8996 /*
8997 * Make the child state follow the state of the parent event,
8998 * not its attr.disabled bit. We hold the parent's mutex,
8999 * so we won't race with perf_event_{en, dis}able_family.
9000 */
1929def9 9001 if (parent_state >= PERF_EVENT_STATE_INACTIVE)
97dee4f3
PZ
9002 child_event->state = PERF_EVENT_STATE_INACTIVE;
9003 else
9004 child_event->state = PERF_EVENT_STATE_OFF;
9005
9006 if (parent_event->attr.freq) {
9007 u64 sample_period = parent_event->hw.sample_period;
9008 struct hw_perf_event *hwc = &child_event->hw;
9009
9010 hwc->sample_period = sample_period;
9011 hwc->last_period = sample_period;
9012
9013 local64_set(&hwc->period_left, sample_period);
9014 }
9015
9016 child_event->ctx = child_ctx;
9017 child_event->overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
9018 child_event->overflow_handler_context
9019 = parent_event->overflow_handler_context;
97dee4f3 9020
614b6780
TG
9021 /*
9022 * Precalculate sample_data sizes
9023 */
9024 perf_event__header_size(child_event);
6844c09d 9025 perf_event__id_header_size(child_event);
614b6780 9026
97dee4f3
PZ
9027 /*
9028 * Link it up in the child's context:
9029 */
cee010ec 9030 raw_spin_lock_irqsave(&child_ctx->lock, flags);
97dee4f3 9031 add_event_to_ctx(child_event, child_ctx);
cee010ec 9032 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
97dee4f3 9033
97dee4f3
PZ
9034 /*
9035 * Link this into the parent event's child list
9036 */
97dee4f3
PZ
9037 list_add_tail(&child_event->child_list, &parent_event->child_list);
9038 mutex_unlock(&parent_event->child_mutex);
9039
9040 return child_event;
9041}
9042
9043static int inherit_group(struct perf_event *parent_event,
9044 struct task_struct *parent,
9045 struct perf_event_context *parent_ctx,
9046 struct task_struct *child,
9047 struct perf_event_context *child_ctx)
9048{
9049 struct perf_event *leader;
9050 struct perf_event *sub;
9051 struct perf_event *child_ctr;
9052
9053 leader = inherit_event(parent_event, parent, parent_ctx,
9054 child, NULL, child_ctx);
9055 if (IS_ERR(leader))
9056 return PTR_ERR(leader);
9057 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
9058 child_ctr = inherit_event(sub, parent, parent_ctx,
9059 child, leader, child_ctx);
9060 if (IS_ERR(child_ctr))
9061 return PTR_ERR(child_ctr);
9062 }
9063 return 0;
889ff015
FW
9064}
9065
9066static int
9067inherit_task_group(struct perf_event *event, struct task_struct *parent,
9068 struct perf_event_context *parent_ctx,
8dc85d54 9069 struct task_struct *child, int ctxn,
889ff015
FW
9070 int *inherited_all)
9071{
9072 int ret;
8dc85d54 9073 struct perf_event_context *child_ctx;
889ff015
FW
9074
9075 if (!event->attr.inherit) {
9076 *inherited_all = 0;
9077 return 0;
bbbee908
PZ
9078 }
9079
fe4b04fa 9080 child_ctx = child->perf_event_ctxp[ctxn];
889ff015
FW
9081 if (!child_ctx) {
9082 /*
9083 * This is executed from the parent task context, so
9084 * inherit events that have been marked for cloning.
9085 * First allocate and initialize a context for the
9086 * child.
9087 */
bbbee908 9088
734df5ab 9089 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
889ff015
FW
9090 if (!child_ctx)
9091 return -ENOMEM;
bbbee908 9092
8dc85d54 9093 child->perf_event_ctxp[ctxn] = child_ctx;
889ff015
FW
9094 }
9095
9096 ret = inherit_group(event, parent, parent_ctx,
9097 child, child_ctx);
9098
9099 if (ret)
9100 *inherited_all = 0;
9101
9102 return ret;
bbbee908
PZ
9103}
9104
9b51f66d 9105/*
cdd6c482 9106 * Initialize the perf_event context in task_struct
9b51f66d 9107 */
985c8dcb 9108static int perf_event_init_context(struct task_struct *child, int ctxn)
9b51f66d 9109{
889ff015 9110 struct perf_event_context *child_ctx, *parent_ctx;
cdd6c482
IM
9111 struct perf_event_context *cloned_ctx;
9112 struct perf_event *event;
9b51f66d 9113 struct task_struct *parent = current;
564c2b21 9114 int inherited_all = 1;
dddd3379 9115 unsigned long flags;
6ab423e0 9116 int ret = 0;
9b51f66d 9117
8dc85d54 9118 if (likely(!parent->perf_event_ctxp[ctxn]))
6ab423e0
PZ
9119 return 0;
9120
ad3a37de 9121 /*
25346b93
PM
9122 * If the parent's context is a clone, pin it so it won't get
9123 * swapped under us.
ad3a37de 9124 */
8dc85d54 9125 parent_ctx = perf_pin_task_context(parent, ctxn);
ffb4ef21
PZ
9126 if (!parent_ctx)
9127 return 0;
25346b93 9128
ad3a37de
PM
9129 /*
9130 * No need to check if parent_ctx != NULL here; since we saw
9131 * it non-NULL earlier, the only reason for it to become NULL
9132 * is if we exit, and since we're currently in the middle of
9133 * a fork we can't be exiting at the same time.
9134 */
ad3a37de 9135
9b51f66d
IM
9136 /*
9137 * Lock the parent list. No need to lock the child - not PID
9138 * hashed yet and not running, so nobody can access it.
9139 */
d859e29f 9140 mutex_lock(&parent_ctx->mutex);
9b51f66d
IM
9141
9142 /*
9143 * We dont have to disable NMIs - we are only looking at
9144 * the list, not manipulating it:
9145 */
889ff015 9146 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
8dc85d54
PZ
9147 ret = inherit_task_group(event, parent, parent_ctx,
9148 child, ctxn, &inherited_all);
889ff015
FW
9149 if (ret)
9150 break;
9151 }
b93f7978 9152
dddd3379
TG
9153 /*
9154 * We can't hold ctx->lock when iterating the ->flexible_group list due
9155 * to allocations, but we need to prevent rotation because
9156 * rotate_ctx() will change the list from interrupt context.
9157 */
9158 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
9159 parent_ctx->rotate_disable = 1;
9160 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
9161
889ff015 9162 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
8dc85d54
PZ
9163 ret = inherit_task_group(event, parent, parent_ctx,
9164 child, ctxn, &inherited_all);
889ff015 9165 if (ret)
9b51f66d 9166 break;
564c2b21
PM
9167 }
9168
dddd3379
TG
9169 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
9170 parent_ctx->rotate_disable = 0;
dddd3379 9171
8dc85d54 9172 child_ctx = child->perf_event_ctxp[ctxn];
889ff015 9173
05cbaa28 9174 if (child_ctx && inherited_all) {
564c2b21
PM
9175 /*
9176 * Mark the child context as a clone of the parent
9177 * context, or of whatever the parent is a clone of.
c5ed5145
PZ
9178 *
9179 * Note that if the parent is a clone, the holding of
9180 * parent_ctx->lock avoids it from being uncloned.
564c2b21 9181 */
c5ed5145 9182 cloned_ctx = parent_ctx->parent_ctx;
ad3a37de
PM
9183 if (cloned_ctx) {
9184 child_ctx->parent_ctx = cloned_ctx;
25346b93 9185 child_ctx->parent_gen = parent_ctx->parent_gen;
564c2b21
PM
9186 } else {
9187 child_ctx->parent_ctx = parent_ctx;
9188 child_ctx->parent_gen = parent_ctx->generation;
9189 }
9190 get_ctx(child_ctx->parent_ctx);
9b51f66d
IM
9191 }
9192
c5ed5145 9193 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
d859e29f 9194 mutex_unlock(&parent_ctx->mutex);
6ab423e0 9195
25346b93 9196 perf_unpin_context(parent_ctx);
fe4b04fa 9197 put_ctx(parent_ctx);
ad3a37de 9198
6ab423e0 9199 return ret;
9b51f66d
IM
9200}
9201
8dc85d54
PZ
9202/*
9203 * Initialize the perf_event context in task_struct
9204 */
9205int perf_event_init_task(struct task_struct *child)
9206{
9207 int ctxn, ret;
9208
8550d7cb
ON
9209 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
9210 mutex_init(&child->perf_event_mutex);
9211 INIT_LIST_HEAD(&child->perf_event_list);
9212
8dc85d54
PZ
9213 for_each_task_context_nr(ctxn) {
9214 ret = perf_event_init_context(child, ctxn);
6c72e350
PZ
9215 if (ret) {
9216 perf_event_free_task(child);
8dc85d54 9217 return ret;
6c72e350 9218 }
8dc85d54
PZ
9219 }
9220
9221 return 0;
9222}
9223
220b140b
PM
9224static void __init perf_event_init_all_cpus(void)
9225{
b28ab83c 9226 struct swevent_htable *swhash;
220b140b 9227 int cpu;
220b140b
PM
9228
9229 for_each_possible_cpu(cpu) {
b28ab83c
PZ
9230 swhash = &per_cpu(swevent_htable, cpu);
9231 mutex_init(&swhash->hlist_mutex);
2fde4f94 9232 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
220b140b
PM
9233 }
9234}
9235
0db0628d 9236static void perf_event_init_cpu(int cpu)
0793a61d 9237{
108b02cf 9238 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
0793a61d 9239
b28ab83c 9240 mutex_lock(&swhash->hlist_mutex);
059fcd8c 9241 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
76e1d904
FW
9242 struct swevent_hlist *hlist;
9243
b28ab83c
PZ
9244 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
9245 WARN_ON(!hlist);
9246 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 9247 }
b28ab83c 9248 mutex_unlock(&swhash->hlist_mutex);
0793a61d
TG
9249}
9250
2965faa5 9251#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
108b02cf 9252static void __perf_event_exit_context(void *__info)
0793a61d 9253{
108b02cf 9254 struct perf_event_context *ctx = __info;
fae3fde6
PZ
9255 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
9256 struct perf_event *event;
0793a61d 9257
fae3fde6
PZ
9258 raw_spin_lock(&ctx->lock);
9259 list_for_each_entry(event, &ctx->event_list, event_entry)
45a0e07a 9260 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
fae3fde6 9261 raw_spin_unlock(&ctx->lock);
0793a61d 9262}
108b02cf
PZ
9263
9264static void perf_event_exit_cpu_context(int cpu)
9265{
9266 struct perf_event_context *ctx;
9267 struct pmu *pmu;
9268 int idx;
9269
9270 idx = srcu_read_lock(&pmus_srcu);
9271 list_for_each_entry_rcu(pmu, &pmus, entry) {
917bdd1c 9272 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
108b02cf
PZ
9273
9274 mutex_lock(&ctx->mutex);
9275 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
9276 mutex_unlock(&ctx->mutex);
9277 }
9278 srcu_read_unlock(&pmus_srcu, idx);
108b02cf
PZ
9279}
9280
cdd6c482 9281static void perf_event_exit_cpu(int cpu)
0793a61d 9282{
e3703f8c 9283 perf_event_exit_cpu_context(cpu);
0793a61d
TG
9284}
9285#else
cdd6c482 9286static inline void perf_event_exit_cpu(int cpu) { }
0793a61d
TG
9287#endif
9288
c277443c
PZ
9289static int
9290perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
9291{
9292 int cpu;
9293
9294 for_each_online_cpu(cpu)
9295 perf_event_exit_cpu(cpu);
9296
9297 return NOTIFY_OK;
9298}
9299
9300/*
9301 * Run the perf reboot notifier at the very last possible moment so that
9302 * the generic watchdog code runs as long as possible.
9303 */
9304static struct notifier_block perf_reboot_notifier = {
9305 .notifier_call = perf_reboot,
9306 .priority = INT_MIN,
9307};
9308
0db0628d 9309static int
0793a61d
TG
9310perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
9311{
9312 unsigned int cpu = (long)hcpu;
9313
4536e4d1 9314 switch (action & ~CPU_TASKS_FROZEN) {
0793a61d
TG
9315
9316 case CPU_UP_PREPARE:
cdd6c482 9317 perf_event_init_cpu(cpu);
0793a61d
TG
9318 break;
9319
9320 case CPU_DOWN_PREPARE:
cdd6c482 9321 perf_event_exit_cpu(cpu);
0793a61d 9322 break;
0793a61d
TG
9323 default:
9324 break;
9325 }
9326
9327 return NOTIFY_OK;
9328}
9329
cdd6c482 9330void __init perf_event_init(void)
0793a61d 9331{
3c502e7a
JW
9332 int ret;
9333
2e80a82a
PZ
9334 idr_init(&pmu_idr);
9335
220b140b 9336 perf_event_init_all_cpus();
b0a873eb 9337 init_srcu_struct(&pmus_srcu);
2e80a82a
PZ
9338 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
9339 perf_pmu_register(&perf_cpu_clock, NULL, -1);
9340 perf_pmu_register(&perf_task_clock, NULL, -1);
b0a873eb
PZ
9341 perf_tp_register();
9342 perf_cpu_notifier(perf_cpu_notify);
c277443c 9343 register_reboot_notifier(&perf_reboot_notifier);
3c502e7a
JW
9344
9345 ret = init_hw_breakpoint();
9346 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
b2029520
GN
9347
9348 /* do not patch jump label more than once per second */
9349 jump_label_rate_limit(&perf_sched_events, HZ);
b01c3a00
JO
9350
9351 /*
9352 * Build time assertion that we keep the data_head at the intended
9353 * location. IOW, validation we got the __reserved[] size right.
9354 */
9355 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
9356 != 1024);
0793a61d 9357}
abe43400 9358
fd979c01
CS
9359ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
9360 char *page)
9361{
9362 struct perf_pmu_events_attr *pmu_attr =
9363 container_of(attr, struct perf_pmu_events_attr, attr);
9364
9365 if (pmu_attr->event_str)
9366 return sprintf(page, "%s\n", pmu_attr->event_str);
9367
9368 return 0;
9369}
9370
abe43400
PZ
9371static int __init perf_event_sysfs_init(void)
9372{
9373 struct pmu *pmu;
9374 int ret;
9375
9376 mutex_lock(&pmus_lock);
9377
9378 ret = bus_register(&pmu_bus);
9379 if (ret)
9380 goto unlock;
9381
9382 list_for_each_entry(pmu, &pmus, entry) {
9383 if (!pmu->name || pmu->type < 0)
9384 continue;
9385
9386 ret = pmu_dev_alloc(pmu);
9387 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
9388 }
9389 pmu_bus_running = 1;
9390 ret = 0;
9391
9392unlock:
9393 mutex_unlock(&pmus_lock);
9394
9395 return ret;
9396}
9397device_initcall(perf_event_sysfs_init);
e5d1367f
SE
9398
9399#ifdef CONFIG_CGROUP_PERF
eb95419b
TH
9400static struct cgroup_subsys_state *
9401perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
e5d1367f
SE
9402{
9403 struct perf_cgroup *jc;
e5d1367f 9404
1b15d055 9405 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
e5d1367f
SE
9406 if (!jc)
9407 return ERR_PTR(-ENOMEM);
9408
e5d1367f
SE
9409 jc->info = alloc_percpu(struct perf_cgroup_info);
9410 if (!jc->info) {
9411 kfree(jc);
9412 return ERR_PTR(-ENOMEM);
9413 }
9414
e5d1367f
SE
9415 return &jc->css;
9416}
9417
eb95419b 9418static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
e5d1367f 9419{
eb95419b
TH
9420 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
9421
e5d1367f
SE
9422 free_percpu(jc->info);
9423 kfree(jc);
9424}
9425
9426static int __perf_cgroup_move(void *info)
9427{
9428 struct task_struct *task = info;
ddaaf4e2 9429 rcu_read_lock();
e5d1367f 9430 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
ddaaf4e2 9431 rcu_read_unlock();
e5d1367f
SE
9432 return 0;
9433}
9434
1f7dd3e5 9435static void perf_cgroup_attach(struct cgroup_taskset *tset)
e5d1367f 9436{
bb9d97b6 9437 struct task_struct *task;
1f7dd3e5 9438 struct cgroup_subsys_state *css;
bb9d97b6 9439
1f7dd3e5 9440 cgroup_taskset_for_each(task, css, tset)
bb9d97b6 9441 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
9442}
9443
073219e9 9444struct cgroup_subsys perf_event_cgrp_subsys = {
92fb9748
TH
9445 .css_alloc = perf_cgroup_css_alloc,
9446 .css_free = perf_cgroup_css_free,
bb9d97b6 9447 .attach = perf_cgroup_attach,
e5d1367f
SE
9448};
9449#endif /* CONFIG_CGROUP_PERF */