perf/core: Add group reads to perf_event_read()
[linux-2.6-block.git] / kernel / events / core.c
0793a61d 1/*
57c0c15b 2 * Performance events core code:
0793a61d 3 *
98144511 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
e7e7ee2e
IM
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
d36b6910 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
7b732a75 8 *
57c0c15b 9 * For licensing details see kernel-base/COPYING
0793a61d
TG
10 */
11
12#include <linux/fs.h>
b9cacc7b 13#include <linux/mm.h>
0793a61d
TG
14#include <linux/cpu.h>
15#include <linux/smp.h>
2e80a82a 16#include <linux/idr.h>
04289bb9 17#include <linux/file.h>
0793a61d 18#include <linux/poll.h>
5a0e3ad6 19#include <linux/slab.h>
76e1d904 20#include <linux/hash.h>
12351ef8 21#include <linux/tick.h>
0793a61d 22#include <linux/sysfs.h>
22a4f650 23#include <linux/dcache.h>
0793a61d 24#include <linux/percpu.h>
22a4f650 25#include <linux/ptrace.h>
c277443c 26#include <linux/reboot.h>
b9cacc7b 27#include <linux/vmstat.h>
abe43400 28#include <linux/device.h>
6e5fdeed 29#include <linux/export.h>
906010b2 30#include <linux/vmalloc.h>
b9cacc7b
PZ
31#include <linux/hardirq.h>
32#include <linux/rculist.h>
0793a61d
TG
33#include <linux/uaccess.h>
34#include <linux/syscalls.h>
35#include <linux/anon_inodes.h>
aa9c4c0f 36#include <linux/kernel_stat.h>
39bed6cb 37#include <linux/cgroup.h>
cdd6c482 38#include <linux/perf_event.h>
af658dca 39#include <linux/trace_events.h>
3c502e7a 40#include <linux/hw_breakpoint.h>
c5ebcedb 41#include <linux/mm_types.h>
c464c76e 42#include <linux/module.h>
f972eb63 43#include <linux/mman.h>
b3f20785 44#include <linux/compat.h>
2541517c
AS
45#include <linux/bpf.h>
46#include <linux/filter.h>
0793a61d 47
76369139
FW
48#include "internal.h"
49
4e193bd4
TB
50#include <asm/irq_regs.h>
51
fadfe7be
JO
52static struct workqueue_struct *perf_wq;
53
272325c4
PZ
54typedef int (*remote_function_f)(void *);
55
fe4b04fa 56struct remote_function_call {
e7e7ee2e 57 struct task_struct *p;
272325c4 58 remote_function_f func;
e7e7ee2e
IM
59 void *info;
60 int ret;
fe4b04fa
PZ
61};
62
63static void remote_function(void *data)
64{
65 struct remote_function_call *tfc = data;
66 struct task_struct *p = tfc->p;
67
68 if (p) {
69 tfc->ret = -EAGAIN;
70 if (task_cpu(p) != smp_processor_id() || !task_curr(p))
71 return;
72 }
73
74 tfc->ret = tfc->func(tfc->info);
75}
76
77/**
78 * task_function_call - call a function on the cpu on which a task runs
79 * @p: the task to evaluate
80 * @func: the function to be called
81 * @info: the function call argument
82 *
83 * Calls the function @func when the task is currently running. This might
84 * be on the current CPU, which just calls the function directly
85 *
86 * returns: @func return value, or
87 * -ESRCH - when the process isn't running
88 * -EAGAIN - when the process moved away
89 */
90static int
272325c4 91task_function_call(struct task_struct *p, remote_function_f func, void *info)
fe4b04fa
PZ
92{
93 struct remote_function_call data = {
e7e7ee2e
IM
94 .p = p,
95 .func = func,
96 .info = info,
97 .ret = -ESRCH, /* No such (running) process */
fe4b04fa
PZ
98 };
99
100 if (task_curr(p))
101 smp_call_function_single(task_cpu(p), remote_function, &data, 1);
102
103 return data.ret;
104}
105
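/*
 * Illustrative usage sketch (editorial addition, not in the original file):
 * callers hand their argument over behind the void * and retry if the task
 * migrates away, roughly:
 *
 *	retry:
 *		if (!task_function_call(task, __perf_install_in_context, event))
 *			return;
 *		(on -ESRCH/-EAGAIN the task was not running or moved away;
 *		 take ctx->lock, re-check and retry)
 *
 * __perf_install_in_context() is only one example of a remote_function_f;
 * any int (*)(void *) callback works.
 */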
106/**
107 * cpu_function_call - call a function on the cpu
108 * @func: the function to be called
109 * @info: the function call argument
110 *
111 * Calls the function @func on the remote cpu.
112 *
113 * returns: @func return value or -ENXIO when the cpu is offline
114 */
272325c4 115static int cpu_function_call(int cpu, remote_function_f func, void *info)
fe4b04fa
PZ
116{
117 struct remote_function_call data = {
e7e7ee2e
IM
118 .p = NULL,
119 .func = func,
120 .info = info,
121 .ret = -ENXIO, /* No such CPU */
fe4b04fa
PZ
122 };
123
124 smp_call_function_single(cpu, remote_function, &data, 1);
125
126 return data.ret;
127}
128
f8697762
JO
129#define EVENT_OWNER_KERNEL ((void *) -1)
130
131static bool is_kernel_event(struct perf_event *event)
132{
133 return event->owner == EVENT_OWNER_KERNEL;
134}
135
e5d1367f
SE
136#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
137 PERF_FLAG_FD_OUTPUT |\
a21b0b35
YD
138 PERF_FLAG_PID_CGROUP |\
139 PERF_FLAG_FD_CLOEXEC)
e5d1367f 140
bce38cd5
SE
141/*
142 * branch priv levels that need permission checks
143 */
144#define PERF_SAMPLE_BRANCH_PERM_PLM \
145 (PERF_SAMPLE_BRANCH_KERNEL |\
146 PERF_SAMPLE_BRANCH_HV)
147
0b3fcf17
SE
148enum event_type_t {
149 EVENT_FLEXIBLE = 0x1,
150 EVENT_PINNED = 0x2,
151 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
152};
153
e5d1367f
SE
154/*
155 * perf_sched_events : >0 events exist
156 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
157 */
c5905afb 158struct static_key_deferred perf_sched_events __read_mostly;
e5d1367f 159static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
ba532500 160static DEFINE_PER_CPU(int, perf_sched_cb_usages);
e5d1367f 161
cdd6c482
IM
162static atomic_t nr_mmap_events __read_mostly;
163static atomic_t nr_comm_events __read_mostly;
164static atomic_t nr_task_events __read_mostly;
948b26b6 165static atomic_t nr_freq_events __read_mostly;
45ac1403 166static atomic_t nr_switch_events __read_mostly;
9ee318a7 167
108b02cf
PZ
168static LIST_HEAD(pmus);
169static DEFINE_MUTEX(pmus_lock);
170static struct srcu_struct pmus_srcu;
171
0764771d 172/*
cdd6c482 173 * perf event paranoia level:
0fbdea19
IM
174 * -1 - not paranoid at all
175 * 0 - disallow raw tracepoint access for unpriv
cdd6c482 176 * 1 - disallow cpu events for unpriv
0fbdea19 177 * 2 - disallow kernel profiling for unpriv
0764771d 178 */
cdd6c482 179int sysctl_perf_event_paranoid __read_mostly = 1;
0764771d 180
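/*
 * Illustrative sketch (assumption, not in this file): the permission checks
 * built on this knob live in <linux/perf_event.h> and look roughly like
 *
 *	static inline bool perf_paranoid_cpu(void)
 *	{
 *		return sysctl_perf_event_paranoid > 0;
 *	}
 *
 * so with the default of 1 an unprivileged user may open per-task events
 * but not CPU-wide ones.
 */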
20443384
FW
181/* Minimum for 512 kiB + 1 user control page */
182int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
df58ab24
PZ
183
184/*
cdd6c482 185 * max perf event sample rate
df58ab24 186 */
14c63f17
DH
187#define DEFAULT_MAX_SAMPLE_RATE 100000
188#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
189#define DEFAULT_CPU_TIME_MAX_PERCENT 25
190
191int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
192
193static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
194static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
195
d9494cb4
PZ
196static int perf_sample_allowed_ns __read_mostly =
197 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
14c63f17
DH
198
199void update_perf_cpu_limits(void)
200{
201 u64 tmp = perf_sample_period_ns;
202
203 tmp *= sysctl_perf_cpu_time_max_percent;
e5302920 204 do_div(tmp, 100);
d9494cb4 205 ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
14c63f17 206}
163ec435 207
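/*
 * Worked example with the defaults above (illustrative, derived from the
 * constants, not original source text): perf_sample_period_ns starts at
 * NSEC_PER_SEC / 100000 = 10,000 ns and sysctl_perf_cpu_time_max_percent
 * is 25, so perf_sample_allowed_ns = 10,000 * 25 / 100 = 2,500 ns -- on
 * average a single sample may burn 2.5 us of CPU time before the sample
 * rate gets throttled in perf_sample_event_took().
 */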
9e630205
SE
208static int perf_rotate_context(struct perf_cpu_context *cpuctx);
209
163ec435
PZ
210int perf_proc_update_handler(struct ctl_table *table, int write,
211 void __user *buffer, size_t *lenp,
212 loff_t *ppos)
213{
723478c8 214 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
163ec435
PZ
215
216 if (ret || !write)
217 return ret;
218
219 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
14c63f17
DH
220 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
221 update_perf_cpu_limits();
222
223 return 0;
224}
225
226int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
227
228int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
229 void __user *buffer, size_t *lenp,
230 loff_t *ppos)
231{
232 int ret = proc_dointvec(table, write, buffer, lenp, ppos);
233
234 if (ret || !write)
235 return ret;
236
237 update_perf_cpu_limits();
163ec435
PZ
238
239 return 0;
240}
1ccd1549 241
14c63f17
DH
242/*
243 * perf samples are done in some very critical code paths (NMIs).
244 * If they take too much CPU time, the system can lock up and not
245 * get any real work done. This will drop the sample rate when
246 * we detect that events are taking too long.
247 */
248#define NR_ACCUMULATED_SAMPLES 128
d9494cb4 249static DEFINE_PER_CPU(u64, running_sample_length);
14c63f17 250
6a02ad66 251static void perf_duration_warn(struct irq_work *w)
14c63f17 252{
6a02ad66 253 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
14c63f17 254 u64 avg_local_sample_len;
e5302920 255 u64 local_samples_len;
6a02ad66 256
4a32fea9 257 local_samples_len = __this_cpu_read(running_sample_length);
6a02ad66
PZ
258 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
259
260 printk_ratelimited(KERN_WARNING
261 "perf interrupt took too long (%lld > %lld), lowering "
262 "kernel.perf_event_max_sample_rate to %d\n",
cd578abb 263 avg_local_sample_len, allowed_ns >> 1,
6a02ad66
PZ
264 sysctl_perf_event_sample_rate);
265}
266
267static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
268
269void perf_sample_event_took(u64 sample_len_ns)
270{
d9494cb4 271 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
6a02ad66
PZ
272 u64 avg_local_sample_len;
273 u64 local_samples_len;
14c63f17 274
d9494cb4 275 if (allowed_ns == 0)
14c63f17
DH
276 return;
277
278 /* decay the counter by 1 average sample */
4a32fea9 279 local_samples_len = __this_cpu_read(running_sample_length);
14c63f17
DH
280 local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
281 local_samples_len += sample_len_ns;
4a32fea9 282 __this_cpu_write(running_sample_length, local_samples_len);
14c63f17
DH
283
284 /*
285 * note: this will be biased artificially low until we have
286 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
287 * from having to maintain a count.
288 */
289 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
290
d9494cb4 291 if (avg_local_sample_len <= allowed_ns)
14c63f17
DH
292 return;
293
294 if (max_samples_per_tick <= 1)
295 return;
296
297 max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
298 sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
299 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
300
14c63f17 301 update_perf_cpu_limits();
6a02ad66 302
cd578abb
PZ
303 if (!irq_work_queue(&perf_duration_work)) {
304 early_printk("perf interrupt took too long (%lld > %lld), lowering "
305 "kernel.perf_event_max_sample_rate to %d\n",
306 avg_local_sample_len, allowed_ns >> 1,
307 sysctl_perf_event_sample_rate);
308 }
14c63f17
DH
309}
310
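/*
 * Illustrative note on the update above (not part of the original file):
 * with NR_ACCUMULATED_SAMPLES == 128 the code keeps a running sum S with
 *
 *	S = S - S/128 + sample_len_ns;
 *	avg = S / 128;
 *
 * i.e. an exponential moving average with weight 1/128. Starting from
 * S == 0 the average is biased low until roughly 128 samples have been
 * seen, which is what the "biased artificially low" comment refers to.
 */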
cdd6c482 311static atomic64_t perf_event_id;
a96bbc16 312
0b3fcf17
SE
313static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
314 enum event_type_t event_type);
315
316static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
317 enum event_type_t event_type,
318 struct task_struct *task);
319
320static void update_context_time(struct perf_event_context *ctx);
321static u64 perf_event_time(struct perf_event *event);
0b3fcf17 322
cdd6c482 323void __weak perf_event_print_debug(void) { }
0793a61d 324
84c79910 325extern __weak const char *perf_pmu_name(void)
0793a61d 326{
84c79910 327 return "pmu";
0793a61d
TG
328}
329
0b3fcf17
SE
330static inline u64 perf_clock(void)
331{
332 return local_clock();
333}
334
34f43927
PZ
335static inline u64 perf_event_clock(struct perf_event *event)
336{
337 return event->clock();
338}
339
e5d1367f
SE
340static inline struct perf_cpu_context *
341__get_cpu_context(struct perf_event_context *ctx)
342{
343 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
344}
345
facc4307
PZ
346static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
347 struct perf_event_context *ctx)
348{
349 raw_spin_lock(&cpuctx->ctx.lock);
350 if (ctx)
351 raw_spin_lock(&ctx->lock);
352}
353
354static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
355 struct perf_event_context *ctx)
356{
357 if (ctx)
358 raw_spin_unlock(&ctx->lock);
359 raw_spin_unlock(&cpuctx->ctx.lock);
360}
361
e5d1367f
SE
362#ifdef CONFIG_CGROUP_PERF
363
e5d1367f
SE
364static inline bool
365perf_cgroup_match(struct perf_event *event)
366{
367 struct perf_event_context *ctx = event->ctx;
368 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
369
ef824fa1
TH
370 /* @event doesn't care about cgroup */
371 if (!event->cgrp)
372 return true;
373
374 /* wants specific cgroup scope but @cpuctx isn't associated with any */
375 if (!cpuctx->cgrp)
376 return false;
377
378 /*
379 * Cgroup scoping is recursive. An event enabled for a cgroup is
380 * also enabled for all its descendant cgroups. If @cpuctx's
381 * cgroup is a descendant of @event's (the test covers identity
382 * case), it's a match.
383 */
384 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
385 event->cgrp->css.cgroup);
e5d1367f
SE
386}
387
e5d1367f
SE
388static inline void perf_detach_cgroup(struct perf_event *event)
389{
4e2ba650 390 css_put(&event->cgrp->css);
e5d1367f
SE
391 event->cgrp = NULL;
392}
393
394static inline int is_cgroup_event(struct perf_event *event)
395{
396 return event->cgrp != NULL;
397}
398
399static inline u64 perf_cgroup_event_time(struct perf_event *event)
400{
401 struct perf_cgroup_info *t;
402
403 t = per_cpu_ptr(event->cgrp->info, event->cpu);
404 return t->time;
405}
406
407static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
408{
409 struct perf_cgroup_info *info;
410 u64 now;
411
412 now = perf_clock();
413
414 info = this_cpu_ptr(cgrp->info);
415
416 info->time += now - info->timestamp;
417 info->timestamp = now;
418}
419
420static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
421{
422 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
423 if (cgrp_out)
424 __update_cgrp_time(cgrp_out);
425}
426
427static inline void update_cgrp_time_from_event(struct perf_event *event)
428{
3f7cce3c
SE
429 struct perf_cgroup *cgrp;
430
e5d1367f 431 /*
3f7cce3c
SE
432 * ensure we access cgroup data only when needed and
433 * when we know the cgroup is pinned (css_get)
e5d1367f 434 */
3f7cce3c 435 if (!is_cgroup_event(event))
e5d1367f
SE
436 return;
437
3f7cce3c
SE
438 cgrp = perf_cgroup_from_task(current);
439 /*
440 * Do not update time when cgroup is not active
441 */
442 if (cgrp == event->cgrp)
443 __update_cgrp_time(event->cgrp);
e5d1367f
SE
444}
445
446static inline void
3f7cce3c
SE
447perf_cgroup_set_timestamp(struct task_struct *task,
448 struct perf_event_context *ctx)
e5d1367f
SE
449{
450 struct perf_cgroup *cgrp;
451 struct perf_cgroup_info *info;
452
3f7cce3c
SE
453 /*
454 * ctx->lock held by caller
455 * ensure we do not access cgroup data
456 * unless we have the cgroup pinned (css_get)
457 */
458 if (!task || !ctx->nr_cgroups)
e5d1367f
SE
459 return;
460
461 cgrp = perf_cgroup_from_task(task);
462 info = this_cpu_ptr(cgrp->info);
3f7cce3c 463 info->timestamp = ctx->timestamp;
e5d1367f
SE
464}
465
466#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
467#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
468
469/*
470 * reschedule events based on the cgroup constraint of task.
471 *
472 * mode SWOUT : schedule out everything
473 * mode SWIN : schedule in based on cgroup for next
474 */
475void perf_cgroup_switch(struct task_struct *task, int mode)
476{
477 struct perf_cpu_context *cpuctx;
478 struct pmu *pmu;
479 unsigned long flags;
480
481 /*
482 * disable interrupts to avoid getting nr_cgroup
483 * changes via __perf_event_disable(). Also
484 * avoids preemption.
485 */
486 local_irq_save(flags);
487
488 /*
489 * we reschedule only in the presence of cgroup
490 * constrained events.
491 */
492 rcu_read_lock();
493
494 list_for_each_entry_rcu(pmu, &pmus, entry) {
e5d1367f 495 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
95cf59ea
PZ
496 if (cpuctx->unique_pmu != pmu)
497 continue; /* ensure we process each cpuctx once */
e5d1367f 498
e5d1367f
SE
499 /*
500 * perf_cgroup_events says at least one
501 * context on this CPU has cgroup events.
502 *
503 * ctx->nr_cgroups reports the number of cgroup
504 * events for a context.
505 */
506 if (cpuctx->ctx.nr_cgroups > 0) {
facc4307
PZ
507 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
508 perf_pmu_disable(cpuctx->ctx.pmu);
e5d1367f
SE
509
510 if (mode & PERF_CGROUP_SWOUT) {
511 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
512 /*
513 * must not be done before ctxswout due
514 * to event_filter_match() in event_sched_out()
515 */
516 cpuctx->cgrp = NULL;
517 }
518
519 if (mode & PERF_CGROUP_SWIN) {
e566b76e 520 WARN_ON_ONCE(cpuctx->cgrp);
95cf59ea
PZ
521 /*
522 * set cgrp before ctxsw in to allow
523 * event_filter_match() to not have to pass
524 * task around
e5d1367f
SE
525 */
526 cpuctx->cgrp = perf_cgroup_from_task(task);
527 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
528 }
facc4307
PZ
529 perf_pmu_enable(cpuctx->ctx.pmu);
530 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
e5d1367f 531 }
e5d1367f
SE
532 }
533
534 rcu_read_unlock();
535
536 local_irq_restore(flags);
537}
538
a8d757ef
SE
539static inline void perf_cgroup_sched_out(struct task_struct *task,
540 struct task_struct *next)
e5d1367f 541{
a8d757ef
SE
542 struct perf_cgroup *cgrp1;
543 struct perf_cgroup *cgrp2 = NULL;
544
545 /*
546 * we come here when we know perf_cgroup_events > 0
547 */
548 cgrp1 = perf_cgroup_from_task(task);
549
550 /*
551 * next is NULL when called from perf_event_enable_on_exec()
552 * that will systematically cause a cgroup_switch()
553 */
554 if (next)
555 cgrp2 = perf_cgroup_from_task(next);
556
557 /*
558 * only schedule out current cgroup events if we know
559 * that we are switching to a different cgroup. Otherwise,
560 * do not touch the cgroup events.
561 */
562 if (cgrp1 != cgrp2)
563 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
e5d1367f
SE
564}
565
a8d757ef
SE
566static inline void perf_cgroup_sched_in(struct task_struct *prev,
567 struct task_struct *task)
e5d1367f 568{
a8d757ef
SE
569 struct perf_cgroup *cgrp1;
570 struct perf_cgroup *cgrp2 = NULL;
571
572 /*
573 * we come here when we know perf_cgroup_events > 0
574 */
575 cgrp1 = perf_cgroup_from_task(task);
576
577 /* prev can never be NULL */
578 cgrp2 = perf_cgroup_from_task(prev);
579
580 /*
581 * only need to schedule in cgroup events if we are changing
582 * cgroup during ctxsw. Cgroup events were not scheduled
583 * out of ctxsw out if that was not the case.
584 */
585 if (cgrp1 != cgrp2)
586 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
e5d1367f
SE
587}
588
589static inline int perf_cgroup_connect(int fd, struct perf_event *event,
590 struct perf_event_attr *attr,
591 struct perf_event *group_leader)
592{
593 struct perf_cgroup *cgrp;
594 struct cgroup_subsys_state *css;
2903ff01
AV
595 struct fd f = fdget(fd);
596 int ret = 0;
e5d1367f 597
2903ff01 598 if (!f.file)
e5d1367f
SE
599 return -EBADF;
600
b583043e 601 css = css_tryget_online_from_dir(f.file->f_path.dentry,
ec903c0c 602 &perf_event_cgrp_subsys);
3db272c0
LZ
603 if (IS_ERR(css)) {
604 ret = PTR_ERR(css);
605 goto out;
606 }
e5d1367f
SE
607
608 cgrp = container_of(css, struct perf_cgroup, css);
609 event->cgrp = cgrp;
610
611 /*
612 * all events in a group must monitor
613 * the same cgroup because a task belongs
614 * to only one perf cgroup at a time
615 */
616 if (group_leader && group_leader->cgrp != cgrp) {
617 perf_detach_cgroup(event);
618 ret = -EINVAL;
e5d1367f 619 }
3db272c0 620out:
2903ff01 621 fdput(f);
e5d1367f
SE
622 return ret;
623}
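/*
 * Illustrative user-space sketch (assumption, not part of this file): a
 * cgroup-scoped event reaches perf_cgroup_connect() via perf_event_open()
 * called with PERF_FLAG_PID_CGROUP, where the pid argument carries an open
 * fd of the cgroup directory:
 *
 *	int cgrp_fd = open("/sys/fs/cgroup/perf_event/mygrp", O_RDONLY);
 *	int ev_fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu,
 *			    -1, PERF_FLAG_PID_CGROUP);
 */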
624
625static inline void
626perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
627{
628 struct perf_cgroup_info *t;
629 t = per_cpu_ptr(event->cgrp->info, event->cpu);
630 event->shadow_ctx_time = now - t->timestamp;
631}
632
633static inline void
634perf_cgroup_defer_enabled(struct perf_event *event)
635{
636 /*
637 * when the current task's perf cgroup does not match
638 * the event's, we need to remember to call the
639 * perf_mark_enable() function the first time a task with
640 * a matching perf cgroup is scheduled in.
641 */
642 if (is_cgroup_event(event) && !perf_cgroup_match(event))
643 event->cgrp_defer_enabled = 1;
644}
645
646static inline void
647perf_cgroup_mark_enabled(struct perf_event *event,
648 struct perf_event_context *ctx)
649{
650 struct perf_event *sub;
651 u64 tstamp = perf_event_time(event);
652
653 if (!event->cgrp_defer_enabled)
654 return;
655
656 event->cgrp_defer_enabled = 0;
657
658 event->tstamp_enabled = tstamp - event->total_time_enabled;
659 list_for_each_entry(sub, &event->sibling_list, group_entry) {
660 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
661 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
662 sub->cgrp_defer_enabled = 0;
663 }
664 }
665}
666#else /* !CONFIG_CGROUP_PERF */
667
668static inline bool
669perf_cgroup_match(struct perf_event *event)
670{
671 return true;
672}
673
674static inline void perf_detach_cgroup(struct perf_event *event)
675{}
676
677static inline int is_cgroup_event(struct perf_event *event)
678{
679 return 0;
680}
681
682static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
683{
684 return 0;
685}
686
687static inline void update_cgrp_time_from_event(struct perf_event *event)
688{
689}
690
691static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
692{
693}
694
a8d757ef
SE
695static inline void perf_cgroup_sched_out(struct task_struct *task,
696 struct task_struct *next)
e5d1367f
SE
697{
698}
699
a8d757ef
SE
700static inline void perf_cgroup_sched_in(struct task_struct *prev,
701 struct task_struct *task)
e5d1367f
SE
702{
703}
704
705static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
706 struct perf_event_attr *attr,
707 struct perf_event *group_leader)
708{
709 return -EINVAL;
710}
711
712static inline void
3f7cce3c
SE
713perf_cgroup_set_timestamp(struct task_struct *task,
714 struct perf_event_context *ctx)
e5d1367f
SE
715{
716}
717
718void
719perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
720{
721}
722
723static inline void
724perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
725{
726}
727
728static inline u64 perf_cgroup_event_time(struct perf_event *event)
729{
730 return 0;
731}
732
733static inline void
734perf_cgroup_defer_enabled(struct perf_event *event)
735{
736}
737
738static inline void
739perf_cgroup_mark_enabled(struct perf_event *event,
740 struct perf_event_context *ctx)
741{
742}
743#endif
744
9e630205
SE
745/*
746 * set default to be dependent on timer tick just
747 * like original code
748 */
749#define PERF_CPU_HRTIMER (1000 / HZ)
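/*
 * Worked example (illustrative): the value is in milliseconds, so with
 * HZ == 1000 the default multiplexing interval is 1 ms and with HZ == 250
 * it is 4 ms -- one rotation per scheduler tick, as the comment above says.
 */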
750/*
751 * function must be called with interrupts disabled
752 */
272325c4 753static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
9e630205
SE
754{
755 struct perf_cpu_context *cpuctx;
9e630205
SE
756 int rotations = 0;
757
758 WARN_ON(!irqs_disabled());
759
760 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
9e630205
SE
761 rotations = perf_rotate_context(cpuctx);
762
4cfafd30
PZ
763 raw_spin_lock(&cpuctx->hrtimer_lock);
764 if (rotations)
9e630205 765 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
4cfafd30
PZ
766 else
767 cpuctx->hrtimer_active = 0;
768 raw_spin_unlock(&cpuctx->hrtimer_lock);
9e630205 769
4cfafd30 770 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
9e630205
SE
771}
772
272325c4 773static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
9e630205 774{
272325c4 775 struct hrtimer *timer = &cpuctx->hrtimer;
9e630205 776 struct pmu *pmu = cpuctx->ctx.pmu;
272325c4 777 u64 interval;
9e630205
SE
778
779 /* no multiplexing needed for SW PMU */
780 if (pmu->task_ctx_nr == perf_sw_context)
781 return;
782
62b85639
SE
783 /*
784 * check default is sane, if not set then force to
785 * default interval (1/tick)
786 */
272325c4
PZ
787 interval = pmu->hrtimer_interval_ms;
788 if (interval < 1)
789 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
62b85639 790
272325c4 791 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
9e630205 792
4cfafd30
PZ
793 raw_spin_lock_init(&cpuctx->hrtimer_lock);
794 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
272325c4 795 timer->function = perf_mux_hrtimer_handler;
9e630205
SE
796}
797
272325c4 798static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
9e630205 799{
272325c4 800 struct hrtimer *timer = &cpuctx->hrtimer;
9e630205 801 struct pmu *pmu = cpuctx->ctx.pmu;
4cfafd30 802 unsigned long flags;
9e630205
SE
803
804 /* not for SW PMU */
805 if (pmu->task_ctx_nr == perf_sw_context)
272325c4 806 return 0;
9e630205 807
4cfafd30
PZ
808 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
809 if (!cpuctx->hrtimer_active) {
810 cpuctx->hrtimer_active = 1;
811 hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
812 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
813 }
814 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
9e630205 815
272325c4 816 return 0;
9e630205
SE
817}
818
33696fc0 819void perf_pmu_disable(struct pmu *pmu)
9e35ad38 820{
33696fc0
PZ
821 int *count = this_cpu_ptr(pmu->pmu_disable_count);
822 if (!(*count)++)
823 pmu->pmu_disable(pmu);
9e35ad38 824}
9e35ad38 825
33696fc0 826void perf_pmu_enable(struct pmu *pmu)
9e35ad38 827{
33696fc0
PZ
828 int *count = this_cpu_ptr(pmu->pmu_disable_count);
829 if (!--(*count))
830 pmu->pmu_enable(pmu);
9e35ad38 831}
9e35ad38 832
2fde4f94 833static DEFINE_PER_CPU(struct list_head, active_ctx_list);
e9d2b064
PZ
834
835/*
2fde4f94
MR
836 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
837 * perf_event_task_tick() are fully serialized because they're strictly cpu
838 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
839 * disabled, while perf_event_task_tick is called from IRQ context.
e9d2b064 840 */
2fde4f94 841static void perf_event_ctx_activate(struct perf_event_context *ctx)
9e35ad38 842{
2fde4f94 843 struct list_head *head = this_cpu_ptr(&active_ctx_list);
b5ab4cd5 844
e9d2b064 845 WARN_ON(!irqs_disabled());
b5ab4cd5 846
2fde4f94
MR
847 WARN_ON(!list_empty(&ctx->active_ctx_list));
848
849 list_add(&ctx->active_ctx_list, head);
850}
851
852static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
853{
854 WARN_ON(!irqs_disabled());
855
856 WARN_ON(list_empty(&ctx->active_ctx_list));
857
858 list_del_init(&ctx->active_ctx_list);
9e35ad38 859}
9e35ad38 860
cdd6c482 861static void get_ctx(struct perf_event_context *ctx)
a63eaf34 862{
e5289d4a 863 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
a63eaf34
PM
864}
865
4af57ef2
YZ
866static void free_ctx(struct rcu_head *head)
867{
868 struct perf_event_context *ctx;
869
870 ctx = container_of(head, struct perf_event_context, rcu_head);
871 kfree(ctx->task_ctx_data);
872 kfree(ctx);
873}
874
cdd6c482 875static void put_ctx(struct perf_event_context *ctx)
a63eaf34 876{
564c2b21
PM
877 if (atomic_dec_and_test(&ctx->refcount)) {
878 if (ctx->parent_ctx)
879 put_ctx(ctx->parent_ctx);
c93f7669
PM
880 if (ctx->task)
881 put_task_struct(ctx->task);
4af57ef2 882 call_rcu(&ctx->rcu_head, free_ctx);
564c2b21 883 }
a63eaf34
PM
884}
885
f63a8daa
PZ
886/*
887 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
888 * perf_pmu_migrate_context() we need some magic.
889 *
890 * Those places that change perf_event::ctx will hold both
891 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
892 *
8b10c5e2
PZ
893 * Lock ordering is by mutex address. There are two other sites where
894 * perf_event_context::mutex nests and those are:
895 *
896 * - perf_event_exit_task_context() [ child , 0 ]
897 * __perf_event_exit_task()
898 * sync_child_event()
899 * put_event() [ parent, 1 ]
900 *
901 * - perf_event_init_context() [ parent, 0 ]
902 * inherit_task_group()
903 * inherit_group()
904 * inherit_event()
905 * perf_event_alloc()
906 * perf_init_event()
907 * perf_try_init_event() [ child , 1 ]
908 *
909 * While it appears there is an obvious deadlock here -- the parent and child
910 * nesting levels are inverted between the two. This is in fact safe because
911 * life-time rules separate them. That is, an exiting task cannot fork, and a
912 * spawning task cannot (yet) exit.
913 *
914 * But remember that these are parent<->child context relations, and
915 * migration does not affect children, therefore these two orderings should not
916 * interact.
f63a8daa
PZ
917 *
918 * The change in perf_event::ctx does not affect children (as claimed above)
919 * because the sys_perf_event_open() case will install a new event and break
920 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
921 * concerned with cpuctx and that doesn't have children.
922 *
923 * The places that change perf_event::ctx will issue:
924 *
925 * perf_remove_from_context();
926 * synchronize_rcu();
927 * perf_install_in_context();
928 *
929 * to affect the change. The remove_from_context() + synchronize_rcu() should
930 * quiesce the event, after which we can install it in the new location. This
931 * means that only external vectors (perf_fops, prctl) can perturb the event
932 * while in transit. Therefore all such accessors should also acquire
933 * perf_event_context::mutex to serialize against this.
934 *
935 * However; because event->ctx can change while we're waiting to acquire
936 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
937 * function.
938 *
939 * Lock order:
940 * task_struct::perf_event_mutex
941 * perf_event_context::mutex
942 * perf_event_context::lock
943 * perf_event::child_mutex;
944 * perf_event::mmap_mutex
945 * mmap_sem
946 */
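/*
 * Illustrative sketch of the accessor pattern prescribed above (not in the
 * original file); external vectors such as the perf_fops handlers do:
 *
 *	struct perf_event_context *ctx = perf_event_ctx_lock(event);
 *	... operate on the event; ctx cannot change underneath us ...
 *	perf_event_ctx_unlock(event, ctx);
 */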
a83fe28e
PZ
947static struct perf_event_context *
948perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
f63a8daa
PZ
949{
950 struct perf_event_context *ctx;
951
952again:
953 rcu_read_lock();
954 ctx = ACCESS_ONCE(event->ctx);
955 if (!atomic_inc_not_zero(&ctx->refcount)) {
956 rcu_read_unlock();
957 goto again;
958 }
959 rcu_read_unlock();
960
a83fe28e 961 mutex_lock_nested(&ctx->mutex, nesting);
f63a8daa
PZ
962 if (event->ctx != ctx) {
963 mutex_unlock(&ctx->mutex);
964 put_ctx(ctx);
965 goto again;
966 }
967
968 return ctx;
969}
970
a83fe28e
PZ
971static inline struct perf_event_context *
972perf_event_ctx_lock(struct perf_event *event)
973{
974 return perf_event_ctx_lock_nested(event, 0);
975}
976
f63a8daa
PZ
977static void perf_event_ctx_unlock(struct perf_event *event,
978 struct perf_event_context *ctx)
979{
980 mutex_unlock(&ctx->mutex);
981 put_ctx(ctx);
982}
983
211de6eb
PZ
984/*
985 * This must be done under the ctx->lock, such as to serialize against
986 * context_equiv(), therefore we cannot call put_ctx() since that might end up
987 * calling scheduler related locks and ctx->lock nests inside those.
988 */
989static __must_check struct perf_event_context *
990unclone_ctx(struct perf_event_context *ctx)
71a851b4 991{
211de6eb
PZ
992 struct perf_event_context *parent_ctx = ctx->parent_ctx;
993
994 lockdep_assert_held(&ctx->lock);
995
996 if (parent_ctx)
71a851b4 997 ctx->parent_ctx = NULL;
5a3126d4 998 ctx->generation++;
211de6eb
PZ
999
1000 return parent_ctx;
71a851b4
PZ
1001}
1002
6844c09d
ACM
1003static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1004{
1005 /*
1006 * only top level events have the pid namespace they were created in
1007 */
1008 if (event->parent)
1009 event = event->parent;
1010
1011 return task_tgid_nr_ns(p, event->ns);
1012}
1013
1014static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1015{
1016 /*
1017 * only top level events have the pid namespace they were created in
1018 */
1019 if (event->parent)
1020 event = event->parent;
1021
1022 return task_pid_nr_ns(p, event->ns);
1023}
1024
7f453c24 1025/*
cdd6c482 1026 * If we inherit events we want to return the parent event id
7f453c24
PZ
1027 * to userspace.
1028 */
cdd6c482 1029static u64 primary_event_id(struct perf_event *event)
7f453c24 1030{
cdd6c482 1031 u64 id = event->id;
7f453c24 1032
cdd6c482
IM
1033 if (event->parent)
1034 id = event->parent->id;
7f453c24
PZ
1035
1036 return id;
1037}
1038
25346b93 1039/*
cdd6c482 1040 * Get the perf_event_context for a task and lock it.
25346b93
PM
1041 * This has to cope with the fact that until it is locked,
1042 * the context could get moved to another task.
1043 */
cdd6c482 1044static struct perf_event_context *
8dc85d54 1045perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
25346b93 1046{
cdd6c482 1047 struct perf_event_context *ctx;
25346b93 1048
9ed6060d 1049retry:
058ebd0e
PZ
1050 /*
1051 * One of the few rules of preemptible RCU is that one cannot do
1052 * rcu_read_unlock() while holding a scheduler (or nested) lock when
1053 * part of the read side critical section was preemptible -- see
1054 * rcu_read_unlock_special().
1055 *
1056 * Since ctx->lock nests under rq->lock we must ensure the entire read
1057 * side critical section is non-preemptible.
1058 */
1059 preempt_disable();
1060 rcu_read_lock();
8dc85d54 1061 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
25346b93
PM
1062 if (ctx) {
1063 /*
1064 * If this context is a clone of another, it might
1065 * get swapped for another underneath us by
cdd6c482 1066 * perf_event_task_sched_out, though the
25346b93
PM
1067 * rcu_read_lock() protects us from any context
1068 * getting freed. Lock the context and check if it
1069 * got swapped before we could get the lock, and retry
1070 * if so. If we locked the right context, then it
1071 * can't get swapped on us any more.
1072 */
e625cce1 1073 raw_spin_lock_irqsave(&ctx->lock, *flags);
8dc85d54 1074 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
e625cce1 1075 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
058ebd0e
PZ
1076 rcu_read_unlock();
1077 preempt_enable();
25346b93
PM
1078 goto retry;
1079 }
b49a9e7e
PZ
1080
1081 if (!atomic_inc_not_zero(&ctx->refcount)) {
e625cce1 1082 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
b49a9e7e
PZ
1083 ctx = NULL;
1084 }
25346b93
PM
1085 }
1086 rcu_read_unlock();
058ebd0e 1087 preempt_enable();
25346b93
PM
1088 return ctx;
1089}
1090
1091/*
1092 * Get the context for a task and increment its pin_count so it
1093 * can't get swapped to another task. This also increments its
1094 * reference count so that the context can't get freed.
1095 */
8dc85d54
PZ
1096static struct perf_event_context *
1097perf_pin_task_context(struct task_struct *task, int ctxn)
25346b93 1098{
cdd6c482 1099 struct perf_event_context *ctx;
25346b93
PM
1100 unsigned long flags;
1101
8dc85d54 1102 ctx = perf_lock_task_context(task, ctxn, &flags);
25346b93
PM
1103 if (ctx) {
1104 ++ctx->pin_count;
e625cce1 1105 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1106 }
1107 return ctx;
1108}
1109
cdd6c482 1110static void perf_unpin_context(struct perf_event_context *ctx)
25346b93
PM
1111{
1112 unsigned long flags;
1113
e625cce1 1114 raw_spin_lock_irqsave(&ctx->lock, flags);
25346b93 1115 --ctx->pin_count;
e625cce1 1116 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1117}
1118
f67218c3
PZ
1119/*
1120 * Update the record of the current time in a context.
1121 */
1122static void update_context_time(struct perf_event_context *ctx)
1123{
1124 u64 now = perf_clock();
1125
1126 ctx->time += now - ctx->timestamp;
1127 ctx->timestamp = now;
1128}
1129
4158755d
SE
1130static u64 perf_event_time(struct perf_event *event)
1131{
1132 struct perf_event_context *ctx = event->ctx;
e5d1367f
SE
1133
1134 if (is_cgroup_event(event))
1135 return perf_cgroup_event_time(event);
1136
4158755d
SE
1137 return ctx ? ctx->time : 0;
1138}
1139
f67218c3
PZ
1140/*
1141 * Update the total_time_enabled and total_time_running fields for a event.
b7526f0c 1142 * The caller of this function needs to hold the ctx->lock.
f67218c3
PZ
1143 */
1144static void update_event_times(struct perf_event *event)
1145{
1146 struct perf_event_context *ctx = event->ctx;
1147 u64 run_end;
1148
1149 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1150 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1151 return;
e5d1367f
SE
1152 /*
1153 * in cgroup mode, time_enabled represents
1154 * the time the event was enabled AND active
1155 * tasks were in the monitored cgroup. This is
1156 * independent of the activity of the context as
1157 * there may be a mix of cgroup and non-cgroup events.
1158 *
1159 * That is why we treat cgroup events differently
1160 * here.
1161 */
1162 if (is_cgroup_event(event))
46cd6a7f 1163 run_end = perf_cgroup_event_time(event);
e5d1367f
SE
1164 else if (ctx->is_active)
1165 run_end = ctx->time;
acd1d7c1
PZ
1166 else
1167 run_end = event->tstamp_stopped;
1168
1169 event->total_time_enabled = run_end - event->tstamp_enabled;
f67218c3
PZ
1170
1171 if (event->state == PERF_EVENT_STATE_INACTIVE)
1172 run_end = event->tstamp_stopped;
1173 else
4158755d 1174 run_end = perf_event_time(event);
f67218c3
PZ
1175
1176 event->total_time_running = run_end - event->tstamp_running;
e5d1367f 1177
f67218c3
PZ
1178}
1179
96c21a46
PZ
1180/*
1181 * Update total_time_enabled and total_time_running for all events in a group.
1182 */
1183static void update_group_times(struct perf_event *leader)
1184{
1185 struct perf_event *event;
1186
1187 update_event_times(leader);
1188 list_for_each_entry(event, &leader->sibling_list, group_entry)
1189 update_event_times(event);
1190}
1191
889ff015
FW
1192static struct list_head *
1193ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1194{
1195 if (event->attr.pinned)
1196 return &ctx->pinned_groups;
1197 else
1198 return &ctx->flexible_groups;
1199}
1200
fccc714b 1201/*
cdd6c482 1202 * Add an event to the lists for its context.
fccc714b
PZ
1203 * Must be called with ctx->mutex and ctx->lock held.
1204 */
04289bb9 1205static void
cdd6c482 1206list_add_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1207{
8a49542c
PZ
1208 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1209 event->attach_state |= PERF_ATTACH_CONTEXT;
04289bb9
IM
1210
1211 /*
8a49542c
PZ
1212 * If we're a stand alone event or group leader, we go to the context
1213 * list, group events are kept attached to the group so that
1214 * perf_group_detach can, at all times, locate all siblings.
04289bb9 1215 */
8a49542c 1216 if (event->group_leader == event) {
889ff015
FW
1217 struct list_head *list;
1218
d6f962b5
FW
1219 if (is_software_event(event))
1220 event->group_flags |= PERF_GROUP_SOFTWARE;
1221
889ff015
FW
1222 list = ctx_group_list(event, ctx);
1223 list_add_tail(&event->group_entry, list);
5c148194 1224 }
592903cd 1225
08309379 1226 if (is_cgroup_event(event))
e5d1367f 1227 ctx->nr_cgroups++;
e5d1367f 1228
cdd6c482
IM
1229 list_add_rcu(&event->event_entry, &ctx->event_list);
1230 ctx->nr_events++;
1231 if (event->attr.inherit_stat)
bfbd3381 1232 ctx->nr_stat++;
5a3126d4
PZ
1233
1234 ctx->generation++;
04289bb9
IM
1235}
1236
0231bb53
JO
1237/*
1238 * Initialize event state based on the perf_event_attr::disabled.
1239 */
1240static inline void perf_event__state_init(struct perf_event *event)
1241{
1242 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1243 PERF_EVENT_STATE_INACTIVE;
1244}
1245
c320c7b7
ACM
1246/*
1247 * Called at perf_event creation and when events are attached/detached from a
1248 * group.
1249 */
1250static void perf_event__read_size(struct perf_event *event)
1251{
1252 int entry = sizeof(u64); /* value */
1253 int size = 0;
1254 int nr = 1;
1255
1256 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1257 size += sizeof(u64);
1258
1259 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1260 size += sizeof(u64);
1261
1262 if (event->attr.read_format & PERF_FORMAT_ID)
1263 entry += sizeof(u64);
1264
1265 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1266 nr += event->group_leader->nr_siblings;
1267 size += sizeof(u64);
1268 }
1269
1270 size += entry * nr;
1271 event->read_size = size;
1272}
1273
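/*
 * Worked example (illustrative, not in the original source): a group leader
 * with two siblings and read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID
 * gives entry = 8 + 8 = 16, nr = 1 + 2 = 3 and size = 8 for the leading
 * nr field, so read_size = 8 + 16 * 3 = 56 bytes -- the
 *
 *	{ u64 nr; { u64 value; u64 id; } cntr[nr]; }
 *
 * layout a PERF_FORMAT_GROUP read() of the whole group returns.
 */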
1274static void perf_event__header_size(struct perf_event *event)
1275{
1276 struct perf_sample_data *data;
1277 u64 sample_type = event->attr.sample_type;
1278 u16 size = 0;
1279
1280 perf_event__read_size(event);
1281
1282 if (sample_type & PERF_SAMPLE_IP)
1283 size += sizeof(data->ip);
1284
6844c09d
ACM
1285 if (sample_type & PERF_SAMPLE_ADDR)
1286 size += sizeof(data->addr);
1287
1288 if (sample_type & PERF_SAMPLE_PERIOD)
1289 size += sizeof(data->period);
1290
c3feedf2
AK
1291 if (sample_type & PERF_SAMPLE_WEIGHT)
1292 size += sizeof(data->weight);
1293
6844c09d
ACM
1294 if (sample_type & PERF_SAMPLE_READ)
1295 size += event->read_size;
1296
d6be9ad6
SE
1297 if (sample_type & PERF_SAMPLE_DATA_SRC)
1298 size += sizeof(data->data_src.val);
1299
fdfbbd07
AK
1300 if (sample_type & PERF_SAMPLE_TRANSACTION)
1301 size += sizeof(data->txn);
1302
6844c09d
ACM
1303 event->header_size = size;
1304}
1305
1306static void perf_event__id_header_size(struct perf_event *event)
1307{
1308 struct perf_sample_data *data;
1309 u64 sample_type = event->attr.sample_type;
1310 u16 size = 0;
1311
c320c7b7
ACM
1312 if (sample_type & PERF_SAMPLE_TID)
1313 size += sizeof(data->tid_entry);
1314
1315 if (sample_type & PERF_SAMPLE_TIME)
1316 size += sizeof(data->time);
1317
ff3d527c
AH
1318 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1319 size += sizeof(data->id);
1320
c320c7b7
ACM
1321 if (sample_type & PERF_SAMPLE_ID)
1322 size += sizeof(data->id);
1323
1324 if (sample_type & PERF_SAMPLE_STREAM_ID)
1325 size += sizeof(data->stream_id);
1326
1327 if (sample_type & PERF_SAMPLE_CPU)
1328 size += sizeof(data->cpu_entry);
1329
6844c09d 1330 event->id_header_size = size;
c320c7b7
ACM
1331}
1332
8a49542c
PZ
1333static void perf_group_attach(struct perf_event *event)
1334{
c320c7b7 1335 struct perf_event *group_leader = event->group_leader, *pos;
8a49542c 1336
74c3337c
PZ
1337 /*
1338 * We can have double attach due to group movement in perf_event_open.
1339 */
1340 if (event->attach_state & PERF_ATTACH_GROUP)
1341 return;
1342
8a49542c
PZ
1343 event->attach_state |= PERF_ATTACH_GROUP;
1344
1345 if (group_leader == event)
1346 return;
1347
652884fe
PZ
1348 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1349
8a49542c
PZ
1350 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1351 !is_software_event(event))
1352 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1353
1354 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1355 group_leader->nr_siblings++;
c320c7b7
ACM
1356
1357 perf_event__header_size(group_leader);
1358
1359 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1360 perf_event__header_size(pos);
8a49542c
PZ
1361}
1362
a63eaf34 1363/*
cdd6c482 1364 * Remove an event from the lists for its context.
fccc714b 1365 * Must be called with ctx->mutex and ctx->lock held.
a63eaf34 1366 */
04289bb9 1367static void
cdd6c482 1368list_del_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1369{
68cacd29 1370 struct perf_cpu_context *cpuctx;
652884fe
PZ
1371
1372 WARN_ON_ONCE(event->ctx != ctx);
1373 lockdep_assert_held(&ctx->lock);
1374
8a49542c
PZ
1375 /*
1376 * We can have double detach due to exit/hot-unplug + close.
1377 */
1378 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
a63eaf34 1379 return;
8a49542c
PZ
1380
1381 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1382
68cacd29 1383 if (is_cgroup_event(event)) {
e5d1367f 1384 ctx->nr_cgroups--;
68cacd29
SE
1385 cpuctx = __get_cpu_context(ctx);
1386 /*
1387 * if there are no more cgroup events
1388 * then clear cgrp to avoid stale pointer
1389 * in update_cgrp_time_from_cpuctx()
1390 */
1391 if (!ctx->nr_cgroups)
1392 cpuctx->cgrp = NULL;
1393 }
e5d1367f 1394
cdd6c482
IM
1395 ctx->nr_events--;
1396 if (event->attr.inherit_stat)
bfbd3381 1397 ctx->nr_stat--;
8bc20959 1398
cdd6c482 1399 list_del_rcu(&event->event_entry);
04289bb9 1400
8a49542c
PZ
1401 if (event->group_leader == event)
1402 list_del_init(&event->group_entry);
5c148194 1403
96c21a46 1404 update_group_times(event);
b2e74a26
SE
1405
1406 /*
1407 * If event was in error state, then keep it
1408 * that way, otherwise bogus counts will be
1409 * returned on read(). The only way to get out
1410 * of error state is by explicit re-enabling
1411 * of the event
1412 */
1413 if (event->state > PERF_EVENT_STATE_OFF)
1414 event->state = PERF_EVENT_STATE_OFF;
5a3126d4
PZ
1415
1416 ctx->generation++;
050735b0
PZ
1417}
1418
8a49542c 1419static void perf_group_detach(struct perf_event *event)
050735b0
PZ
1420{
1421 struct perf_event *sibling, *tmp;
8a49542c
PZ
1422 struct list_head *list = NULL;
1423
1424 /*
1425 * We can have double detach due to exit/hot-unplug + close.
1426 */
1427 if (!(event->attach_state & PERF_ATTACH_GROUP))
1428 return;
1429
1430 event->attach_state &= ~PERF_ATTACH_GROUP;
1431
1432 /*
1433 * If this is a sibling, remove it from its group.
1434 */
1435 if (event->group_leader != event) {
1436 list_del_init(&event->group_entry);
1437 event->group_leader->nr_siblings--;
c320c7b7 1438 goto out;
8a49542c
PZ
1439 }
1440
1441 if (!list_empty(&event->group_entry))
1442 list = &event->group_entry;
2e2af50b 1443
04289bb9 1444 /*
cdd6c482
IM
1445 * If this was a group event with sibling events then
1446 * upgrade the siblings to singleton events by adding them
8a49542c 1447 * to whatever list we are on.
04289bb9 1448 */
cdd6c482 1449 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
8a49542c
PZ
1450 if (list)
1451 list_move_tail(&sibling->group_entry, list);
04289bb9 1452 sibling->group_leader = sibling;
d6f962b5
FW
1453
1454 /* Inherit group flags from the previous leader */
1455 sibling->group_flags = event->group_flags;
652884fe
PZ
1456
1457 WARN_ON_ONCE(sibling->ctx != event->ctx);
04289bb9 1458 }
c320c7b7
ACM
1459
1460out:
1461 perf_event__header_size(event->group_leader);
1462
1463 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1464 perf_event__header_size(tmp);
04289bb9
IM
1465}
1466
fadfe7be
JO
1467/*
1468 * User event without the task.
1469 */
1470static bool is_orphaned_event(struct perf_event *event)
1471{
1472 return event && !is_kernel_event(event) && !event->owner;
1473}
1474
1475/*
1476 * Event has a parent but parent's task finished and it's
1477 * alive only because of children holding a reference.
1478 */
1479static bool is_orphaned_child(struct perf_event *event)
1480{
1481 return is_orphaned_event(event->parent);
1482}
1483
1484static void orphans_remove_work(struct work_struct *work);
1485
1486static void schedule_orphans_remove(struct perf_event_context *ctx)
1487{
1488 if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
1489 return;
1490
1491 if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
1492 get_ctx(ctx);
1493 ctx->orphans_remove_sched = true;
1494 }
1495}
1496
1497static int __init perf_workqueue_init(void)
1498{
1499 perf_wq = create_singlethread_workqueue("perf");
1500 WARN(!perf_wq, "failed to create perf workqueue\n");
1501 return perf_wq ? 0 : -1;
1502}
1503
1504core_initcall(perf_workqueue_init);
1505
66eb579e
MR
1506static inline int pmu_filter_match(struct perf_event *event)
1507{
1508 struct pmu *pmu = event->pmu;
1509 return pmu->filter_match ? pmu->filter_match(event) : 1;
1510}
1511
fa66f07a
SE
1512static inline int
1513event_filter_match(struct perf_event *event)
1514{
e5d1367f 1515 return (event->cpu == -1 || event->cpu == smp_processor_id())
66eb579e 1516 && perf_cgroup_match(event) && pmu_filter_match(event);
fa66f07a
SE
1517}
1518
9ffcfa6f
SE
1519static void
1520event_sched_out(struct perf_event *event,
3b6f9e5c 1521 struct perf_cpu_context *cpuctx,
cdd6c482 1522 struct perf_event_context *ctx)
3b6f9e5c 1523{
4158755d 1524 u64 tstamp = perf_event_time(event);
fa66f07a 1525 u64 delta;
652884fe
PZ
1526
1527 WARN_ON_ONCE(event->ctx != ctx);
1528 lockdep_assert_held(&ctx->lock);
1529
fa66f07a
SE
1530 /*
1531 * An event which could not be activated because of
1532 * filter mismatch still needs to have its timings
1533 * maintained, otherwise bogus information is returned
1534 * via read() for time_enabled, time_running:
1535 */
1536 if (event->state == PERF_EVENT_STATE_INACTIVE
1537 && !event_filter_match(event)) {
e5d1367f 1538 delta = tstamp - event->tstamp_stopped;
fa66f07a 1539 event->tstamp_running += delta;
4158755d 1540 event->tstamp_stopped = tstamp;
fa66f07a
SE
1541 }
1542
cdd6c482 1543 if (event->state != PERF_EVENT_STATE_ACTIVE)
9ffcfa6f 1544 return;
3b6f9e5c 1545
44377277
AS
1546 perf_pmu_disable(event->pmu);
1547
cdd6c482
IM
1548 event->state = PERF_EVENT_STATE_INACTIVE;
1549 if (event->pending_disable) {
1550 event->pending_disable = 0;
1551 event->state = PERF_EVENT_STATE_OFF;
970892a9 1552 }
4158755d 1553 event->tstamp_stopped = tstamp;
a4eaf7f1 1554 event->pmu->del(event, 0);
cdd6c482 1555 event->oncpu = -1;
3b6f9e5c 1556
cdd6c482 1557 if (!is_software_event(event))
3b6f9e5c 1558 cpuctx->active_oncpu--;
2fde4f94
MR
1559 if (!--ctx->nr_active)
1560 perf_event_ctx_deactivate(ctx);
0f5a2601
PZ
1561 if (event->attr.freq && event->attr.sample_freq)
1562 ctx->nr_freq--;
cdd6c482 1563 if (event->attr.exclusive || !cpuctx->active_oncpu)
3b6f9e5c 1564 cpuctx->exclusive = 0;
44377277 1565
fadfe7be
JO
1566 if (is_orphaned_child(event))
1567 schedule_orphans_remove(ctx);
1568
44377277 1569 perf_pmu_enable(event->pmu);
3b6f9e5c
PM
1570}
1571
d859e29f 1572static void
cdd6c482 1573group_sched_out(struct perf_event *group_event,
d859e29f 1574 struct perf_cpu_context *cpuctx,
cdd6c482 1575 struct perf_event_context *ctx)
d859e29f 1576{
cdd6c482 1577 struct perf_event *event;
fa66f07a 1578 int state = group_event->state;
d859e29f 1579
cdd6c482 1580 event_sched_out(group_event, cpuctx, ctx);
d859e29f
PM
1581
1582 /*
1583 * Schedule out siblings (if any):
1584 */
cdd6c482
IM
1585 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1586 event_sched_out(event, cpuctx, ctx);
d859e29f 1587
fa66f07a 1588 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
d859e29f
PM
1589 cpuctx->exclusive = 0;
1590}
1591
46ce0fe9
PZ
1592struct remove_event {
1593 struct perf_event *event;
1594 bool detach_group;
1595};
1596
0793a61d 1597/*
cdd6c482 1598 * Cross CPU call to remove a performance event
0793a61d 1599 *
cdd6c482 1600 * We disable the event on the hardware level first. After that we
0793a61d
TG
1601 * remove it from the context list.
1602 */
fe4b04fa 1603static int __perf_remove_from_context(void *info)
0793a61d 1604{
46ce0fe9
PZ
1605 struct remove_event *re = info;
1606 struct perf_event *event = re->event;
cdd6c482 1607 struct perf_event_context *ctx = event->ctx;
108b02cf 1608 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
0793a61d 1609
e625cce1 1610 raw_spin_lock(&ctx->lock);
cdd6c482 1611 event_sched_out(event, cpuctx, ctx);
46ce0fe9
PZ
1612 if (re->detach_group)
1613 perf_group_detach(event);
cdd6c482 1614 list_del_event(event, ctx);
64ce3126
PZ
1615 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1616 ctx->is_active = 0;
1617 cpuctx->task_ctx = NULL;
1618 }
e625cce1 1619 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
1620
1621 return 0;
0793a61d
TG
1622}
1623
1624
1625/*
cdd6c482 1626 * Remove the event from a task's (or a CPU's) list of events.
0793a61d 1627 *
cdd6c482 1628 * CPU events are removed with a smp call. For task events we only
0793a61d 1629 * call when the task is on a CPU.
c93f7669 1630 *
cdd6c482
IM
1631 * If event->ctx is a cloned context, callers must make sure that
1632 * every task struct that event->ctx->task could possibly point to
c93f7669
PM
1633 * remains valid. This is OK when called from perf_release since
1634 * that only calls us on the top-level context, which can't be a clone.
cdd6c482 1635 * When called from perf_event_exit_task, it's OK because the
c93f7669 1636 * context has been detached from its task.
0793a61d 1637 */
46ce0fe9 1638static void perf_remove_from_context(struct perf_event *event, bool detach_group)
0793a61d 1639{
cdd6c482 1640 struct perf_event_context *ctx = event->ctx;
0793a61d 1641 struct task_struct *task = ctx->task;
46ce0fe9
PZ
1642 struct remove_event re = {
1643 .event = event,
1644 .detach_group = detach_group,
1645 };
0793a61d 1646
fe4b04fa
PZ
1647 lockdep_assert_held(&ctx->mutex);
1648
0793a61d
TG
1649 if (!task) {
1650 /*
226424ee
MR
1651 * Per cpu events are removed via an smp call. The removal can
1652 * fail if the CPU is currently offline, but in that case we
1653 * already called __perf_remove_from_context from
1654 * perf_event_exit_cpu.
0793a61d 1655 */
46ce0fe9 1656 cpu_function_call(event->cpu, __perf_remove_from_context, &re);
0793a61d
TG
1657 return;
1658 }
1659
1660retry:
46ce0fe9 1661 if (!task_function_call(task, __perf_remove_from_context, &re))
fe4b04fa 1662 return;
0793a61d 1663
e625cce1 1664 raw_spin_lock_irq(&ctx->lock);
0793a61d 1665 /*
fe4b04fa
PZ
1666 * If we failed to find a running task, but find the context active now
1667 * that we've acquired the ctx->lock, retry.
0793a61d 1668 */
fe4b04fa 1669 if (ctx->is_active) {
e625cce1 1670 raw_spin_unlock_irq(&ctx->lock);
3577af70
CW
1671 /*
1672 * Reload the task pointer, it might have been changed by
1673 * a concurrent perf_event_context_sched_out().
1674 */
1675 task = ctx->task;
0793a61d
TG
1676 goto retry;
1677 }
1678
1679 /*
fe4b04fa
PZ
1680 * Since the task isn't running, it's safe to remove the event;
1681 * holding the ctx->lock ensures the task won't get scheduled in.
0793a61d 1682 */
46ce0fe9
PZ
1683 if (detach_group)
1684 perf_group_detach(event);
fe4b04fa 1685 list_del_event(event, ctx);
e625cce1 1686 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1687}
1688
d859e29f 1689/*
cdd6c482 1690 * Cross CPU call to disable a performance event
d859e29f 1691 */
500ad2d8 1692int __perf_event_disable(void *info)
d859e29f 1693{
cdd6c482 1694 struct perf_event *event = info;
cdd6c482 1695 struct perf_event_context *ctx = event->ctx;
108b02cf 1696 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
d859e29f
PM
1697
1698 /*
cdd6c482
IM
1699 * If this is a per-task event, need to check whether this
1700 * event's task is the current task on this cpu.
fe4b04fa
PZ
1701 *
1702 * Can trigger due to concurrent perf_event_context_sched_out()
1703 * flipping contexts around.
d859e29f 1704 */
665c2142 1705 if (ctx->task && cpuctx->task_ctx != ctx)
fe4b04fa 1706 return -EINVAL;
d859e29f 1707
e625cce1 1708 raw_spin_lock(&ctx->lock);
d859e29f
PM
1709
1710 /*
cdd6c482 1711 * If the event is on, turn it off.
d859e29f
PM
1712 * If it is in error state, leave it in error state.
1713 */
cdd6c482 1714 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
4af4998b 1715 update_context_time(ctx);
e5d1367f 1716 update_cgrp_time_from_event(event);
cdd6c482
IM
1717 update_group_times(event);
1718 if (event == event->group_leader)
1719 group_sched_out(event, cpuctx, ctx);
d859e29f 1720 else
cdd6c482
IM
1721 event_sched_out(event, cpuctx, ctx);
1722 event->state = PERF_EVENT_STATE_OFF;
d859e29f
PM
1723 }
1724
e625cce1 1725 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
1726
1727 return 0;
d859e29f
PM
1728}
1729
1730/*
cdd6c482 1731 * Disable an event.
c93f7669 1732 *
cdd6c482
IM
1733 * If event->ctx is a cloned context, callers must make sure that
1734 * every task struct that event->ctx->task could possibly point to
c93f7669 1735 * remains valid. This condition is satisfied when called through
cdd6c482
IM
1736 * perf_event_for_each_child or perf_event_for_each because they
1737 * hold the top-level event's child_mutex, so any descendant that
1738 * goes to exit will block in sync_child_event.
1739 * When called from perf_pending_event it's OK because event->ctx
c93f7669 1740 * is the current context on this CPU and preemption is disabled,
cdd6c482 1741 * hence we can't get into perf_event_task_sched_out for this context.
d859e29f 1742 */
f63a8daa 1743static void _perf_event_disable(struct perf_event *event)
d859e29f 1744{
cdd6c482 1745 struct perf_event_context *ctx = event->ctx;
d859e29f
PM
1746 struct task_struct *task = ctx->task;
1747
1748 if (!task) {
1749 /*
cdd6c482 1750 * Disable the event on the cpu that it's on
d859e29f 1751 */
fe4b04fa 1752 cpu_function_call(event->cpu, __perf_event_disable, event);
d859e29f
PM
1753 return;
1754 }
1755
9ed6060d 1756retry:
fe4b04fa
PZ
1757 if (!task_function_call(task, __perf_event_disable, event))
1758 return;
d859e29f 1759
e625cce1 1760 raw_spin_lock_irq(&ctx->lock);
d859e29f 1761 /*
cdd6c482 1762 * If the event is still active, we need to retry the cross-call.
d859e29f 1763 */
cdd6c482 1764 if (event->state == PERF_EVENT_STATE_ACTIVE) {
e625cce1 1765 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa
PZ
1766 /*
1767 * Reload the task pointer, it might have been changed by
1768 * a concurrent perf_event_context_sched_out().
1769 */
1770 task = ctx->task;
d859e29f
PM
1771 goto retry;
1772 }
1773
1774 /*
1775 * Since we have the lock this context can't be scheduled
1776 * in, so we can change the state safely.
1777 */
cdd6c482
IM
1778 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1779 update_group_times(event);
1780 event->state = PERF_EVENT_STATE_OFF;
53cfbf59 1781 }
e625cce1 1782 raw_spin_unlock_irq(&ctx->lock);
d859e29f 1783}
f63a8daa
PZ
1784
1785/*
1786 * Strictly speaking kernel users cannot create groups and therefore this
1787 * interface does not need the perf_event_ctx_lock() magic.
1788 */
1789void perf_event_disable(struct perf_event *event)
1790{
1791 struct perf_event_context *ctx;
1792
1793 ctx = perf_event_ctx_lock(event);
1794 _perf_event_disable(event);
1795 perf_event_ctx_unlock(event, ctx);
1796}
dcfce4a0 1797EXPORT_SYMBOL_GPL(perf_event_disable);
d859e29f 1798
e5d1367f
SE
1799static void perf_set_shadow_time(struct perf_event *event,
1800 struct perf_event_context *ctx,
1801 u64 tstamp)
1802{
1803 /*
1804 * use the correct time source for the time snapshot
1805 *
1806 * We could get by without this by leveraging the
1807 * fact that to get to this function, the caller
1808 * has most likely already called update_context_time()
1809 * and update_cgrp_time_xx() and thus both timestamps
1810 * are identical (or very close). Given that tstamp is
1811 * already adjusted for cgroup, we could say that:
1812 * tstamp - ctx->timestamp
1813 * is equivalent to
1814 * tstamp - cgrp->timestamp.
1815 *
1816 * Then, in perf_output_read(), the calculation would
1817 * work with no changes because:
1818 * - event is guaranteed scheduled in
1819 * - no scheduled out in between
1820 * - thus the timestamp would be the same
1821 *
1822 * But this is a bit hairy.
1823 *
1824 * So instead, we have an explicit cgroup call to remain
1825 * within the time source all along. We believe it
1826 * is cleaner and simpler to understand.
1827 */
1828 if (is_cgroup_event(event))
1829 perf_cgroup_set_shadow_time(event, tstamp);
1830 else
1831 event->shadow_ctx_time = tstamp - ctx->timestamp;
1832}
1833
4fe757dd
PZ
1834#define MAX_INTERRUPTS (~0ULL)
1835
1836static void perf_log_throttle(struct perf_event *event, int enable);
ec0d7729 1837static void perf_log_itrace_start(struct perf_event *event);
4fe757dd 1838
235c7fc7 1839static int
9ffcfa6f 1840event_sched_in(struct perf_event *event,
235c7fc7 1841 struct perf_cpu_context *cpuctx,
6e37738a 1842 struct perf_event_context *ctx)
235c7fc7 1843{
4158755d 1844 u64 tstamp = perf_event_time(event);
44377277 1845 int ret = 0;
4158755d 1846
63342411
PZ
1847 lockdep_assert_held(&ctx->lock);
1848
cdd6c482 1849 if (event->state <= PERF_EVENT_STATE_OFF)
235c7fc7
IM
1850 return 0;
1851
cdd6c482 1852 event->state = PERF_EVENT_STATE_ACTIVE;
6e37738a 1853 event->oncpu = smp_processor_id();
4fe757dd
PZ
1854
1855 /*
1856 * Unthrottle events: since we just scheduled in, we might have missed
1857 * several ticks already, and for a heavily scheduling task there is
1858 * little guarantee it'll get a tick in a timely manner.
1859 */
1860 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1861 perf_log_throttle(event, 1);
1862 event->hw.interrupts = 0;
1863 }
1864
235c7fc7
IM
1865 /*
1866 * The new state must be visible before we turn it on in the hardware:
1867 */
1868 smp_wmb();
1869
44377277
AS
1870 perf_pmu_disable(event->pmu);
1871
72f669c0
SL
1872 perf_set_shadow_time(event, ctx, tstamp);
1873
ec0d7729
AS
1874 perf_log_itrace_start(event);
1875
a4eaf7f1 1876 if (event->pmu->add(event, PERF_EF_START)) {
cdd6c482
IM
1877 event->state = PERF_EVENT_STATE_INACTIVE;
1878 event->oncpu = -1;
44377277
AS
1879 ret = -EAGAIN;
1880 goto out;
235c7fc7
IM
1881 }
1882
00a2916f
PZ
1883 event->tstamp_running += tstamp - event->tstamp_stopped;
1884
cdd6c482 1885 if (!is_software_event(event))
3b6f9e5c 1886 cpuctx->active_oncpu++;
2fde4f94
MR
1887 if (!ctx->nr_active++)
1888 perf_event_ctx_activate(ctx);
0f5a2601
PZ
1889 if (event->attr.freq && event->attr.sample_freq)
1890 ctx->nr_freq++;
235c7fc7 1891
cdd6c482 1892 if (event->attr.exclusive)
3b6f9e5c
PM
1893 cpuctx->exclusive = 1;
1894
fadfe7be
JO
1895 if (is_orphaned_child(event))
1896 schedule_orphans_remove(ctx);
1897
44377277
AS
1898out:
1899 perf_pmu_enable(event->pmu);
1900
1901 return ret;
235c7fc7
IM
1902}
1903
6751b71e 1904static int
cdd6c482 1905group_sched_in(struct perf_event *group_event,
6751b71e 1906 struct perf_cpu_context *cpuctx,
6e37738a 1907 struct perf_event_context *ctx)
6751b71e 1908{
6bde9b6c 1909 struct perf_event *event, *partial_group = NULL;
4a234593 1910 struct pmu *pmu = ctx->pmu;
d7842da4
SE
1911 u64 now = ctx->time;
1912 bool simulate = false;
6751b71e 1913
cdd6c482 1914 if (group_event->state == PERF_EVENT_STATE_OFF)
6751b71e
PM
1915 return 0;
1916
fbbe0701 1917 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
6bde9b6c 1918
9ffcfa6f 1919 if (event_sched_in(group_event, cpuctx, ctx)) {
ad5133b7 1920 pmu->cancel_txn(pmu);
272325c4 1921 perf_mux_hrtimer_restart(cpuctx);
6751b71e 1922 return -EAGAIN;
90151c35 1923 }
6751b71e
PM
1924
1925 /*
1926 * Schedule in siblings as one group (if any):
1927 */
cdd6c482 1928 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
9ffcfa6f 1929 if (event_sched_in(event, cpuctx, ctx)) {
cdd6c482 1930 partial_group = event;
6751b71e
PM
1931 goto group_error;
1932 }
1933 }
1934
9ffcfa6f 1935 if (!pmu->commit_txn(pmu))
6e85158c 1936 return 0;
9ffcfa6f 1937
6751b71e
PM
1938group_error:
1939 /*
1940 * Groups can be scheduled in as one unit only, so undo any
1941 * partial group before returning:
d7842da4
SE
1942 * The events up to the failed event are scheduled out normally,
1943 * tstamp_stopped will be updated.
1944 *
1945 * The failed events and the remaining siblings need to have
1946 * their timings updated as if they had gone through event_sched_in()
1947 * and event_sched_out(). This is required to get consistent timings
1948 * across the group. This also takes care of the case where the group
1949 * could never be scheduled by ensuring tstamp_stopped is set to mark
1950 * the time the event was actually stopped, such that time delta
1951 * calculation in update_event_times() is correct.
6751b71e 1952 */
cdd6c482
IM
1953 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1954 if (event == partial_group)
d7842da4
SE
1955 simulate = true;
1956
1957 if (simulate) {
1958 event->tstamp_running += now - event->tstamp_stopped;
1959 event->tstamp_stopped = now;
1960 } else {
1961 event_sched_out(event, cpuctx, ctx);
1962 }
6751b71e 1963 }
9ffcfa6f 1964 event_sched_out(group_event, cpuctx, ctx);
6751b71e 1965
ad5133b7 1966 pmu->cancel_txn(pmu);
90151c35 1967
272325c4 1968 perf_mux_hrtimer_restart(cpuctx);
9e630205 1969
6751b71e
PM
1970 return -EAGAIN;
1971}
1972
3b6f9e5c 1973/*
cdd6c482 1974 * Work out whether we can put this event group on the CPU now.
3b6f9e5c 1975 */
cdd6c482 1976static int group_can_go_on(struct perf_event *event,
3b6f9e5c
PM
1977 struct perf_cpu_context *cpuctx,
1978 int can_add_hw)
1979{
1980 /*
cdd6c482 1981 * Groups consisting entirely of software events can always go on.
3b6f9e5c 1982 */
d6f962b5 1983 if (event->group_flags & PERF_GROUP_SOFTWARE)
3b6f9e5c
PM
1984 return 1;
1985 /*
1986 * If an exclusive group is already on, no other hardware
cdd6c482 1987 * events can go on.
3b6f9e5c
PM
1988 */
1989 if (cpuctx->exclusive)
1990 return 0;
1991 /*
1992 * If this group is exclusive and there are already
cdd6c482 1993 * events on the CPU, it can't go on.
3b6f9e5c 1994 */
cdd6c482 1995 if (event->attr.exclusive && cpuctx->active_oncpu)
3b6f9e5c
PM
1996 return 0;
1997 /*
1998 * Otherwise, try to add it if all previous groups were able
1999 * to go on.
2000 */
2001 return can_add_hw;
2002}
2003
cdd6c482
IM
2004static void add_event_to_ctx(struct perf_event *event,
2005 struct perf_event_context *ctx)
53cfbf59 2006{
4158755d
SE
2007 u64 tstamp = perf_event_time(event);
2008
cdd6c482 2009 list_add_event(event, ctx);
8a49542c 2010 perf_group_attach(event);
4158755d
SE
2011 event->tstamp_enabled = tstamp;
2012 event->tstamp_running = tstamp;
2013 event->tstamp_stopped = tstamp;
53cfbf59
PM
2014}
2015
2c29ef0f
PZ
2016static void task_ctx_sched_out(struct perf_event_context *ctx);
2017static void
2018ctx_sched_in(struct perf_event_context *ctx,
2019 struct perf_cpu_context *cpuctx,
2020 enum event_type_t event_type,
2021 struct task_struct *task);
fe4b04fa 2022
dce5855b
PZ
2023static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2024 struct perf_event_context *ctx,
2025 struct task_struct *task)
2026{
2027 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2028 if (ctx)
2029 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2030 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2031 if (ctx)
2032 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2033}
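
/*
 * Note on the ordering above: cpu pinned, task pinned, cpu flexible,
 * task flexible. Groups scheduled earlier get first pick of the hardware
 * counters, so pinned groups are placed before flexible ones, and CPU-wide
 * groups before per-task ones at the same pin level.
 */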
2034
0793a61d 2035/*
cdd6c482 2036 * Cross CPU call to install and enable a performance event
682076ae
PZ
2037 *
2038 * Must be called with ctx->mutex held
0793a61d 2039 */
fe4b04fa 2040static int __perf_install_in_context(void *info)
0793a61d 2041{
cdd6c482
IM
2042 struct perf_event *event = info;
2043 struct perf_event_context *ctx = event->ctx;
108b02cf 2044 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2c29ef0f
PZ
2045 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2046 struct task_struct *task = current;
2047
b58f6b0d 2048 perf_ctx_lock(cpuctx, task_ctx);
2c29ef0f 2049 perf_pmu_disable(cpuctx->ctx.pmu);
0793a61d
TG
2050
2051 /*
2c29ef0f 2052 * If there was an active task_ctx schedule it out.
0793a61d 2053 */
b58f6b0d 2054 if (task_ctx)
2c29ef0f 2055 task_ctx_sched_out(task_ctx);
b58f6b0d
PZ
2056
2057 /*
2058 * If the context we're installing events in is not the
2059 * active task_ctx, flip them.
2060 */
2061 if (ctx->task && task_ctx != ctx) {
2062 if (task_ctx)
2063 raw_spin_unlock(&task_ctx->lock);
2064 raw_spin_lock(&ctx->lock);
2065 task_ctx = ctx;
2066 }
2067
2068 if (task_ctx) {
2069 cpuctx->task_ctx = task_ctx;
2c29ef0f
PZ
2070 task = task_ctx->task;
2071 }
b58f6b0d 2072
2c29ef0f 2073 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
0793a61d 2074
4af4998b 2075 update_context_time(ctx);
e5d1367f
SE
2076 /*
2077 * update cgrp time only if current cgrp
2078 * matches event->cgrp. Must be done before
2079 * calling add_event_to_ctx()
2080 */
2081 update_cgrp_time_from_event(event);
0793a61d 2082
cdd6c482 2083 add_event_to_ctx(event, ctx);
0793a61d 2084
d859e29f 2085 /*
2c29ef0f 2086 * Schedule everything back in
d859e29f 2087 */
dce5855b 2088 perf_event_sched_in(cpuctx, task_ctx, task);
2c29ef0f
PZ
2089
2090 perf_pmu_enable(cpuctx->ctx.pmu);
2091 perf_ctx_unlock(cpuctx, task_ctx);
fe4b04fa
PZ
2092
2093 return 0;
0793a61d
TG
2094}
2095
2096/*
cdd6c482 2097 * Attach a performance event to a context
0793a61d 2098 *
cdd6c482
IM
2099 * First we add the event to the list with the hardware enable bit
2100 * in event->hw_config cleared.
0793a61d 2101 *
cdd6c482 2102 * If the event is attached to a task which is on a CPU we use an smp
0793a61d
TG
2103 * call to enable it in the task context. The task might have been
2104 * scheduled away, but we check this in the smp call again.
2105 */
2106static void
cdd6c482
IM
2107perf_install_in_context(struct perf_event_context *ctx,
2108 struct perf_event *event,
0793a61d
TG
2109 int cpu)
2110{
2111 struct task_struct *task = ctx->task;
2112
fe4b04fa
PZ
2113 lockdep_assert_held(&ctx->mutex);
2114
c3f00c70 2115 event->ctx = ctx;
0cda4c02
YZ
2116 if (event->cpu != -1)
2117 event->cpu = cpu;
c3f00c70 2118
0793a61d
TG
2119 if (!task) {
2120 /*
cdd6c482 2121 * Per cpu events are installed via an smp call and
af901ca1 2122 * the install is always successful.
0793a61d 2123 */
fe4b04fa 2124 cpu_function_call(cpu, __perf_install_in_context, event);
0793a61d
TG
2125 return;
2126 }
2127
0793a61d 2128retry:
fe4b04fa
PZ
2129 if (!task_function_call(task, __perf_install_in_context, event))
2130 return;
0793a61d 2131
e625cce1 2132 raw_spin_lock_irq(&ctx->lock);
0793a61d 2133 /*
fe4b04fa
PZ
2134 * If we failed to find a running task, but find the context active now
2135 * that we've acquired the ctx->lock, retry.
0793a61d 2136 */
fe4b04fa 2137 if (ctx->is_active) {
e625cce1 2138 raw_spin_unlock_irq(&ctx->lock);
3577af70
CW
2139 /*
2140 * Reload the task pointer, it might have been changed by
2141 * a concurrent perf_event_context_sched_out().
2142 */
2143 task = ctx->task;
0793a61d
TG
2144 goto retry;
2145 }
2146
2147 /*
fe4b04fa
PZ
2148 * Since the task isn't running, it's safe to add the event; holding
2149 * the ctx->lock ensures the task won't get scheduled in.
0793a61d 2150 */
fe4b04fa 2151 add_event_to_ctx(event, ctx);
e625cce1 2152 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
2153}
2154
fa289bec 2155/*
cdd6c482 2156 * Put an event into inactive state and update time fields.
fa289bec
PM
2157 * Enabling the leader of a group effectively enables all
2158 * the group members that aren't explicitly disabled, so we
2159 * have to update their ->tstamp_enabled also.
2160 * Note: this works for group members as well as group leaders
2161 * since the non-leader members' sibling_lists will be empty.
2162 */
1d9b482e 2163static void __perf_event_mark_enabled(struct perf_event *event)
fa289bec 2164{
cdd6c482 2165 struct perf_event *sub;
4158755d 2166 u64 tstamp = perf_event_time(event);
fa289bec 2167
cdd6c482 2168 event->state = PERF_EVENT_STATE_INACTIVE;
4158755d 2169 event->tstamp_enabled = tstamp - event->total_time_enabled;
9ed6060d 2170 list_for_each_entry(sub, &event->sibling_list, group_entry) {
4158755d
SE
2171 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
2172 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
9ed6060d 2173 }
fa289bec
PM
2174}
2175
d859e29f 2176/*
cdd6c482 2177 * Cross CPU call to enable a performance event
d859e29f 2178 */
fe4b04fa 2179static int __perf_event_enable(void *info)
04289bb9 2180{
cdd6c482 2181 struct perf_event *event = info;
cdd6c482
IM
2182 struct perf_event_context *ctx = event->ctx;
2183 struct perf_event *leader = event->group_leader;
108b02cf 2184 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
d859e29f 2185 int err;
04289bb9 2186
06f41796
JO
2187 /*
2188 * There's a time window between 'ctx->is_active' check
2189 * in perf_event_enable function and this place having:
2190 * - IRQs on
2191 * - ctx->lock unlocked
2192 *
2193 * where the task could be killed and 'ctx' deactivated
2194 * by perf_event_exit_task.
2195 */
2196 if (!ctx->is_active)
fe4b04fa 2197 return -EINVAL;
3cbed429 2198
e625cce1 2199 raw_spin_lock(&ctx->lock);
4af4998b 2200 update_context_time(ctx);
d859e29f 2201
cdd6c482 2202 if (event->state >= PERF_EVENT_STATE_INACTIVE)
d859e29f 2203 goto unlock;
e5d1367f
SE
2204
2205 /*
2206 * set current task's cgroup time reference point
2207 */
3f7cce3c 2208 perf_cgroup_set_timestamp(current, ctx);
e5d1367f 2209
1d9b482e 2210 __perf_event_mark_enabled(event);
04289bb9 2211
e5d1367f
SE
2212 if (!event_filter_match(event)) {
2213 if (is_cgroup_event(event))
2214 perf_cgroup_defer_enabled(event);
f4c4176f 2215 goto unlock;
e5d1367f 2216 }
f4c4176f 2217
04289bb9 2218 /*
cdd6c482 2219 * If the event is in a group and isn't the group leader,
d859e29f 2220 * then don't put it on unless the group is on.
04289bb9 2221 */
cdd6c482 2222 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
d859e29f 2223 goto unlock;
3b6f9e5c 2224
cdd6c482 2225 if (!group_can_go_on(event, cpuctx, 1)) {
d859e29f 2226 err = -EEXIST;
e758a33d 2227 } else {
cdd6c482 2228 if (event == leader)
6e37738a 2229 err = group_sched_in(event, cpuctx, ctx);
e758a33d 2230 else
6e37738a 2231 err = event_sched_in(event, cpuctx, ctx);
e758a33d 2232 }
d859e29f
PM
2233
2234 if (err) {
2235 /*
cdd6c482 2236 * If this event can't go on and it's part of a
d859e29f
PM
2237 * group, then the whole group has to come off.
2238 */
9e630205 2239 if (leader != event) {
d859e29f 2240 group_sched_out(leader, cpuctx, ctx);
272325c4 2241 perf_mux_hrtimer_restart(cpuctx);
9e630205 2242 }
0d48696f 2243 if (leader->attr.pinned) {
53cfbf59 2244 update_group_times(leader);
cdd6c482 2245 leader->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2246 }
d859e29f
PM
2247 }
2248
9ed6060d 2249unlock:
e625cce1 2250 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
2251
2252 return 0;
d859e29f
PM
2253}
2254
2255/*
cdd6c482 2256 * Enable an event.
c93f7669 2257 *
cdd6c482
IM
2258 * If event->ctx is a cloned context, callers must make sure that
2259 * every task struct that event->ctx->task could possibly point to
c93f7669 2260 * remains valid. This condition is satisfied when called through
cdd6c482
IM
2261 * perf_event_for_each_child or perf_event_for_each as described
2262 * for perf_event_disable.
d859e29f 2263 */
f63a8daa 2264static void _perf_event_enable(struct perf_event *event)
d859e29f 2265{
cdd6c482 2266 struct perf_event_context *ctx = event->ctx;
d859e29f
PM
2267 struct task_struct *task = ctx->task;
2268
2269 if (!task) {
2270 /*
cdd6c482 2271 * Enable the event on the cpu that it's on
d859e29f 2272 */
fe4b04fa 2273 cpu_function_call(event->cpu, __perf_event_enable, event);
d859e29f
PM
2274 return;
2275 }
2276
e625cce1 2277 raw_spin_lock_irq(&ctx->lock);
cdd6c482 2278 if (event->state >= PERF_EVENT_STATE_INACTIVE)
d859e29f
PM
2279 goto out;
2280
2281 /*
cdd6c482
IM
2282 * If the event is in error state, clear that first.
2283 * That way, if we see the event in error state below, we
d859e29f
PM
2284 * know that it has gone back into error state, as distinct
2285 * from the task having been scheduled away before the
2286 * cross-call arrived.
2287 */
cdd6c482
IM
2288 if (event->state == PERF_EVENT_STATE_ERROR)
2289 event->state = PERF_EVENT_STATE_OFF;
d859e29f 2290
9ed6060d 2291retry:
fe4b04fa 2292 if (!ctx->is_active) {
1d9b482e 2293 __perf_event_mark_enabled(event);
fe4b04fa
PZ
2294 goto out;
2295 }
2296
e625cce1 2297 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa
PZ
2298
2299 if (!task_function_call(task, __perf_event_enable, event))
2300 return;
d859e29f 2301
e625cce1 2302 raw_spin_lock_irq(&ctx->lock);
d859e29f
PM
2303
2304 /*
cdd6c482 2305 * If the context is active and the event is still off,
d859e29f
PM
2306 * we need to retry the cross-call.
2307 */
fe4b04fa
PZ
2308 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
2309 /*
2310 * task could have been flipped by a concurrent
2311 * perf_event_context_sched_out()
2312 */
2313 task = ctx->task;
d859e29f 2314 goto retry;
fe4b04fa 2315 }
fa289bec 2316
9ed6060d 2317out:
e625cce1 2318 raw_spin_unlock_irq(&ctx->lock);
d859e29f 2319}
f63a8daa
PZ
2320
2321/*
2322 * See perf_event_disable();
2323 */
2324void perf_event_enable(struct perf_event *event)
2325{
2326 struct perf_event_context *ctx;
2327
2328 ctx = perf_event_ctx_lock(event);
2329 _perf_event_enable(event);
2330 perf_event_ctx_unlock(event, ctx);
2331}
dcfce4a0 2332EXPORT_SYMBOL_GPL(perf_event_enable);
d859e29f 2333
f63a8daa 2334static int _perf_event_refresh(struct perf_event *event, int refresh)
79f14641 2335{
2023b359 2336 /*
cdd6c482 2337 * not supported on inherited events
2023b359 2338 */
2e939d1d 2339 if (event->attr.inherit || !is_sampling_event(event))
2023b359
PZ
2340 return -EINVAL;
2341
cdd6c482 2342 atomic_add(refresh, &event->event_limit);
f63a8daa 2343 _perf_event_enable(event);
2023b359
PZ
2344
2345 return 0;
79f14641 2346}
f63a8daa
PZ
2347
2348/*
2349 * See perf_event_disable()
2350 */
2351int perf_event_refresh(struct perf_event *event, int refresh)
2352{
2353 struct perf_event_context *ctx;
2354 int ret;
2355
2356 ctx = perf_event_ctx_lock(event);
2357 ret = _perf_event_refresh(event, refresh);
2358 perf_event_ctx_unlock(event, ctx);
2359
2360 return ret;
2361}
26ca5c11 2362EXPORT_SYMBOL_GPL(perf_event_refresh);
79f14641 2363
5b0311e1
FW
2364static void ctx_sched_out(struct perf_event_context *ctx,
2365 struct perf_cpu_context *cpuctx,
2366 enum event_type_t event_type)
235c7fc7 2367{
cdd6c482 2368 struct perf_event *event;
db24d33e 2369 int is_active = ctx->is_active;
235c7fc7 2370
db24d33e 2371 ctx->is_active &= ~event_type;
cdd6c482 2372 if (likely(!ctx->nr_events))
facc4307
PZ
2373 return;
2374
4af4998b 2375 update_context_time(ctx);
e5d1367f 2376 update_cgrp_time_from_cpuctx(cpuctx);
5b0311e1 2377 if (!ctx->nr_active)
facc4307 2378 return;
5b0311e1 2379
075e0b00 2380 perf_pmu_disable(ctx->pmu);
db24d33e 2381 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
889ff015
FW
2382 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2383 group_sched_out(event, cpuctx, ctx);
9ed6060d 2384 }
889ff015 2385
db24d33e 2386 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
889ff015 2387 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
8c9ed8e1 2388 group_sched_out(event, cpuctx, ctx);
9ed6060d 2389 }
1b9a644f 2390 perf_pmu_enable(ctx->pmu);
235c7fc7
IM
2391}
2392
564c2b21 2393/*
5a3126d4
PZ
2394 * Test whether two contexts are equivalent, i.e. whether they have both been
2395 * cloned from the same version of the same context.
2396 *
2397 * Equivalence is measured using a generation number in the context that is
2398 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2399 * and list_del_event().
564c2b21 2400 */
cdd6c482
IM
2401static int context_equiv(struct perf_event_context *ctx1,
2402 struct perf_event_context *ctx2)
564c2b21 2403{
211de6eb
PZ
2404 lockdep_assert_held(&ctx1->lock);
2405 lockdep_assert_held(&ctx2->lock);
2406
5a3126d4
PZ
2407 /* Pinning disables the swap optimization */
2408 if (ctx1->pin_count || ctx2->pin_count)
2409 return 0;
2410
2411 /* If ctx1 is the parent of ctx2 */
2412 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2413 return 1;
2414
2415 /* If ctx2 is the parent of ctx1 */
2416 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2417 return 1;
2418
2419 /*
2420 * If ctx1 and ctx2 have the same parent; we flatten the parent
2421 * hierarchy, see perf_event_init_context().
2422 */
2423 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2424 ctx1->parent_gen == ctx2->parent_gen)
2425 return 1;
2426
2427 /* Unmatched */
2428 return 0;
564c2b21
PM
2429}
2430
cdd6c482
IM
2431static void __perf_event_sync_stat(struct perf_event *event,
2432 struct perf_event *next_event)
bfbd3381
PZ
2433{
2434 u64 value;
2435
cdd6c482 2436 if (!event->attr.inherit_stat)
bfbd3381
PZ
2437 return;
2438
2439 /*
cdd6c482 2440 * Update the event value; we cannot use perf_event_read()
bfbd3381
PZ
2441 * because we're in the middle of a context switch and have IRQs
2442 * disabled, which upsets smp_call_function_single(), however
cdd6c482 2443 * we know the event must be on the current CPU, therefore we
bfbd3381
PZ
2444 * don't need to use it.
2445 */
cdd6c482
IM
2446 switch (event->state) {
2447 case PERF_EVENT_STATE_ACTIVE:
3dbebf15
PZ
2448 event->pmu->read(event);
2449 /* fall-through */
bfbd3381 2450
cdd6c482
IM
2451 case PERF_EVENT_STATE_INACTIVE:
2452 update_event_times(event);
bfbd3381
PZ
2453 break;
2454
2455 default:
2456 break;
2457 }
2458
2459 /*
cdd6c482 2460 * In order to keep per-task stats reliable we need to flip the event
bfbd3381
PZ
2461 * values when we flip the contexts.
2462 */
e7850595
PZ
2463 value = local64_read(&next_event->count);
2464 value = local64_xchg(&event->count, value);
2465 local64_set(&next_event->count, value);
bfbd3381 2466
cdd6c482
IM
2467 swap(event->total_time_enabled, next_event->total_time_enabled);
2468 swap(event->total_time_running, next_event->total_time_running);
19d2e755 2469
bfbd3381 2470 /*
19d2e755 2471 * Since we swizzled the values, update the user visible data too.
bfbd3381 2472 */
cdd6c482
IM
2473 perf_event_update_userpage(event);
2474 perf_event_update_userpage(next_event);
bfbd3381
PZ
2475}
2476
cdd6c482
IM
2477static void perf_event_sync_stat(struct perf_event_context *ctx,
2478 struct perf_event_context *next_ctx)
bfbd3381 2479{
cdd6c482 2480 struct perf_event *event, *next_event;
bfbd3381
PZ
2481
2482 if (!ctx->nr_stat)
2483 return;
2484
02ffdbc8
PZ
2485 update_context_time(ctx);
2486
cdd6c482
IM
2487 event = list_first_entry(&ctx->event_list,
2488 struct perf_event, event_entry);
bfbd3381 2489
cdd6c482
IM
2490 next_event = list_first_entry(&next_ctx->event_list,
2491 struct perf_event, event_entry);
bfbd3381 2492
cdd6c482
IM
2493 while (&event->event_entry != &ctx->event_list &&
2494 &next_event->event_entry != &next_ctx->event_list) {
bfbd3381 2495
cdd6c482 2496 __perf_event_sync_stat(event, next_event);
bfbd3381 2497
cdd6c482
IM
2498 event = list_next_entry(event, event_entry);
2499 next_event = list_next_entry(next_event, event_entry);
bfbd3381
PZ
2500 }
2501}
2502
fe4b04fa
PZ
2503static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2504 struct task_struct *next)
0793a61d 2505{
8dc85d54 2506 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482 2507 struct perf_event_context *next_ctx;
5a3126d4 2508 struct perf_event_context *parent, *next_parent;
108b02cf 2509 struct perf_cpu_context *cpuctx;
c93f7669 2510 int do_switch = 1;
0793a61d 2511
108b02cf
PZ
2512 if (likely(!ctx))
2513 return;
10989fb2 2514
108b02cf
PZ
2515 cpuctx = __get_cpu_context(ctx);
2516 if (!cpuctx->task_ctx)
0793a61d
TG
2517 return;
2518
c93f7669 2519 rcu_read_lock();
8dc85d54 2520 next_ctx = next->perf_event_ctxp[ctxn];
5a3126d4
PZ
2521 if (!next_ctx)
2522 goto unlock;
2523
2524 parent = rcu_dereference(ctx->parent_ctx);
2525 next_parent = rcu_dereference(next_ctx->parent_ctx);
2526
2527 /* If neither context have a parent context; they cannot be clones. */
802c8a61 2528 if (!parent && !next_parent)
5a3126d4
PZ
2529 goto unlock;
2530
2531 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
c93f7669
PM
2532 /*
2533 * Looks like the two contexts are clones, so we might be
2534 * able to optimize the context switch. We lock both
2535 * contexts and check that they are clones under the
2536 * lock (including re-checking that neither has been
2537 * uncloned in the meantime). It doesn't matter which
2538 * order we take the locks because no other cpu could
2539 * be trying to lock both of these tasks.
2540 */
e625cce1
TG
2541 raw_spin_lock(&ctx->lock);
2542 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f7669 2543 if (context_equiv(ctx, next_ctx)) {
665c2142
PZ
2544 /*
2545 * XXX do we need a memory barrier of sorts
cdd6c482 2546 * wrt to rcu_dereference() of perf_event_ctxp
665c2142 2547 */
8dc85d54
PZ
2548 task->perf_event_ctxp[ctxn] = next_ctx;
2549 next->perf_event_ctxp[ctxn] = ctx;
c93f7669
PM
2550 ctx->task = next;
2551 next_ctx->task = task;
5a158c3c
YZ
2552
2553 swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
2554
c93f7669 2555 do_switch = 0;
bfbd3381 2556
cdd6c482 2557 perf_event_sync_stat(ctx, next_ctx);
c93f7669 2558 }
e625cce1
TG
2559 raw_spin_unlock(&next_ctx->lock);
2560 raw_spin_unlock(&ctx->lock);
564c2b21 2561 }
5a3126d4 2562unlock:
c93f7669 2563 rcu_read_unlock();
564c2b21 2564
c93f7669 2565 if (do_switch) {
facc4307 2566 raw_spin_lock(&ctx->lock);
5b0311e1 2567 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
c93f7669 2568 cpuctx->task_ctx = NULL;
facc4307 2569 raw_spin_unlock(&ctx->lock);
c93f7669 2570 }
0793a61d
TG
2571}
2572
ba532500
YZ
2573void perf_sched_cb_dec(struct pmu *pmu)
2574{
2575 this_cpu_dec(perf_sched_cb_usages);
2576}
2577
2578void perf_sched_cb_inc(struct pmu *pmu)
2579{
2580 this_cpu_inc(perf_sched_cb_usages);
2581}
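
/*
 * PMUs that need to be called back on every context switch - for example
 * to save and restore per-task hardware state - pair perf_sched_cb_inc()
 * with perf_sched_cb_dec(); this per-CPU count gates whether
 * perf_pmu_sched_task() below is invoked from the scheduler hooks at all.
 */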
2582
2583/*
2584 * This function provides the context switch callback to the lower code
2585 * layer. It is invoked ONLY when the context switch callback is enabled.
2586 */
2587static void perf_pmu_sched_task(struct task_struct *prev,
2588 struct task_struct *next,
2589 bool sched_in)
2590{
2591 struct perf_cpu_context *cpuctx;
2592 struct pmu *pmu;
2593 unsigned long flags;
2594
2595 if (prev == next)
2596 return;
2597
2598 local_irq_save(flags);
2599
2600 rcu_read_lock();
2601
2602 list_for_each_entry_rcu(pmu, &pmus, entry) {
2603 if (pmu->sched_task) {
2604 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2605
2606 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2607
2608 perf_pmu_disable(pmu);
2609
2610 pmu->sched_task(cpuctx->task_ctx, sched_in);
2611
2612 perf_pmu_enable(pmu);
2613
2614 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2615 }
2616 }
2617
2618 rcu_read_unlock();
2619
2620 local_irq_restore(flags);
2621}
2622
45ac1403
AH
2623static void perf_event_switch(struct task_struct *task,
2624 struct task_struct *next_prev, bool sched_in);
2625
8dc85d54
PZ
2626#define for_each_task_context_nr(ctxn) \
2627 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2628
2629/*
2630 * Called from scheduler to remove the events of the current task,
2631 * with interrupts disabled.
2632 *
2633 * We stop each event and update the event value in event->count.
2634 *
2635 * This does not protect us against NMI, but disable()
2636 * sets the disabled bit in the control field of event _before_
2637 * accessing the event control register. If a NMI hits, then it will
2638 * not restart the event.
2639 */
ab0cce56
JO
2640void __perf_event_task_sched_out(struct task_struct *task,
2641 struct task_struct *next)
8dc85d54
PZ
2642{
2643 int ctxn;
2644
ba532500
YZ
2645 if (__this_cpu_read(perf_sched_cb_usages))
2646 perf_pmu_sched_task(task, next, false);
2647
45ac1403
AH
2648 if (atomic_read(&nr_switch_events))
2649 perf_event_switch(task, next, false);
2650
8dc85d54
PZ
2651 for_each_task_context_nr(ctxn)
2652 perf_event_context_sched_out(task, ctxn, next);
e5d1367f
SE
2653
2654 /*
2655 * if cgroup events exist on this CPU, then we need
2656 * to check if we have to switch out PMU state.
2657 * cgroup events are system-wide mode only
2658 */
4a32fea9 2659 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
a8d757ef 2660 perf_cgroup_sched_out(task, next);
8dc85d54
PZ
2661}
2662
04dc2dbb 2663static void task_ctx_sched_out(struct perf_event_context *ctx)
a08b159f 2664{
108b02cf 2665 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
a08b159f 2666
a63eaf34
PM
2667 if (!cpuctx->task_ctx)
2668 return;
012b84da
IM
2669
2670 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2671 return;
2672
04dc2dbb 2673 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
a08b159f
PM
2674 cpuctx->task_ctx = NULL;
2675}
2676
5b0311e1
FW
2677/*
2678 * Called with IRQs disabled
2679 */
2680static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2681 enum event_type_t event_type)
2682{
2683 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb9
IM
2684}
2685
235c7fc7 2686static void
5b0311e1 2687ctx_pinned_sched_in(struct perf_event_context *ctx,
6e37738a 2688 struct perf_cpu_context *cpuctx)
0793a61d 2689{
cdd6c482 2690 struct perf_event *event;
0793a61d 2691
889ff015
FW
2692 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2693 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2694 continue;
5632ab12 2695 if (!event_filter_match(event))
3b6f9e5c
PM
2696 continue;
2697
e5d1367f
SE
2698 /* may need to reset tstamp_enabled */
2699 if (is_cgroup_event(event))
2700 perf_cgroup_mark_enabled(event, ctx);
2701
8c9ed8e1 2702 if (group_can_go_on(event, cpuctx, 1))
6e37738a 2703 group_sched_in(event, cpuctx, ctx);
3b6f9e5c
PM
2704
2705 /*
2706 * If this pinned group hasn't been scheduled,
2707 * put it in error state.
2708 */
cdd6c482
IM
2709 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2710 update_group_times(event);
2711 event->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2712 }
3b6f9e5c 2713 }
5b0311e1
FW
2714}
2715
2716static void
2717ctx_flexible_sched_in(struct perf_event_context *ctx,
6e37738a 2718 struct perf_cpu_context *cpuctx)
5b0311e1
FW
2719{
2720 struct perf_event *event;
2721 int can_add_hw = 1;
3b6f9e5c 2722
889ff015
FW
2723 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2724 /* Ignore events in OFF or ERROR state */
2725 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2726 continue;
04289bb9
IM
2727 /*
2728 * Listen to the 'cpu' scheduling filter constraint
cdd6c482 2729 * of events:
04289bb9 2730 */
5632ab12 2731 if (!event_filter_match(event))
0793a61d
TG
2732 continue;
2733
e5d1367f
SE
2734 /* may need to reset tstamp_enabled */
2735 if (is_cgroup_event(event))
2736 perf_cgroup_mark_enabled(event, ctx);
2737
9ed6060d 2738 if (group_can_go_on(event, cpuctx, can_add_hw)) {
6e37738a 2739 if (group_sched_in(event, cpuctx, ctx))
dd0e6ba2 2740 can_add_hw = 0;
9ed6060d 2741 }
0793a61d 2742 }
5b0311e1
FW
2743}
2744
2745static void
2746ctx_sched_in(struct perf_event_context *ctx,
2747 struct perf_cpu_context *cpuctx,
e5d1367f
SE
2748 enum event_type_t event_type,
2749 struct task_struct *task)
5b0311e1 2750{
e5d1367f 2751 u64 now;
db24d33e 2752 int is_active = ctx->is_active;
e5d1367f 2753
db24d33e 2754 ctx->is_active |= event_type;
5b0311e1 2755 if (likely(!ctx->nr_events))
facc4307 2756 return;
5b0311e1 2757
e5d1367f
SE
2758 now = perf_clock();
2759 ctx->timestamp = now;
3f7cce3c 2760 perf_cgroup_set_timestamp(task, ctx);
5b0311e1
FW
2761 /*
2762 * First go through the list and put on any pinned groups
2763 * in order to give them the best chance of going on.
2764 */
db24d33e 2765 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
6e37738a 2766 ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1
FW
2767
2768 /* Then walk through the lower prio flexible groups */
db24d33e 2769 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
6e37738a 2770 ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7
IM
2771}
2772
329c0e01 2773static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
2774 enum event_type_t event_type,
2775 struct task_struct *task)
329c0e01
FW
2776{
2777 struct perf_event_context *ctx = &cpuctx->ctx;
2778
e5d1367f 2779 ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e01
FW
2780}
2781
e5d1367f
SE
2782static void perf_event_context_sched_in(struct perf_event_context *ctx,
2783 struct task_struct *task)
235c7fc7 2784{
108b02cf 2785 struct perf_cpu_context *cpuctx;
235c7fc7 2786
108b02cf 2787 cpuctx = __get_cpu_context(ctx);
329c0e01
FW
2788 if (cpuctx->task_ctx == ctx)
2789 return;
2790
facc4307 2791 perf_ctx_lock(cpuctx, ctx);
1b9a644f 2792 perf_pmu_disable(ctx->pmu);
329c0e01
FW
2793 /*
2794 * We want to keep the following priority order:
2795 * cpu pinned (that don't need to move), task pinned,
2796 * cpu flexible, task flexible.
2797 */
2798 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2799
1d5f003f
GN
2800 if (ctx->nr_events)
2801 cpuctx->task_ctx = ctx;
9b33fa6b 2802
86b47c25
GN
2803 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2804
facc4307
PZ
2805 perf_pmu_enable(ctx->pmu);
2806 perf_ctx_unlock(cpuctx, ctx);
235c7fc7
IM
2807}
2808
8dc85d54
PZ
2809/*
2810 * Called from scheduler to add the events of the current task
2811 * with interrupts disabled.
2812 *
2813 * We restore the event value and then enable it.
2814 *
2815 * This does not protect us against NMI, but enable()
2816 * sets the enabled bit in the control field of event _before_
2817 * accessing the event control register. If an NMI hits, then it will
2818 * keep the event running.
2819 */
ab0cce56
JO
2820void __perf_event_task_sched_in(struct task_struct *prev,
2821 struct task_struct *task)
8dc85d54
PZ
2822{
2823 struct perf_event_context *ctx;
2824 int ctxn;
2825
2826 for_each_task_context_nr(ctxn) {
2827 ctx = task->perf_event_ctxp[ctxn];
2828 if (likely(!ctx))
2829 continue;
2830
e5d1367f 2831 perf_event_context_sched_in(ctx, task);
8dc85d54 2832 }
e5d1367f
SE
2833 /*
2834 * if cgroup events exist on this CPU, then we need
2835 * to check if we have to switch in PMU state.
2836 * cgroup events are system-wide mode only
2837 */
4a32fea9 2838 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
a8d757ef 2839 perf_cgroup_sched_in(prev, task);
d010b332 2840
45ac1403
AH
2841 if (atomic_read(&nr_switch_events))
2842 perf_event_switch(task, prev, true);
2843
ba532500
YZ
2844 if (__this_cpu_read(perf_sched_cb_usages))
2845 perf_pmu_sched_task(prev, task, true);
235c7fc7
IM
2846}
2847
abd50713
PZ
2848static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2849{
2850 u64 frequency = event->attr.sample_freq;
2851 u64 sec = NSEC_PER_SEC;
2852 u64 divisor, dividend;
2853
2854 int count_fls, nsec_fls, frequency_fls, sec_fls;
2855
2856 count_fls = fls64(count);
2857 nsec_fls = fls64(nsec);
2858 frequency_fls = fls64(frequency);
2859 sec_fls = 30;
2860
2861 /*
2862 * We got @count in @nsec, with a target of sample_freq HZ
2863 * the target period becomes:
2864 *
2865 * @count * 10^9
2866 * period = -------------------
2867 * @nsec * sample_freq
2868 *
2869 */
2870
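/*
 * Worked example with made-up numbers: for sample_freq = 1000 Hz, if we
 * observed count = 2,000,000 events over nsec = 10,000,000 ns (10 ms),
 * the target period is 2e6 * 1e9 / (1e7 * 1e3) = 200,000 events per
 * sample, i.e. one sample every 200,000 events keeps us near 1000
 * samples per second.
 */
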
2871 /*
2872 * Reduce accuracy by one bit such that @a and @b converge
2873 * to a similar magnitude.
2874 */
fe4b04fa 2875#define REDUCE_FLS(a, b) \
abd50713
PZ
2876do { \
2877 if (a##_fls > b##_fls) { \
2878 a >>= 1; \
2879 a##_fls--; \
2880 } else { \
2881 b >>= 1; \
2882 b##_fls--; \
2883 } \
2884} while (0)
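
/*
 * For example, with a = 0x3fffffff (a_fls = 30) and b = 0xff (b_fls = 8),
 * one invocation halves 'a' and drops a_fls to 29, since 'a' is currently
 * the wider of the two terms; repeated use pulls both magnitudes together.
 */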
2885
2886 /*
2887 * Reduce accuracy until either term fits in a u64, then proceed with
2888 * the other, so that finally we can do a u64/u64 division.
2889 */
2890 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2891 REDUCE_FLS(nsec, frequency);
2892 REDUCE_FLS(sec, count);
2893 }
2894
2895 if (count_fls + sec_fls > 64) {
2896 divisor = nsec * frequency;
2897
2898 while (count_fls + sec_fls > 64) {
2899 REDUCE_FLS(count, sec);
2900 divisor >>= 1;
2901 }
2902
2903 dividend = count * sec;
2904 } else {
2905 dividend = count * sec;
2906
2907 while (nsec_fls + frequency_fls > 64) {
2908 REDUCE_FLS(nsec, frequency);
2909 dividend >>= 1;
2910 }
2911
2912 divisor = nsec * frequency;
2913 }
2914
f6ab91ad
PZ
2915 if (!divisor)
2916 return dividend;
2917
abd50713
PZ
2918 return div64_u64(dividend, divisor);
2919}
2920
e050e3f0
SE
2921static DEFINE_PER_CPU(int, perf_throttled_count);
2922static DEFINE_PER_CPU(u64, perf_throttled_seq);
2923
f39d47ff 2924static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
bd2b5b12 2925{
cdd6c482 2926 struct hw_perf_event *hwc = &event->hw;
f6ab91ad 2927 s64 period, sample_period;
bd2b5b12
PZ
2928 s64 delta;
2929
abd50713 2930 period = perf_calculate_period(event, nsec, count);
bd2b5b12
PZ
2931
2932 delta = (s64)(period - hwc->sample_period);
2933 delta = (delta + 7) / 8; /* low pass filter */
2934
2935 sample_period = hwc->sample_period + delta;
2936
2937 if (!sample_period)
2938 sample_period = 1;
2939
bd2b5b12 2940 hwc->sample_period = sample_period;
abd50713 2941
e7850595 2942 if (local64_read(&hwc->period_left) > 8*sample_period) {
f39d47ff
SE
2943 if (disable)
2944 event->pmu->stop(event, PERF_EF_UPDATE);
2945
e7850595 2946 local64_set(&hwc->period_left, 0);
f39d47ff
SE
2947
2948 if (disable)
2949 event->pmu->start(event, PERF_EF_RELOAD);
abd50713 2950 }
bd2b5b12
PZ
2951}
2952
e050e3f0
SE
2953/*
2954 * combine freq adjustment with unthrottling to avoid two passes over the
2955 * events. At the same time, make sure, having freq events does not change
2956 * the rate of unthrottling as that would introduce bias.
2957 */
2958static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2959 int needs_unthr)
60db5e09 2960{
cdd6c482
IM
2961 struct perf_event *event;
2962 struct hw_perf_event *hwc;
e050e3f0 2963 u64 now, period = TICK_NSEC;
abd50713 2964 s64 delta;
60db5e09 2965
e050e3f0
SE
2966 /*
2967 * we only need to iterate over all events if:
2968 * - the context has events in frequency mode (needs freq adjust)
2969 * - there are events to unthrottle on this cpu
2970 */
2971 if (!(ctx->nr_freq || needs_unthr))
0f5a2601
PZ
2972 return;
2973
e050e3f0 2974 raw_spin_lock(&ctx->lock);
f39d47ff 2975 perf_pmu_disable(ctx->pmu);
e050e3f0 2976
03541f8b 2977 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482 2978 if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09
PZ
2979 continue;
2980
5632ab12 2981 if (!event_filter_match(event))
5d27c23d
PZ
2982 continue;
2983
44377277
AS
2984 perf_pmu_disable(event->pmu);
2985
cdd6c482 2986 hwc = &event->hw;
6a24ed6c 2987
ae23bff1 2988 if (hwc->interrupts == MAX_INTERRUPTS) {
e050e3f0 2989 hwc->interrupts = 0;
cdd6c482 2990 perf_log_throttle(event, 1);
a4eaf7f1 2991 event->pmu->start(event, 0);
a78ac325
PZ
2992 }
2993
cdd6c482 2994 if (!event->attr.freq || !event->attr.sample_freq)
44377277 2995 goto next;
60db5e09 2996
e050e3f0
SE
2997 /*
2998 * stop the event and update event->count
2999 */
3000 event->pmu->stop(event, PERF_EF_UPDATE);
3001
e7850595 3002 now = local64_read(&event->count);
abd50713
PZ
3003 delta = now - hwc->freq_count_stamp;
3004 hwc->freq_count_stamp = now;
60db5e09 3005
e050e3f0
SE
3006 /*
3007 * restart the event
3008 * reload only if value has changed
f39d47ff
SE
3009 * we have stopped the event so tell that
3010 * to perf_adjust_period() to avoid stopping it
3011 * twice.
e050e3f0 3012 */
abd50713 3013 if (delta > 0)
f39d47ff 3014 perf_adjust_period(event, period, delta, false);
e050e3f0
SE
3015
3016 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
44377277
AS
3017 next:
3018 perf_pmu_enable(event->pmu);
60db5e09 3019 }
e050e3f0 3020
f39d47ff 3021 perf_pmu_enable(ctx->pmu);
e050e3f0 3022 raw_spin_unlock(&ctx->lock);
60db5e09
PZ
3023}
3024
235c7fc7 3025/*
cdd6c482 3026 * Round-robin a context's events:
235c7fc7 3027 */
cdd6c482 3028static void rotate_ctx(struct perf_event_context *ctx)
0793a61d 3029{
dddd3379
TG
3030 /*
3031 * Rotate the first entry of the non-pinned groups to the end. Rotation might be
3032 * disabled by the inheritance code.
3033 */
3034 if (!ctx->rotate_disable)
3035 list_rotate_left(&ctx->flexible_groups);
235c7fc7
IM
3036}
3037
9e630205 3038static int perf_rotate_context(struct perf_cpu_context *cpuctx)
235c7fc7 3039{
8dc85d54 3040 struct perf_event_context *ctx = NULL;
2fde4f94 3041 int rotate = 0;
7fc23a53 3042
b5ab4cd5 3043 if (cpuctx->ctx.nr_events) {
b5ab4cd5
PZ
3044 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
3045 rotate = 1;
3046 }
235c7fc7 3047
8dc85d54 3048 ctx = cpuctx->task_ctx;
b5ab4cd5 3049 if (ctx && ctx->nr_events) {
b5ab4cd5
PZ
3050 if (ctx->nr_events != ctx->nr_active)
3051 rotate = 1;
3052 }
9717e6cd 3053
e050e3f0 3054 if (!rotate)
0f5a2601
PZ
3055 goto done;
3056
facc4307 3057 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644f 3058 perf_pmu_disable(cpuctx->ctx.pmu);
60db5e09 3059
e050e3f0
SE
3060 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3061 if (ctx)
3062 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
0793a61d 3063
e050e3f0
SE
3064 rotate_ctx(&cpuctx->ctx);
3065 if (ctx)
3066 rotate_ctx(ctx);
235c7fc7 3067
e050e3f0 3068 perf_event_sched_in(cpuctx, ctx, current);
235c7fc7 3069
0f5a2601
PZ
3070 perf_pmu_enable(cpuctx->ctx.pmu);
3071 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
b5ab4cd5 3072done:
9e630205
SE
3073
3074 return rotate;
e9d2b064
PZ
3075}
3076
026249ef
FW
3077#ifdef CONFIG_NO_HZ_FULL
3078bool perf_event_can_stop_tick(void)
3079{
948b26b6 3080 if (atomic_read(&nr_freq_events) ||
d84153d6 3081 __this_cpu_read(perf_throttled_count))
026249ef 3082 return false;
d84153d6
FW
3083 else
3084 return true;
026249ef
FW
3085}
3086#endif
3087
e9d2b064
PZ
3088void perf_event_task_tick(void)
3089{
2fde4f94
MR
3090 struct list_head *head = this_cpu_ptr(&active_ctx_list);
3091 struct perf_event_context *ctx, *tmp;
e050e3f0 3092 int throttled;
b5ab4cd5 3093
e9d2b064
PZ
3094 WARN_ON(!irqs_disabled());
3095
e050e3f0
SE
3096 __this_cpu_inc(perf_throttled_seq);
3097 throttled = __this_cpu_xchg(perf_throttled_count, 0);
3098
2fde4f94 3099 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
e050e3f0 3100 perf_adjust_freq_unthr_context(ctx, throttled);
0793a61d
TG
3101}
3102
889ff015
FW
3103static int event_enable_on_exec(struct perf_event *event,
3104 struct perf_event_context *ctx)
3105{
3106 if (!event->attr.enable_on_exec)
3107 return 0;
3108
3109 event->attr.enable_on_exec = 0;
3110 if (event->state >= PERF_EVENT_STATE_INACTIVE)
3111 return 0;
3112
1d9b482e 3113 __perf_event_mark_enabled(event);
889ff015
FW
3114
3115 return 1;
3116}
3117
57e7986e 3118/*
cdd6c482 3119 * Enable all of a task's events that have been marked enable-on-exec.
57e7986e
PM
3120 * This expects task == current.
3121 */
8dc85d54 3122static void perf_event_enable_on_exec(struct perf_event_context *ctx)
57e7986e 3123{
211de6eb 3124 struct perf_event_context *clone_ctx = NULL;
cdd6c482 3125 struct perf_event *event;
57e7986e
PM
3126 unsigned long flags;
3127 int enabled = 0;
889ff015 3128 int ret;
57e7986e
PM
3129
3130 local_irq_save(flags);
cdd6c482 3131 if (!ctx || !ctx->nr_events)
57e7986e
PM
3132 goto out;
3133
e566b76e
SE
3134 /*
3135 * We must ctxsw out cgroup events to avoid conflict
3136 * when invoking perf_task_event_sched_in() later on
3137 * in this function. Otherwise we end up trying to
3138 * ctxswin cgroup events which are already scheduled
3139 * in.
3140 */
a8d757ef 3141 perf_cgroup_sched_out(current, NULL);
57e7986e 3142
e625cce1 3143 raw_spin_lock(&ctx->lock);
04dc2dbb 3144 task_ctx_sched_out(ctx);
57e7986e 3145
b79387ef 3146 list_for_each_entry(event, &ctx->event_list, event_entry) {
889ff015
FW
3147 ret = event_enable_on_exec(event, ctx);
3148 if (ret)
3149 enabled = 1;
57e7986e
PM
3150 }
3151
3152 /*
cdd6c482 3153 * Unclone this context if we enabled any event.
57e7986e 3154 */
71a851b4 3155 if (enabled)
211de6eb 3156 clone_ctx = unclone_ctx(ctx);
57e7986e 3157
e625cce1 3158 raw_spin_unlock(&ctx->lock);
57e7986e 3159
e566b76e
SE
3160 /*
3161 * Also calls ctxswin for cgroup events, if any:
3162 */
e5d1367f 3163 perf_event_context_sched_in(ctx, ctx->task);
9ed6060d 3164out:
57e7986e 3165 local_irq_restore(flags);
211de6eb
PZ
3166
3167 if (clone_ctx)
3168 put_ctx(clone_ctx);
57e7986e
PM
3169}
3170
e041e328
PZ
3171void perf_event_exec(void)
3172{
3173 struct perf_event_context *ctx;
3174 int ctxn;
3175
3176 rcu_read_lock();
3177 for_each_task_context_nr(ctxn) {
3178 ctx = current->perf_event_ctxp[ctxn];
3179 if (!ctx)
3180 continue;
3181
3182 perf_event_enable_on_exec(ctx);
3183 }
3184 rcu_read_unlock();
3185}
3186
0492d4c5
PZ
3187struct perf_read_data {
3188 struct perf_event *event;
3189 bool group;
3190};
3191
0793a61d 3192/*
cdd6c482 3193 * Cross CPU call to read the hardware event
0793a61d 3194 */
cdd6c482 3195static void __perf_event_read(void *info)
0793a61d 3196{
0492d4c5
PZ
3197 struct perf_read_data *data = info;
3198 struct perf_event *sub, *event = data->event;
cdd6c482 3199 struct perf_event_context *ctx = event->ctx;
108b02cf 3200 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
621a01ea 3201
e1ac3614
PM
3202 /*
3203 * If this is a task context, we need to check whether it is
3204 * the current task context of this cpu. If not, it has been
3205 * scheduled out before the smp call arrived. In that case
cdd6c482
IM
3206 * event->count would have been updated to a recent sample
3207 * when the event was scheduled out.
e1ac3614
PM
3208 */
3209 if (ctx->task && cpuctx->task_ctx != ctx)
3210 return;
3211
e625cce1 3212 raw_spin_lock(&ctx->lock);
e5d1367f 3213 if (ctx->is_active) {
542e72fc 3214 update_context_time(ctx);
e5d1367f
SE
3215 update_cgrp_time_from_event(event);
3216 }
0492d4c5 3217
cdd6c482 3218 update_event_times(event);
542e72fc
PZ
3219 if (event->state == PERF_EVENT_STATE_ACTIVE)
3220 event->pmu->read(event);
0492d4c5
PZ
3221
3222 if (!data->group)
3223 goto unlock;
3224
3225 list_for_each_entry(sub, &event->sibling_list, group_entry) {
3226 update_event_times(sub);
3227 if (sub->state == PERF_EVENT_STATE_ACTIVE)
3228 sub->pmu->read(sub);
3229 }
3230
3231unlock:
e625cce1 3232 raw_spin_unlock(&ctx->lock);
0793a61d
TG
3233}
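
/*
 * When data->group is set, the single IPI above refreshes the leader and
 * all of its siblings in one go, so a PERF_FORMAT_GROUP read returns
 * values sampled on the same CPU at (roughly) the same instant.
 */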
3234
b5e58793
PZ
3235static inline u64 perf_event_count(struct perf_event *event)
3236{
eacd3ecc
MF
3237 if (event->pmu->count)
3238 return event->pmu->count(event);
3239
3240 return __perf_event_count(event);
b5e58793
PZ
3241}
3242
ffe8690c
KX
3243/*
3244 * NMI-safe method to read a local event, that is an event that
3245 * is:
3246 * - either for the current task, or for this CPU
3247 * - does not have inherit set, for inherited task events
3248 * will not be local and we cannot read them atomically
3249 * - must not have a pmu::count method
3250 */
3251u64 perf_event_read_local(struct perf_event *event)
3252{
3253 unsigned long flags;
3254 u64 val;
3255
3256 /*
3257 * Disabling interrupts avoids all counter scheduling (context
3258 * switches, timer based rotation and IPIs).
3259 */
3260 local_irq_save(flags);
3261
3262 /* If this is a per-task event, it must be for current */
3263 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
3264 event->hw.target != current);
3265
3266 /* If this is a per-CPU event, it must be for this CPU */
3267 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
3268 event->cpu != smp_processor_id());
3269
3270 /*
3271 * It must not be an event with inherit set, we cannot read
3272 * all child counters from atomic context.
3273 */
3274 WARN_ON_ONCE(event->attr.inherit);
3275
3276 /*
3277 * It must not have a pmu::count method, those are not
3278 * NMI safe.
3279 */
3280 WARN_ON_ONCE(event->pmu->count);
3281
3282 /*
3283 * If the event is currently on this CPU, it's either a per-task event,
3284 * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
3285 * oncpu == -1).
3286 */
3287 if (event->oncpu == smp_processor_id())
3288 event->pmu->read(event);
3289
3290 val = local64_read(&event->count);
3291 local_irq_restore(flags);
3292
3293 return val;
3294}
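
/*
 * Usage sketch (illustrative, assuming the caller already holds a
 * reference to a suitable local, non-inherited event - e.g. a BPF helper
 * running in NMI context):
 *
 *	u64 val = perf_event_read_local(event);
 */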
3295
0492d4c5 3296static void perf_event_read(struct perf_event *event, bool group)
0793a61d
TG
3297{
3298 /*
cdd6c482
IM
3299 * If event is enabled and currently active on a CPU, update the
3300 * value in the event structure:
0793a61d 3301 */
cdd6c482 3302 if (event->state == PERF_EVENT_STATE_ACTIVE) {
0492d4c5
PZ
3303 struct perf_read_data data = {
3304 .event = event,
3305 .group = group,
3306 };
cdd6c482 3307 smp_call_function_single(event->oncpu,
0492d4c5 3308 __perf_event_read, &data, 1);
cdd6c482 3309 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9
PZ
3310 struct perf_event_context *ctx = event->ctx;
3311 unsigned long flags;
3312
e625cce1 3313 raw_spin_lock_irqsave(&ctx->lock, flags);
c530ccd9
SE
3314 /*
3315 * may read while context is not active
3316 * (e.g., thread is blocked), in that case
3317 * we cannot update context time
3318 */
e5d1367f 3319 if (ctx->is_active) {
c530ccd9 3320 update_context_time(ctx);
e5d1367f
SE
3321 update_cgrp_time_from_event(event);
3322 }
0492d4c5
PZ
3323 if (group)
3324 update_group_times(event);
3325 else
3326 update_event_times(event);
e625cce1 3327 raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d 3328 }
0793a61d
TG
3329}
3330
a63eaf34 3331/*
cdd6c482 3332 * Initialize the perf_event context in a task_struct:
a63eaf34 3333 */
eb184479 3334static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34 3335{
e625cce1 3336 raw_spin_lock_init(&ctx->lock);
a63eaf34 3337 mutex_init(&ctx->mutex);
2fde4f94 3338 INIT_LIST_HEAD(&ctx->active_ctx_list);
889ff015
FW
3339 INIT_LIST_HEAD(&ctx->pinned_groups);
3340 INIT_LIST_HEAD(&ctx->flexible_groups);
a63eaf34
PM
3341 INIT_LIST_HEAD(&ctx->event_list);
3342 atomic_set(&ctx->refcount, 1);
fadfe7be 3343 INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work);
eb184479
PZ
3344}
3345
3346static struct perf_event_context *
3347alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3348{
3349 struct perf_event_context *ctx;
3350
3351 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3352 if (!ctx)
3353 return NULL;
3354
3355 __perf_event_init_context(ctx);
3356 if (task) {
3357 ctx->task = task;
3358 get_task_struct(task);
0793a61d 3359 }
eb184479
PZ
3360 ctx->pmu = pmu;
3361
3362 return ctx;
a63eaf34
PM
3363}
3364
2ebd4ffb
MH
3365static struct task_struct *
3366find_lively_task_by_vpid(pid_t vpid)
3367{
3368 struct task_struct *task;
3369 int err;
0793a61d
TG
3370
3371 rcu_read_lock();
2ebd4ffb 3372 if (!vpid)
0793a61d
TG
3373 task = current;
3374 else
2ebd4ffb 3375 task = find_task_by_vpid(vpid);
0793a61d
TG
3376 if (task)
3377 get_task_struct(task);
3378 rcu_read_unlock();
3379
3380 if (!task)
3381 return ERR_PTR(-ESRCH);
3382
0793a61d 3383 /* Reuse ptrace permission checks for now. */
c93f7669
PM
3384 err = -EACCES;
3385 if (!ptrace_may_access(task, PTRACE_MODE_READ))
3386 goto errout;
3387
2ebd4ffb
MH
3388 return task;
3389errout:
3390 put_task_struct(task);
3391 return ERR_PTR(err);
3392
3393}
3394
fe4b04fa
PZ
3395/*
3396 * Returns a matching context with refcount and pincount.
3397 */
108b02cf 3398static struct perf_event_context *
4af57ef2
YZ
3399find_get_context(struct pmu *pmu, struct task_struct *task,
3400 struct perf_event *event)
0793a61d 3401{
211de6eb 3402 struct perf_event_context *ctx, *clone_ctx = NULL;
22a4f650 3403 struct perf_cpu_context *cpuctx;
4af57ef2 3404 void *task_ctx_data = NULL;
25346b93 3405 unsigned long flags;
8dc85d54 3406 int ctxn, err;
4af57ef2 3407 int cpu = event->cpu;
0793a61d 3408
22a4ec72 3409 if (!task) {
cdd6c482 3410 /* Must be root to operate on a CPU event: */
0764771d 3411 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
0793a61d
TG
3412 return ERR_PTR(-EACCES);
3413
0793a61d 3414 /*
cdd6c482 3415 * We could be clever and allow attaching an event to an
0793a61d
TG
3416 * offline CPU and activate it when the CPU comes up, but
3417 * that's for later.
3418 */
f6325e30 3419 if (!cpu_online(cpu))
0793a61d
TG
3420 return ERR_PTR(-ENODEV);
3421
108b02cf 3422 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d 3423 ctx = &cpuctx->ctx;
c93f7669 3424 get_ctx(ctx);
fe4b04fa 3425 ++ctx->pin_count;
0793a61d 3426
0793a61d
TG
3427 return ctx;
3428 }
3429
8dc85d54
PZ
3430 err = -EINVAL;
3431 ctxn = pmu->task_ctx_nr;
3432 if (ctxn < 0)
3433 goto errout;
3434
4af57ef2
YZ
3435 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
3436 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
3437 if (!task_ctx_data) {
3438 err = -ENOMEM;
3439 goto errout;
3440 }
3441 }
3442
9ed6060d 3443retry:
8dc85d54 3444 ctx = perf_lock_task_context(task, ctxn, &flags);
c93f7669 3445 if (ctx) {
211de6eb 3446 clone_ctx = unclone_ctx(ctx);
fe4b04fa 3447 ++ctx->pin_count;
4af57ef2
YZ
3448
3449 if (task_ctx_data && !ctx->task_ctx_data) {
3450 ctx->task_ctx_data = task_ctx_data;
3451 task_ctx_data = NULL;
3452 }
e625cce1 3453 raw_spin_unlock_irqrestore(&ctx->lock, flags);
211de6eb
PZ
3454
3455 if (clone_ctx)
3456 put_ctx(clone_ctx);
9137fb28 3457 } else {
eb184479 3458 ctx = alloc_perf_context(pmu, task);
c93f7669
PM
3459 err = -ENOMEM;
3460 if (!ctx)
3461 goto errout;
eb184479 3462
4af57ef2
YZ
3463 if (task_ctx_data) {
3464 ctx->task_ctx_data = task_ctx_data;
3465 task_ctx_data = NULL;
3466 }
3467
dbe08d82
ON
3468 err = 0;
3469 mutex_lock(&task->perf_event_mutex);
3470 /*
3471 * If it has already passed perf_event_exit_task().
3472 * we must see PF_EXITING, it takes this mutex too.
3473 */
3474 if (task->flags & PF_EXITING)
3475 err = -ESRCH;
3476 else if (task->perf_event_ctxp[ctxn])
3477 err = -EAGAIN;
fe4b04fa 3478 else {
9137fb28 3479 get_ctx(ctx);
fe4b04fa 3480 ++ctx->pin_count;
dbe08d82 3481 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa 3482 }
dbe08d82
ON
3483 mutex_unlock(&task->perf_event_mutex);
3484
3485 if (unlikely(err)) {
9137fb28 3486 put_ctx(ctx);
dbe08d82
ON
3487
3488 if (err == -EAGAIN)
3489 goto retry;
3490 goto errout;
a63eaf34
PM
3491 }
3492 }
3493
4af57ef2 3494 kfree(task_ctx_data);
0793a61d 3495 return ctx;
c93f7669 3496
9ed6060d 3497errout:
4af57ef2 3498 kfree(task_ctx_data);
c93f7669 3499 return ERR_PTR(err);
0793a61d
TG
3500}
3501
6fb2915d 3502static void perf_event_free_filter(struct perf_event *event);
2541517c 3503static void perf_event_free_bpf_prog(struct perf_event *event);
6fb2915d 3504
cdd6c482 3505static void free_event_rcu(struct rcu_head *head)
592903cd 3506{
cdd6c482 3507 struct perf_event *event;
592903cd 3508
cdd6c482
IM
3509 event = container_of(head, struct perf_event, rcu_head);
3510 if (event->ns)
3511 put_pid_ns(event->ns);
6fb2915d 3512 perf_event_free_filter(event);
cdd6c482 3513 kfree(event);
592903cd
PZ
3514}
3515
b69cf536
PZ
3516static void ring_buffer_attach(struct perf_event *event,
3517 struct ring_buffer *rb);
925d519a 3518
4beb31f3 3519static void unaccount_event_cpu(struct perf_event *event, int cpu)
f1600952 3520{
4beb31f3
FW
3521 if (event->parent)
3522 return;
3523
4beb31f3
FW
3524 if (is_cgroup_event(event))
3525 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3526}
925d519a 3527
4beb31f3
FW
3528static void unaccount_event(struct perf_event *event)
3529{
3530 if (event->parent)
3531 return;
3532
3533 if (event->attach_state & PERF_ATTACH_TASK)
3534 static_key_slow_dec_deferred(&perf_sched_events);
3535 if (event->attr.mmap || event->attr.mmap_data)
3536 atomic_dec(&nr_mmap_events);
3537 if (event->attr.comm)
3538 atomic_dec(&nr_comm_events);
3539 if (event->attr.task)
3540 atomic_dec(&nr_task_events);
948b26b6
FW
3541 if (event->attr.freq)
3542 atomic_dec(&nr_freq_events);
45ac1403
AH
3543 if (event->attr.context_switch) {
3544 static_key_slow_dec_deferred(&perf_sched_events);
3545 atomic_dec(&nr_switch_events);
3546 }
4beb31f3
FW
3547 if (is_cgroup_event(event))
3548 static_key_slow_dec_deferred(&perf_sched_events);
3549 if (has_branch_stack(event))
3550 static_key_slow_dec_deferred(&perf_sched_events);
3551
3552 unaccount_event_cpu(event, event->cpu);
3553}
925d519a 3554
bed5b25a
AS
3555/*
3556 * The following implement mutual exclusion of events on "exclusive" pmus
3557 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
3558 * at a time, so we disallow creating events that might conflict, namely:
3559 *
3560 * 1) cpu-wide events in the presence of per-task events,
3561 * 2) per-task events in the presence of cpu-wide events,
3562 * 3) two matching events on the same context.
3563 *
3564 * The former two cases are handled in the allocation path (perf_event_alloc(),
3565 * __free_event()), the latter -- before the first perf_install_in_context().
3566 */
3567static int exclusive_event_init(struct perf_event *event)
3568{
3569 struct pmu *pmu = event->pmu;
3570
3571 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3572 return 0;
3573
3574 /*
3575 * Prevent co-existence of per-task and cpu-wide events on the
3576 * same exclusive pmu.
3577 *
3578 * Negative pmu::exclusive_cnt means there are cpu-wide
3579 * events on this "exclusive" pmu, positive means there are
3580 * per-task events.
3581 *
3582 * Since this is called in perf_event_alloc() path, event::ctx
3583 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
3584 * to mean "per-task event", because unlike other attach states it
3585 * never gets cleared.
3586 */
3587 if (event->attach_state & PERF_ATTACH_TASK) {
3588 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
3589 return -EBUSY;
3590 } else {
3591 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
3592 return -EBUSY;
3593 }
3594
3595 return 0;
3596}
3597
3598static void exclusive_event_destroy(struct perf_event *event)
3599{
3600 struct pmu *pmu = event->pmu;
3601
3602 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3603 return;
3604
3605 /* see comment in exclusive_event_init() */
3606 if (event->attach_state & PERF_ATTACH_TASK)
3607 atomic_dec(&pmu->exclusive_cnt);
3608 else
3609 atomic_inc(&pmu->exclusive_cnt);
3610}
3611
3612static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
3613{
3614 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) &&
3615 (e1->cpu == e2->cpu ||
3616 e1->cpu == -1 ||
3617 e2->cpu == -1))
3618 return true;
3619 return false;
3620}
3621
3622/* Called under the same ctx::mutex as perf_install_in_context() */
3623static bool exclusive_event_installable(struct perf_event *event,
3624 struct perf_event_context *ctx)
3625{
3626 struct perf_event *iter_event;
3627 struct pmu *pmu = event->pmu;
3628
3629 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3630 return true;
3631
3632 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
3633 if (exclusive_event_match(iter_event, event))
3634 return false;
3635 }
3636
3637 return true;
3638}
3639
766d6c07
FW
3640static void __free_event(struct perf_event *event)
3641{
cdd6c482 3642 if (!event->parent) {
927c7a9e
FW
3643 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3644 put_callchain_buffers();
f344011c 3645 }
9ee318a7 3646
dead9f29
AS
3647 perf_event_free_bpf_prog(event);
3648
766d6c07
FW
3649 if (event->destroy)
3650 event->destroy(event);
3651
3652 if (event->ctx)
3653 put_ctx(event->ctx);
3654
bed5b25a
AS
3655 if (event->pmu) {
3656 exclusive_event_destroy(event);
c464c76e 3657 module_put(event->pmu->module);
bed5b25a 3658 }
c464c76e 3659
766d6c07
FW
3660 call_rcu(&event->rcu_head, free_event_rcu);
3661}
683ede43
PZ
3662
3663static void _free_event(struct perf_event *event)
f1600952 3664{
e360adbe 3665 irq_work_sync(&event->pending);
925d519a 3666
4beb31f3 3667 unaccount_event(event);
9ee318a7 3668
76369139 3669 if (event->rb) {
9bb5d40c
PZ
3670 /*
3671 * Can happen when we close an event with re-directed output.
3672 *
3673 * Since we have a 0 refcount, perf_mmap_close() will skip
3674 * over us; possibly making our ring_buffer_put() the last.
3675 */
3676 mutex_lock(&event->mmap_mutex);
b69cf536 3677 ring_buffer_attach(event, NULL);
9bb5d40c 3678 mutex_unlock(&event->mmap_mutex);
a4be7c27
PZ
3679 }
3680
e5d1367f
SE
3681 if (is_cgroup_event(event))
3682 perf_detach_cgroup(event);
3683
766d6c07 3684 __free_event(event);
f1600952
PZ
3685}
3686
683ede43
PZ
3687/*
3688 * Used to free events which have a known refcount of 1, such as in error paths
3689 * where the event isn't exposed yet and inherited events.
3690 */
3691static void free_event(struct perf_event *event)
0793a61d 3692{
683ede43
PZ
3693 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
3694 "unexpected event refcount: %ld; ptr=%p\n",
3695 atomic_long_read(&event->refcount), event)) {
3696 /* leak to avoid use-after-free */
3697 return;
3698 }
0793a61d 3699
683ede43 3700 _free_event(event);
0793a61d
TG
3701}
3702
a66a3052 3703/*
f8697762 3704 * Remove user event from the owner task.
a66a3052 3705 */
f8697762 3706static void perf_remove_from_owner(struct perf_event *event)
fb0459d7 3707{
8882135b 3708 struct task_struct *owner;
fb0459d7 3709
8882135b
PZ
3710 rcu_read_lock();
3711 owner = ACCESS_ONCE(event->owner);
3712 /*
3713 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
3714 * !owner it means the list deletion is complete and we can indeed
3715 * free this event, otherwise we need to serialize on
3716 * owner->perf_event_mutex.
3717 */
3718 smp_read_barrier_depends();
3719 if (owner) {
3720 /*
3721 * Since delayed_put_task_struct() also drops the last
3722 * task reference we can safely take a new reference
3723 * while holding the rcu_read_lock().
3724 */
3725 get_task_struct(owner);
3726 }
3727 rcu_read_unlock();
3728
3729 if (owner) {
f63a8daa
PZ
3730 /*
3731 * If we're here through perf_event_exit_task() we're already
3732 * holding ctx->mutex which would be an inversion wrt. the
3733 * normal lock order.
3734 *
 3735		 * However we can safely take this lock because it's the child
 3736		 * ctx->mutex.
3737 */
3738 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
3739
8882135b
PZ
3740 /*
 3741		 * We have to re-check the event->owner field; if it is cleared
 3742		 * we raced with perf_event_exit_task(). Acquiring the mutex
3743 * ensured they're done, and we can proceed with freeing the
3744 * event.
3745 */
3746 if (event->owner)
3747 list_del_init(&event->owner_entry);
3748 mutex_unlock(&owner->perf_event_mutex);
3749 put_task_struct(owner);
3750 }
f8697762
JO
3751}
3752
f8697762
JO
3753static void put_event(struct perf_event *event)
3754{
a83fe28e 3755 struct perf_event_context *ctx;
f8697762
JO
3756
3757 if (!atomic_long_dec_and_test(&event->refcount))
3758 return;
3759
3760 if (!is_kernel_event(event))
3761 perf_remove_from_owner(event);
8882135b 3762
683ede43
PZ
3763 /*
3764 * There are two ways this annotation is useful:
3765 *
 3766	 * 1) there is a lock recursion from perf_event_exit_task();
 3767	 *    see the comment there.
3768 *
3769 * 2) there is a lock-inversion with mmap_sem through
b15f495b 3770 * perf_read_group(), which takes faults while
683ede43
PZ
3771 * holding ctx->mutex, however this is called after
3772 * the last filedesc died, so there is no possibility
3773 * to trigger the AB-BA case.
3774 */
a83fe28e
PZ
3775 ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
3776 WARN_ON_ONCE(ctx->parent_ctx);
683ede43 3777 perf_remove_from_context(event, true);
d415a7f1 3778 perf_event_ctx_unlock(event, ctx);
683ede43
PZ
3779
3780 _free_event(event);
a6fa941d
AV
3781}
3782
683ede43
PZ
3783int perf_event_release_kernel(struct perf_event *event)
3784{
3785 put_event(event);
3786 return 0;
3787}
3788EXPORT_SYMBOL_GPL(perf_event_release_kernel);
3789
8b10c5e2
PZ
3790/*
3791 * Called when the last reference to the file is gone.
3792 */
a6fa941d
AV
3793static int perf_release(struct inode *inode, struct file *file)
3794{
3795 put_event(file->private_data);
3796 return 0;
fb0459d7 3797}
fb0459d7 3798
fadfe7be
JO
3799/*
 3800 * Remove all orphaned events from the context.
3801 */
3802static void orphans_remove_work(struct work_struct *work)
3803{
3804 struct perf_event_context *ctx;
3805 struct perf_event *event, *tmp;
3806
3807 ctx = container_of(work, struct perf_event_context,
3808 orphans_remove.work);
3809
3810 mutex_lock(&ctx->mutex);
3811 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
3812 struct perf_event *parent_event = event->parent;
3813
3814 if (!is_orphaned_child(event))
3815 continue;
3816
3817 perf_remove_from_context(event, true);
3818
3819 mutex_lock(&parent_event->child_mutex);
3820 list_del_init(&event->child_list);
3821 mutex_unlock(&parent_event->child_mutex);
3822
3823 free_event(event);
3824 put_event(parent_event);
3825 }
3826
3827 raw_spin_lock_irq(&ctx->lock);
3828 ctx->orphans_remove_sched = false;
3829 raw_spin_unlock_irq(&ctx->lock);
3830 mutex_unlock(&ctx->mutex);
3831
3832 put_ctx(ctx);
3833}
3834
59ed446f 3835u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c0994 3836{
cdd6c482 3837 struct perf_event *child;
e53c0994
PZ
3838 u64 total = 0;
3839
59ed446f
PZ
3840 *enabled = 0;
3841 *running = 0;
3842
6f10581a 3843 mutex_lock(&event->child_mutex);
01add3ea 3844
0492d4c5 3845 perf_event_read(event, false);
01add3ea
SB
3846 total += perf_event_count(event);
3847
59ed446f
PZ
3848 *enabled += event->total_time_enabled +
3849 atomic64_read(&event->child_total_time_enabled);
3850 *running += event->total_time_running +
3851 atomic64_read(&event->child_total_time_running);
3852
3853 list_for_each_entry(child, &event->child_list, child_list) {
0492d4c5 3854 perf_event_read(child, false);
01add3ea 3855 total += perf_event_count(child);
59ed446f
PZ
3856 *enabled += child->total_time_enabled;
3857 *running += child->total_time_running;
3858 }
6f10581a 3859 mutex_unlock(&event->child_mutex);
e53c0994
PZ
3860
3861 return total;
3862}
fb0459d7 3863EXPORT_SYMBOL_GPL(perf_event_read_value);
e53c0994 3864
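/*
 * Illustrative sketch, not part of core.c: an in-kernel user (e.g. a
 * module) might combine the exported helpers above roughly as below.
 * The attr values, the CPU number and the "example_" names are
 * assumptions made for the sake of the example.
 */
#include <linux/perf_event.h>
#include <linux/err.h>
#include <linux/printk.h>

static struct perf_event *example_event;

static int example_counter_start(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};

	/* cycle counter on CPU 0, no overflow handler */
	example_event = perf_event_create_kernel_counter(&attr, 0, NULL,
							  NULL, NULL);
	return IS_ERR(example_event) ? PTR_ERR(example_event) : 0;
}

static void example_counter_report(void)
{
	u64 enabled, running, count;

	count = perf_event_read_value(example_event, &enabled, &running);
	pr_info("cycles=%llu enabled=%llu running=%llu\n",
		(unsigned long long)count,
		(unsigned long long)enabled,
		(unsigned long long)running);
	perf_event_release_kernel(example_event);
}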
b15f495b 3865static int perf_read_group(struct perf_event *event,
3dab77fb
PZ
3866 u64 read_format, char __user *buf)
3867{
cdd6c482 3868 struct perf_event *leader = event->group_leader, *sub;
6f10581a 3869 struct perf_event_context *ctx = leader->ctx;
f63a8daa 3870 int n = 0, size = 0, ret;
59ed446f 3871 u64 count, enabled, running;
f63a8daa
PZ
3872 u64 values[5];
3873
3874 lockdep_assert_held(&ctx->mutex);
abf4868b 3875
59ed446f 3876 count = perf_event_read_value(leader, &enabled, &running);
3dab77fb
PZ
3877
3878 values[n++] = 1 + leader->nr_siblings;
59ed446f
PZ
3879 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3880 values[n++] = enabled;
3881 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3882 values[n++] = running;
abf4868b
PZ
3883 values[n++] = count;
3884 if (read_format & PERF_FORMAT_ID)
3885 values[n++] = primary_event_id(leader);
3dab77fb
PZ
3886
3887 size = n * sizeof(u64);
3888
3889 if (copy_to_user(buf, values, size))
f63a8daa 3890 return -EFAULT;
3dab77fb 3891
6f10581a 3892 ret = size;
3dab77fb 3893
65abc865 3894 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
abf4868b 3895 n = 0;
3dab77fb 3896
59ed446f 3897 values[n++] = perf_event_read_value(sub, &enabled, &running);
abf4868b
PZ
3898 if (read_format & PERF_FORMAT_ID)
3899 values[n++] = primary_event_id(sub);
3900
3901 size = n * sizeof(u64);
3902
184d3da8 3903 if (copy_to_user(buf + ret, values, size)) {
f63a8daa 3904 return -EFAULT;
6f10581a 3905 }
abf4868b
PZ
3906
3907 ret += size;
3dab77fb
PZ
3908 }
3909
abf4868b 3910 return ret;
3dab77fb
PZ
3911}
3912
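/*
 * Illustrative userspace sketch, not part of core.c: reading a two-event
 * group whose leader was opened with PERF_FORMAT_GROUP; the local struct
 * mirrors the value layout perf_read_group() writes above. The event
 * choices, pid/cpu arguments and error handling are assumptions.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	struct {
		uint64_t nr, time_enabled, time_running;
		struct { uint64_t value, id; } cnt[2];
	} buf;
	int leader, sibling;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
			   PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;
	attr.disabled = 1;

	leader = sys_perf_event_open(&attr, 0, -1, -1, 0);

	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 0;
	sibling = sys_perf_event_open(&attr, 0, -1, leader, 0);
	if (leader < 0 || sibling < 0)
		return 1;

	ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

	/* one read() returns nr, the times, then {value, id} per member */
	if (read(leader, &buf, sizeof(buf)) < 0)
		return 1;
	printf("cycles=%llu instructions=%llu\n",
	       (unsigned long long)buf.cnt[0].value,
	       (unsigned long long)buf.cnt[1].value);
	return 0;
}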
b15f495b 3913static int perf_read_one(struct perf_event *event,
3dab77fb
PZ
3914 u64 read_format, char __user *buf)
3915{
59ed446f 3916 u64 enabled, running;
3dab77fb
PZ
3917 u64 values[4];
3918 int n = 0;
3919
59ed446f
PZ
3920 values[n++] = perf_event_read_value(event, &enabled, &running);
3921 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3922 values[n++] = enabled;
3923 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3924 values[n++] = running;
3dab77fb 3925 if (read_format & PERF_FORMAT_ID)
cdd6c482 3926 values[n++] = primary_event_id(event);
3dab77fb
PZ
3927
3928 if (copy_to_user(buf, values, n * sizeof(u64)))
3929 return -EFAULT;
3930
3931 return n * sizeof(u64);
3932}
3933
dc633982
JO
3934static bool is_event_hup(struct perf_event *event)
3935{
3936 bool no_children;
3937
3938 if (event->state != PERF_EVENT_STATE_EXIT)
3939 return false;
3940
3941 mutex_lock(&event->child_mutex);
3942 no_children = list_empty(&event->child_list);
3943 mutex_unlock(&event->child_mutex);
3944 return no_children;
3945}
3946
0793a61d 3947/*
cdd6c482 3948 * Read the performance event - simple non blocking version for now
0793a61d
TG
3949 */
3950static ssize_t
b15f495b 3951__perf_read(struct perf_event *event, char __user *buf, size_t count)
0793a61d 3952{
cdd6c482 3953 u64 read_format = event->attr.read_format;
3dab77fb 3954 int ret;
0793a61d 3955
3b6f9e5c 3956 /*
cdd6c482 3957	 * Return end-of-file for a read on an event that is in
3b6f9e5c
PM
3958 * error state (i.e. because it was pinned but it couldn't be
3959 * scheduled on to the CPU at some point).
3960 */
cdd6c482 3961 if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5c
PM
3962 return 0;
3963
c320c7b7 3964 if (count < event->read_size)
3dab77fb
PZ
3965 return -ENOSPC;
3966
cdd6c482 3967 WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb 3968 if (read_format & PERF_FORMAT_GROUP)
b15f495b 3969 ret = perf_read_group(event, read_format, buf);
3dab77fb 3970 else
b15f495b 3971 ret = perf_read_one(event, read_format, buf);
0793a61d 3972
3dab77fb 3973 return ret;
0793a61d
TG
3974}
3975
0793a61d
TG
3976static ssize_t
3977perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3978{
cdd6c482 3979 struct perf_event *event = file->private_data;
f63a8daa
PZ
3980 struct perf_event_context *ctx;
3981 int ret;
0793a61d 3982
f63a8daa 3983 ctx = perf_event_ctx_lock(event);
b15f495b 3984 ret = __perf_read(event, buf, count);
f63a8daa
PZ
3985 perf_event_ctx_unlock(event, ctx);
3986
3987 return ret;
0793a61d
TG
3988}
3989
3990static unsigned int perf_poll(struct file *file, poll_table *wait)
3991{
cdd6c482 3992 struct perf_event *event = file->private_data;
76369139 3993 struct ring_buffer *rb;
61b67684 3994 unsigned int events = POLLHUP;
c7138f37 3995
e708d7ad 3996 poll_wait(file, &event->waitq, wait);
179033b3 3997
dc633982 3998 if (is_event_hup(event))
179033b3 3999 return events;
c7138f37 4000
10c6db11 4001 /*
9bb5d40c
PZ
4002 * Pin the event->rb by taking event->mmap_mutex; otherwise
4003 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
10c6db11
PZ
4004 */
4005 mutex_lock(&event->mmap_mutex);
9bb5d40c
PZ
4006 rb = event->rb;
4007 if (rb)
76369139 4008 events = atomic_xchg(&rb->poll, 0);
10c6db11 4009 mutex_unlock(&event->mmap_mutex);
0793a61d
TG
4010 return events;
4011}
4012
f63a8daa 4013static void _perf_event_reset(struct perf_event *event)
6de6a7b9 4014{
0492d4c5 4015 perf_event_read(event, false);
e7850595 4016 local64_set(&event->count, 0);
cdd6c482 4017 perf_event_update_userpage(event);
3df5edad
PZ
4018}
4019
c93f7669 4020/*
cdd6c482
IM
4021 * Holding the top-level event's child_mutex means that any
4022 * descendant process that has inherited this event will block
4023 * in sync_child_event if it goes to exit, thus satisfying the
4024 * task existence requirements of perf_event_enable/disable.
c93f7669 4025 */
cdd6c482
IM
4026static void perf_event_for_each_child(struct perf_event *event,
4027 void (*func)(struct perf_event *))
3df5edad 4028{
cdd6c482 4029 struct perf_event *child;
3df5edad 4030
cdd6c482 4031 WARN_ON_ONCE(event->ctx->parent_ctx);
f63a8daa 4032
cdd6c482
IM
4033 mutex_lock(&event->child_mutex);
4034 func(event);
4035 list_for_each_entry(child, &event->child_list, child_list)
3df5edad 4036 func(child);
cdd6c482 4037 mutex_unlock(&event->child_mutex);
3df5edad
PZ
4038}
4039
cdd6c482
IM
4040static void perf_event_for_each(struct perf_event *event,
4041 void (*func)(struct perf_event *))
3df5edad 4042{
cdd6c482
IM
4043 struct perf_event_context *ctx = event->ctx;
4044 struct perf_event *sibling;
3df5edad 4045
f63a8daa
PZ
4046 lockdep_assert_held(&ctx->mutex);
4047
cdd6c482 4048 event = event->group_leader;
75f937f2 4049
cdd6c482 4050 perf_event_for_each_child(event, func);
cdd6c482 4051 list_for_each_entry(sibling, &event->sibling_list, group_entry)
724b6daa 4052 perf_event_for_each_child(sibling, func);
6de6a7b9
PZ
4053}
4054
c7999c6f
PZ
4055struct period_event {
4056 struct perf_event *event;
08247e31 4057 u64 value;
c7999c6f 4058};
08247e31 4059
c7999c6f
PZ
4060static int __perf_event_period(void *info)
4061{
4062 struct period_event *pe = info;
4063 struct perf_event *event = pe->event;
4064 struct perf_event_context *ctx = event->ctx;
4065 u64 value = pe->value;
4066 bool active;
08247e31 4067
c7999c6f 4068 raw_spin_lock(&ctx->lock);
cdd6c482 4069 if (event->attr.freq) {
cdd6c482 4070 event->attr.sample_freq = value;
08247e31 4071 } else {
cdd6c482
IM
4072 event->attr.sample_period = value;
4073 event->hw.sample_period = value;
08247e31 4074 }
bad7192b
PZ
4075
4076 active = (event->state == PERF_EVENT_STATE_ACTIVE);
4077 if (active) {
4078 perf_pmu_disable(ctx->pmu);
4079 event->pmu->stop(event, PERF_EF_UPDATE);
4080 }
4081
4082 local64_set(&event->hw.period_left, 0);
4083
4084 if (active) {
4085 event->pmu->start(event, PERF_EF_RELOAD);
4086 perf_pmu_enable(ctx->pmu);
4087 }
c7999c6f 4088 raw_spin_unlock(&ctx->lock);
bad7192b 4089
c7999c6f
PZ
4090 return 0;
4091}
4092
4093static int perf_event_period(struct perf_event *event, u64 __user *arg)
4094{
4095 struct period_event pe = { .event = event, };
4096 struct perf_event_context *ctx = event->ctx;
4097 struct task_struct *task;
4098 u64 value;
4099
4100 if (!is_sampling_event(event))
4101 return -EINVAL;
4102
4103 if (copy_from_user(&value, arg, sizeof(value)))
4104 return -EFAULT;
4105
4106 if (!value)
4107 return -EINVAL;
4108
4109 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4110 return -EINVAL;
4111
4112 task = ctx->task;
4113 pe.value = value;
4114
4115 if (!task) {
4116 cpu_function_call(event->cpu, __perf_event_period, &pe);
4117 return 0;
4118 }
4119
4120retry:
4121 if (!task_function_call(task, __perf_event_period, &pe))
4122 return 0;
4123
4124 raw_spin_lock_irq(&ctx->lock);
4125 if (ctx->is_active) {
4126 raw_spin_unlock_irq(&ctx->lock);
4127 task = ctx->task;
4128 goto retry;
4129 }
4130
4131 __perf_event_period(&pe);
e625cce1 4132 raw_spin_unlock_irq(&ctx->lock);
08247e31 4133
c7999c6f 4134 return 0;
08247e31
PZ
4135}
4136
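/*
 * Illustrative userspace sketch, not part of core.c: the ioctl that ends
 * up in perf_event_period() above. "fd" is assumed to be a sampling
 * event file descriptor obtained from perf_event_open().
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int set_sample_period(int fd, uint64_t period)
{
	/* the argument is a pointer to a u64, fetched with copy_from_user() */
	return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
}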
ac9721f3
PZ
4137static const struct file_operations perf_fops;
4138
2903ff01 4139static inline int perf_fget_light(int fd, struct fd *p)
ac9721f3 4140{
2903ff01
AV
4141 struct fd f = fdget(fd);
4142 if (!f.file)
4143 return -EBADF;
ac9721f3 4144
2903ff01
AV
4145 if (f.file->f_op != &perf_fops) {
4146 fdput(f);
4147 return -EBADF;
ac9721f3 4148 }
2903ff01
AV
4149 *p = f;
4150 return 0;
ac9721f3
PZ
4151}
4152
4153static int perf_event_set_output(struct perf_event *event,
4154 struct perf_event *output_event);
6fb2915d 4155static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2541517c 4156static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
a4be7c27 4157
f63a8daa 4158static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
d859e29f 4159{
cdd6c482 4160 void (*func)(struct perf_event *);
3df5edad 4161 u32 flags = arg;
d859e29f
PM
4162
4163 switch (cmd) {
cdd6c482 4164 case PERF_EVENT_IOC_ENABLE:
f63a8daa 4165 func = _perf_event_enable;
d859e29f 4166 break;
cdd6c482 4167 case PERF_EVENT_IOC_DISABLE:
f63a8daa 4168 func = _perf_event_disable;
79f14641 4169 break;
cdd6c482 4170 case PERF_EVENT_IOC_RESET:
f63a8daa 4171 func = _perf_event_reset;
6de6a7b9 4172 break;
3df5edad 4173
cdd6c482 4174 case PERF_EVENT_IOC_REFRESH:
f63a8daa 4175 return _perf_event_refresh(event, arg);
08247e31 4176
cdd6c482
IM
4177 case PERF_EVENT_IOC_PERIOD:
4178 return perf_event_period(event, (u64 __user *)arg);
08247e31 4179
cf4957f1
JO
4180 case PERF_EVENT_IOC_ID:
4181 {
4182 u64 id = primary_event_id(event);
4183
4184 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4185 return -EFAULT;
4186 return 0;
4187 }
4188
cdd6c482 4189 case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3 4190 {
ac9721f3 4191 int ret;
ac9721f3 4192 if (arg != -1) {
2903ff01
AV
4193 struct perf_event *output_event;
4194 struct fd output;
4195 ret = perf_fget_light(arg, &output);
4196 if (ret)
4197 return ret;
4198 output_event = output.file->private_data;
4199 ret = perf_event_set_output(event, output_event);
4200 fdput(output);
4201 } else {
4202 ret = perf_event_set_output(event, NULL);
ac9721f3 4203 }
ac9721f3
PZ
4204 return ret;
4205 }
a4be7c27 4206
6fb2915d
LZ
4207 case PERF_EVENT_IOC_SET_FILTER:
4208 return perf_event_set_filter(event, (void __user *)arg);
4209
2541517c
AS
4210 case PERF_EVENT_IOC_SET_BPF:
4211 return perf_event_set_bpf_prog(event, arg);
4212
d859e29f 4213 default:
3df5edad 4214 return -ENOTTY;
d859e29f 4215 }
3df5edad
PZ
4216
4217 if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482 4218 perf_event_for_each(event, func);
3df5edad 4219 else
cdd6c482 4220 perf_event_for_each_child(event, func);
3df5edad
PZ
4221
4222 return 0;
d859e29f
PM
4223}
4224
f63a8daa
PZ
4225static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4226{
4227 struct perf_event *event = file->private_data;
4228 struct perf_event_context *ctx;
4229 long ret;
4230
4231 ctx = perf_event_ctx_lock(event);
4232 ret = _perf_ioctl(event, cmd, arg);
4233 perf_event_ctx_unlock(event, ctx);
4234
4235 return ret;
4236}
4237
b3f20785
PM
4238#ifdef CONFIG_COMPAT
4239static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4240 unsigned long arg)
4241{
4242 switch (_IOC_NR(cmd)) {
4243 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4244 case _IOC_NR(PERF_EVENT_IOC_ID):
 4245		/* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
4246 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4247 cmd &= ~IOCSIZE_MASK;
4248 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4249 }
4250 break;
4251 }
4252 return perf_ioctl(file, cmd, arg);
4253}
4254#else
4255# define perf_compat_ioctl NULL
4256#endif
4257
cdd6c482 4258int perf_event_task_enable(void)
771d7cde 4259{
f63a8daa 4260 struct perf_event_context *ctx;
cdd6c482 4261 struct perf_event *event;
771d7cde 4262
cdd6c482 4263 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
4264 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4265 ctx = perf_event_ctx_lock(event);
4266 perf_event_for_each_child(event, _perf_event_enable);
4267 perf_event_ctx_unlock(event, ctx);
4268 }
cdd6c482 4269 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
4270
4271 return 0;
4272}
4273
cdd6c482 4274int perf_event_task_disable(void)
771d7cde 4275{
f63a8daa 4276 struct perf_event_context *ctx;
cdd6c482 4277 struct perf_event *event;
771d7cde 4278
cdd6c482 4279 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
4280 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4281 ctx = perf_event_ctx_lock(event);
4282 perf_event_for_each_child(event, _perf_event_disable);
4283 perf_event_ctx_unlock(event, ctx);
4284 }
cdd6c482 4285 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
4286
4287 return 0;
4288}
4289
cdd6c482 4290static int perf_event_index(struct perf_event *event)
194002b2 4291{
a4eaf7f1
PZ
4292 if (event->hw.state & PERF_HES_STOPPED)
4293 return 0;
4294
cdd6c482 4295 if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b2
PZ
4296 return 0;
4297
35edc2a5 4298 return event->pmu->event_idx(event);
194002b2
PZ
4299}
4300
c4794295 4301static void calc_timer_values(struct perf_event *event,
e3f3541c 4302 u64 *now,
7f310a5d
EM
4303 u64 *enabled,
4304 u64 *running)
c4794295 4305{
e3f3541c 4306 u64 ctx_time;
c4794295 4307
e3f3541c
PZ
4308 *now = perf_clock();
4309 ctx_time = event->shadow_ctx_time + *now;
c4794295
EM
4310 *enabled = ctx_time - event->tstamp_enabled;
4311 *running = ctx_time - event->tstamp_running;
4312}
4313
fa731587
PZ
4314static void perf_event_init_userpage(struct perf_event *event)
4315{
4316 struct perf_event_mmap_page *userpg;
4317 struct ring_buffer *rb;
4318
4319 rcu_read_lock();
4320 rb = rcu_dereference(event->rb);
4321 if (!rb)
4322 goto unlock;
4323
4324 userpg = rb->user_page;
4325
4326 /* Allow new userspace to detect that bit 0 is deprecated */
4327 userpg->cap_bit0_is_deprecated = 1;
4328 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
e8c6deac
AS
4329 userpg->data_offset = PAGE_SIZE;
4330 userpg->data_size = perf_data_size(rb);
fa731587
PZ
4331
4332unlock:
4333 rcu_read_unlock();
4334}
4335
c1317ec2
AL
4336void __weak arch_perf_update_userpage(
4337 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
e3f3541c
PZ
4338{
4339}
4340
38ff667b
PZ
4341/*
4342 * Callers need to ensure there can be no nesting of this function, otherwise
4343 * the seqlock logic goes bad. We can not serialize this because the arch
4344 * code calls this from NMI context.
4345 */
cdd6c482 4346void perf_event_update_userpage(struct perf_event *event)
37d81828 4347{
cdd6c482 4348 struct perf_event_mmap_page *userpg;
76369139 4349 struct ring_buffer *rb;
e3f3541c 4350 u64 enabled, running, now;
38ff667b
PZ
4351
4352 rcu_read_lock();
5ec4c599
PZ
4353 rb = rcu_dereference(event->rb);
4354 if (!rb)
4355 goto unlock;
4356
0d641208
EM
4357 /*
4358 * compute total_time_enabled, total_time_running
4359 * based on snapshot values taken when the event
4360 * was last scheduled in.
4361 *
 4363	 * we cannot simply call update_context_time()
 4363	 * because of locking issues, as we can be called in
 4364	 * NMI context
4365 */
e3f3541c 4366 calc_timer_values(event, &now, &enabled, &running);
38ff667b 4367
76369139 4368 userpg = rb->user_page;
7b732a75
PZ
4369 /*
4370 * Disable preemption so as to not let the corresponding user-space
4371 * spin too long if we get preempted.
4372 */
4373 preempt_disable();
37d81828 4374 ++userpg->lock;
92f22a38 4375 barrier();
cdd6c482 4376 userpg->index = perf_event_index(event);
b5e58793 4377 userpg->offset = perf_event_count(event);
365a4038 4378 if (userpg->index)
e7850595 4379 userpg->offset -= local64_read(&event->hw.prev_count);
7b732a75 4380
0d641208 4381 userpg->time_enabled = enabled +
cdd6c482 4382 atomic64_read(&event->child_total_time_enabled);
7f8b4e4e 4383
0d641208 4384 userpg->time_running = running +
cdd6c482 4385 atomic64_read(&event->child_total_time_running);
7f8b4e4e 4386
c1317ec2 4387 arch_perf_update_userpage(event, userpg, now);
e3f3541c 4388
92f22a38 4389 barrier();
37d81828 4390 ++userpg->lock;
7b732a75 4391 preempt_enable();
38ff667b 4392unlock:
7b732a75 4393 rcu_read_unlock();
37d81828
PM
4394}
4395
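/*
 * Illustrative userspace sketch, not part of core.c: consuming the fields
 * published above. "pc" is assumed to point at the first mmap()ed page of
 * an event (struct perf_event_mmap_page); the retry loop pairs with the
 * two ++userpg->lock increments in perf_event_update_userpage().
 */
#include <stdint.h>
#include <linux/perf_event.h>

#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

static void read_event_times(volatile struct perf_event_mmap_page *pc,
			     uint64_t *enabled, uint64_t *running)
{
	uint32_t seq;

	do {
		seq = pc->lock;
		compiler_barrier();
		*enabled = pc->time_enabled;
		*running = pc->time_running;
		/* pc->index / pc->offset would be read here for self-monitoring */
		compiler_barrier();
	} while (pc->lock != seq);
}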
906010b2
PZ
4396static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4397{
4398 struct perf_event *event = vma->vm_file->private_data;
76369139 4399 struct ring_buffer *rb;
906010b2
PZ
4400 int ret = VM_FAULT_SIGBUS;
4401
4402 if (vmf->flags & FAULT_FLAG_MKWRITE) {
4403 if (vmf->pgoff == 0)
4404 ret = 0;
4405 return ret;
4406 }
4407
4408 rcu_read_lock();
76369139
FW
4409 rb = rcu_dereference(event->rb);
4410 if (!rb)
906010b2
PZ
4411 goto unlock;
4412
4413 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
4414 goto unlock;
4415
76369139 4416 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b2
PZ
4417 if (!vmf->page)
4418 goto unlock;
4419
4420 get_page(vmf->page);
4421 vmf->page->mapping = vma->vm_file->f_mapping;
4422 vmf->page->index = vmf->pgoff;
4423
4424 ret = 0;
4425unlock:
4426 rcu_read_unlock();
4427
4428 return ret;
4429}
4430
10c6db11
PZ
4431static void ring_buffer_attach(struct perf_event *event,
4432 struct ring_buffer *rb)
4433{
b69cf536 4434 struct ring_buffer *old_rb = NULL;
10c6db11
PZ
4435 unsigned long flags;
4436
b69cf536
PZ
4437 if (event->rb) {
4438 /*
 4439		 * Should be impossible; we set this when removing
4440 * event->rb_entry and wait/clear when adding event->rb_entry.
4441 */
4442 WARN_ON_ONCE(event->rcu_pending);
10c6db11 4443
b69cf536 4444 old_rb = event->rb;
b69cf536
PZ
4445 spin_lock_irqsave(&old_rb->event_lock, flags);
4446 list_del_rcu(&event->rb_entry);
4447 spin_unlock_irqrestore(&old_rb->event_lock, flags);
10c6db11 4448
2f993cf0
ON
4449 event->rcu_batches = get_state_synchronize_rcu();
4450 event->rcu_pending = 1;
b69cf536 4451 }
10c6db11 4452
b69cf536 4453 if (rb) {
2f993cf0
ON
4454 if (event->rcu_pending) {
4455 cond_synchronize_rcu(event->rcu_batches);
4456 event->rcu_pending = 0;
4457 }
4458
b69cf536
PZ
4459 spin_lock_irqsave(&rb->event_lock, flags);
4460 list_add_rcu(&event->rb_entry, &rb->event_list);
4461 spin_unlock_irqrestore(&rb->event_lock, flags);
4462 }
4463
4464 rcu_assign_pointer(event->rb, rb);
4465
4466 if (old_rb) {
4467 ring_buffer_put(old_rb);
4468 /*
 4469		 * Since we detached before setting the new rb (so that we
 4470		 * could attach the new rb), we could have missed a wakeup.
4471 * Provide it now.
4472 */
4473 wake_up_all(&event->waitq);
4474 }
10c6db11
PZ
4475}
4476
4477static void ring_buffer_wakeup(struct perf_event *event)
4478{
4479 struct ring_buffer *rb;
4480
4481 rcu_read_lock();
4482 rb = rcu_dereference(event->rb);
9bb5d40c
PZ
4483 if (rb) {
4484 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
4485 wake_up_all(&event->waitq);
4486 }
10c6db11
PZ
4487 rcu_read_unlock();
4488}
4489
fdc26706 4490struct ring_buffer *ring_buffer_get(struct perf_event *event)
7b732a75 4491{
76369139 4492 struct ring_buffer *rb;
7b732a75 4493
ac9721f3 4494 rcu_read_lock();
76369139
FW
4495 rb = rcu_dereference(event->rb);
4496 if (rb) {
4497 if (!atomic_inc_not_zero(&rb->refcount))
4498 rb = NULL;
ac9721f3
PZ
4499 }
4500 rcu_read_unlock();
4501
76369139 4502 return rb;
ac9721f3
PZ
4503}
4504
fdc26706 4505void ring_buffer_put(struct ring_buffer *rb)
ac9721f3 4506{
76369139 4507 if (!atomic_dec_and_test(&rb->refcount))
ac9721f3 4508 return;
7b732a75 4509
9bb5d40c 4510 WARN_ON_ONCE(!list_empty(&rb->event_list));
10c6db11 4511
76369139 4512 call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a75
PZ
4513}
4514
4515static void perf_mmap_open(struct vm_area_struct *vma)
4516{
cdd6c482 4517 struct perf_event *event = vma->vm_file->private_data;
7b732a75 4518
cdd6c482 4519 atomic_inc(&event->mmap_count);
9bb5d40c 4520 atomic_inc(&event->rb->mmap_count);
1e0fb9ec 4521
45bfb2e5
PZ
4522 if (vma->vm_pgoff)
4523 atomic_inc(&event->rb->aux_mmap_count);
4524
1e0fb9ec
AL
4525 if (event->pmu->event_mapped)
4526 event->pmu->event_mapped(event);
7b732a75
PZ
4527}
4528
9bb5d40c
PZ
4529/*
4530 * A buffer can be mmap()ed multiple times; either directly through the same
4531 * event, or through other events by use of perf_event_set_output().
4532 *
4533 * In order to undo the VM accounting done by perf_mmap() we need to destroy
4534 * the buffer here, where we still have a VM context. This means we need
4535 * to detach all events redirecting to us.
4536 */
7b732a75
PZ
4537static void perf_mmap_close(struct vm_area_struct *vma)
4538{
cdd6c482 4539 struct perf_event *event = vma->vm_file->private_data;
7b732a75 4540
b69cf536 4541 struct ring_buffer *rb = ring_buffer_get(event);
9bb5d40c
PZ
4542 struct user_struct *mmap_user = rb->mmap_user;
4543 int mmap_locked = rb->mmap_locked;
4544 unsigned long size = perf_data_size(rb);
789f90fc 4545
1e0fb9ec
AL
4546 if (event->pmu->event_unmapped)
4547 event->pmu->event_unmapped(event);
4548
45bfb2e5
PZ
4549 /*
4550 * rb->aux_mmap_count will always drop before rb->mmap_count and
4551 * event->mmap_count, so it is ok to use event->mmap_mutex to
4552 * serialize with perf_mmap here.
4553 */
4554 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
4555 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
4556 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
4557 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
4558
4559 rb_free_aux(rb);
4560 mutex_unlock(&event->mmap_mutex);
4561 }
4562
9bb5d40c
PZ
4563 atomic_dec(&rb->mmap_count);
4564
4565 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
b69cf536 4566 goto out_put;
9bb5d40c 4567
b69cf536 4568 ring_buffer_attach(event, NULL);
9bb5d40c
PZ
4569 mutex_unlock(&event->mmap_mutex);
4570
4571 /* If there's still other mmap()s of this buffer, we're done. */
b69cf536
PZ
4572 if (atomic_read(&rb->mmap_count))
4573 goto out_put;
ac9721f3 4574
9bb5d40c
PZ
4575 /*
4576 * No other mmap()s, detach from all other events that might redirect
4577 * into the now unreachable buffer. Somewhat complicated by the
4578 * fact that rb::event_lock otherwise nests inside mmap_mutex.
4579 */
4580again:
4581 rcu_read_lock();
4582 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
4583 if (!atomic_long_inc_not_zero(&event->refcount)) {
4584 /*
4585 * This event is en-route to free_event() which will
4586 * detach it and remove it from the list.
4587 */
4588 continue;
4589 }
4590 rcu_read_unlock();
789f90fc 4591
9bb5d40c
PZ
4592 mutex_lock(&event->mmap_mutex);
4593 /*
4594 * Check we didn't race with perf_event_set_output() which can
4595 * swizzle the rb from under us while we were waiting to
4596 * acquire mmap_mutex.
4597 *
 4598		 * If we find a different rb, ignore this event; a later
 4599		 * iteration will no longer find it on the list. We still
 4600		 * have to restart the iteration to make sure we're not now
4601 * iterating the wrong list.
4602 */
b69cf536
PZ
4603 if (event->rb == rb)
4604 ring_buffer_attach(event, NULL);
4605
cdd6c482 4606 mutex_unlock(&event->mmap_mutex);
9bb5d40c 4607 put_event(event);
ac9721f3 4608
9bb5d40c
PZ
4609 /*
 4610		 * Restart the iteration; either we're on the wrong list or
 4611		 * we destroyed its integrity by doing a deletion.
4612 */
4613 goto again;
7b732a75 4614 }
9bb5d40c
PZ
4615 rcu_read_unlock();
4616
4617 /*
 4618	 * It could be there are still a few 0-ref events on the list; they'll
4619 * get cleaned up by free_event() -- they'll also still have their
4620 * ref on the rb and will free it whenever they are done with it.
4621 *
4622 * Aside from that, this buffer is 'fully' detached and unmapped,
4623 * undo the VM accounting.
4624 */
4625
4626 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
4627 vma->vm_mm->pinned_vm -= mmap_locked;
4628 free_uid(mmap_user);
4629
b69cf536 4630out_put:
9bb5d40c 4631 ring_buffer_put(rb); /* could be last */
37d81828
PM
4632}
4633
f0f37e2f 4634static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea8 4635 .open = perf_mmap_open,
45bfb2e5 4636	.close		= perf_mmap_close, /* non-mergeable */
43a21ea8
PZ
4637 .fault = perf_mmap_fault,
4638 .page_mkwrite = perf_mmap_fault,
37d81828
PM
4639};
4640
4641static int perf_mmap(struct file *file, struct vm_area_struct *vma)
4642{
cdd6c482 4643 struct perf_event *event = file->private_data;
22a4f650 4644 unsigned long user_locked, user_lock_limit;
789f90fc 4645 struct user_struct *user = current_user();
22a4f650 4646 unsigned long locked, lock_limit;
45bfb2e5 4647 struct ring_buffer *rb = NULL;
7b732a75
PZ
4648 unsigned long vma_size;
4649 unsigned long nr_pages;
45bfb2e5 4650 long user_extra = 0, extra = 0;
d57e34fd 4651 int ret = 0, flags = 0;
37d81828 4652
c7920614
PZ
4653 /*
4654 * Don't allow mmap() of inherited per-task counters. This would
4655 * create a performance issue due to all children writing to the
76369139 4656 * same rb.
c7920614
PZ
4657 */
4658 if (event->cpu == -1 && event->attr.inherit)
4659 return -EINVAL;
4660
43a21ea8 4661 if (!(vma->vm_flags & VM_SHARED))
37d81828 4662 return -EINVAL;
7b732a75
PZ
4663
4664 vma_size = vma->vm_end - vma->vm_start;
45bfb2e5
PZ
4665
4666 if (vma->vm_pgoff == 0) {
4667 nr_pages = (vma_size / PAGE_SIZE) - 1;
4668 } else {
4669 /*
 4670		 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
 4671		 * mapped; all subsequent mappings should have the same size
 4672		 * and offset. Must be above the normal perf buffer.
4673 */
4674 u64 aux_offset, aux_size;
4675
4676 if (!event->rb)
4677 return -EINVAL;
4678
4679 nr_pages = vma_size / PAGE_SIZE;
4680
4681 mutex_lock(&event->mmap_mutex);
4682 ret = -EINVAL;
4683
4684 rb = event->rb;
4685 if (!rb)
4686 goto aux_unlock;
4687
4688 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
4689 aux_size = ACCESS_ONCE(rb->user_page->aux_size);
4690
4691 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
4692 goto aux_unlock;
4693
4694 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
4695 goto aux_unlock;
4696
4697 /* already mapped with a different offset */
4698 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
4699 goto aux_unlock;
4700
4701 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
4702 goto aux_unlock;
4703
4704 /* already mapped with a different size */
4705 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
4706 goto aux_unlock;
4707
4708 if (!is_power_of_2(nr_pages))
4709 goto aux_unlock;
4710
4711 if (!atomic_inc_not_zero(&rb->mmap_count))
4712 goto aux_unlock;
4713
4714 if (rb_has_aux(rb)) {
4715 atomic_inc(&rb->aux_mmap_count);
4716 ret = 0;
4717 goto unlock;
4718 }
4719
4720 atomic_set(&rb->aux_mmap_count, 1);
4721 user_extra = nr_pages;
4722
4723 goto accounting;
4724 }
7b732a75 4725
7730d865 4726 /*
76369139 4727	 * If we have rb pages, ensure they're a power-of-two number, so we
7730d865
PZ
4728 * can do bitmasks instead of modulo.
4729 */
2ed11312 4730 if (nr_pages != 0 && !is_power_of_2(nr_pages))
37d81828
PM
4731 return -EINVAL;
4732
7b732a75 4733 if (vma_size != PAGE_SIZE * (1 + nr_pages))
37d81828
PM
4734 return -EINVAL;
4735
cdd6c482 4736 WARN_ON_ONCE(event->ctx->parent_ctx);
9bb5d40c 4737again:
cdd6c482 4738 mutex_lock(&event->mmap_mutex);
76369139 4739 if (event->rb) {
9bb5d40c 4740 if (event->rb->nr_pages != nr_pages) {
ebb3c4c4 4741 ret = -EINVAL;
9bb5d40c
PZ
4742 goto unlock;
4743 }
4744
4745 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
4746 /*
4747 * Raced against perf_mmap_close() through
4748 * perf_event_set_output(). Try again, hope for better
4749 * luck.
4750 */
4751 mutex_unlock(&event->mmap_mutex);
4752 goto again;
4753 }
4754
ebb3c4c4
PZ
4755 goto unlock;
4756 }
4757
789f90fc 4758 user_extra = nr_pages + 1;
45bfb2e5
PZ
4759
4760accounting:
cdd6c482 4761 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
a3862d3f
IM
4762
4763 /*
4764 * Increase the limit linearly with more CPUs:
4765 */
4766 user_lock_limit *= num_online_cpus();
4767
789f90fc 4768 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
c5078f78 4769
789f90fc
PZ
4770 if (user_locked > user_lock_limit)
4771 extra = user_locked - user_lock_limit;
7b732a75 4772
78d7d407 4773 lock_limit = rlimit(RLIMIT_MEMLOCK);
7b732a75 4774 lock_limit >>= PAGE_SHIFT;
bc3e53f6 4775 locked = vma->vm_mm->pinned_vm + extra;
7b732a75 4776
459ec28a
IM
4777 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
4778 !capable(CAP_IPC_LOCK)) {
ebb3c4c4
PZ
4779 ret = -EPERM;
4780 goto unlock;
4781 }
7b732a75 4782
45bfb2e5 4783 WARN_ON(!rb && event->rb);
906010b2 4784
d57e34fd 4785 if (vma->vm_flags & VM_WRITE)
76369139 4786 flags |= RING_BUFFER_WRITABLE;
d57e34fd 4787
76369139 4788 if (!rb) {
45bfb2e5
PZ
4789 rb = rb_alloc(nr_pages,
4790 event->attr.watermark ? event->attr.wakeup_watermark : 0,
4791 event->cpu, flags);
26cb63ad 4792
45bfb2e5
PZ
4793 if (!rb) {
4794 ret = -ENOMEM;
4795 goto unlock;
4796 }
43a21ea8 4797
45bfb2e5
PZ
4798 atomic_set(&rb->mmap_count, 1);
4799 rb->mmap_user = get_current_user();
4800 rb->mmap_locked = extra;
26cb63ad 4801
45bfb2e5 4802 ring_buffer_attach(event, rb);
ac9721f3 4803
45bfb2e5
PZ
4804 perf_event_init_userpage(event);
4805 perf_event_update_userpage(event);
4806 } else {
1a594131
AS
4807 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
4808 event->attr.aux_watermark, flags);
45bfb2e5
PZ
4809 if (!ret)
4810 rb->aux_mmap_locked = extra;
4811 }
9a0f05cb 4812
ebb3c4c4 4813unlock:
45bfb2e5
PZ
4814 if (!ret) {
4815 atomic_long_add(user_extra, &user->locked_vm);
4816 vma->vm_mm->pinned_vm += extra;
4817
ac9721f3 4818 atomic_inc(&event->mmap_count);
45bfb2e5
PZ
4819 } else if (rb) {
4820 atomic_dec(&rb->mmap_count);
4821 }
4822aux_unlock:
cdd6c482 4823 mutex_unlock(&event->mmap_mutex);
37d81828 4824
9bb5d40c
PZ
4825 /*
4826 * Since pinned accounting is per vm we cannot allow fork() to copy our
4827 * vma.
4828 */
26cb63ad 4829 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
37d81828 4830 vma->vm_ops = &perf_mmap_vmops;
7b732a75 4831
1e0fb9ec
AL
4832 if (event->pmu->event_mapped)
4833 event->pmu->event_mapped(event);
4834
7b732a75 4835 return ret;
37d81828
PM
4836}
4837
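/*
 * Illustrative userspace sketch, not part of core.c: the mapping that
 * perf_mmap() above validates. The data area must be 2^n pages plus one
 * extra page for struct perf_event_mmap_page; "fd" is assumed to come
 * from perf_event_open().
 */
#include <stddef.h>
#include <unistd.h>
#include <sys/mman.h>

static void *map_perf_buffer(int fd, unsigned int data_pages /* power of two */)
{
	size_t len = (size_t)(1 + data_pages) * sysconf(_SC_PAGESIZE);
	void *base;

	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	return base == MAP_FAILED ? NULL : base;
}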
3c446b3d
PZ
4838static int perf_fasync(int fd, struct file *filp, int on)
4839{
496ad9aa 4840 struct inode *inode = file_inode(filp);
cdd6c482 4841 struct perf_event *event = filp->private_data;
3c446b3d
PZ
4842 int retval;
4843
4844 mutex_lock(&inode->i_mutex);
cdd6c482 4845 retval = fasync_helper(fd, filp, on, &event->fasync);
3c446b3d
PZ
4846 mutex_unlock(&inode->i_mutex);
4847
4848 if (retval < 0)
4849 return retval;
4850
4851 return 0;
4852}
4853
0793a61d 4854static const struct file_operations perf_fops = {
3326c1ce 4855 .llseek = no_llseek,
0793a61d
TG
4856 .release = perf_release,
4857 .read = perf_read,
4858 .poll = perf_poll,
d859e29f 4859 .unlocked_ioctl = perf_ioctl,
b3f20785 4860 .compat_ioctl = perf_compat_ioctl,
37d81828 4861 .mmap = perf_mmap,
3c446b3d 4862 .fasync = perf_fasync,
0793a61d
TG
4863};
4864
925d519a 4865/*
cdd6c482 4866 * Perf event wakeup
925d519a
PZ
4867 *
4868 * If there's data, ensure we set the poll() state and publish everything
4869 * to user-space before waking everybody up.
4870 */
4871
fed66e2c
PZ
4872static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
4873{
4874 /* only the parent has fasync state */
4875 if (event->parent)
4876 event = event->parent;
4877 return &event->fasync;
4878}
4879
cdd6c482 4880void perf_event_wakeup(struct perf_event *event)
925d519a 4881{
10c6db11 4882 ring_buffer_wakeup(event);
4c9e2542 4883
cdd6c482 4884 if (event->pending_kill) {
fed66e2c 4885 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
cdd6c482 4886 event->pending_kill = 0;
4c9e2542 4887 }
925d519a
PZ
4888}
4889
e360adbe 4890static void perf_pending_event(struct irq_work *entry)
79f14641 4891{
cdd6c482
IM
4892 struct perf_event *event = container_of(entry,
4893 struct perf_event, pending);
d525211f
PZ
4894 int rctx;
4895
4896 rctx = perf_swevent_get_recursion_context();
4897 /*
4898 * If we 'fail' here, that's OK, it means recursion is already disabled
4899 * and we won't recurse 'further'.
4900 */
79f14641 4901
cdd6c482
IM
4902 if (event->pending_disable) {
4903 event->pending_disable = 0;
4904 __perf_event_disable(event);
79f14641
PZ
4905 }
4906
cdd6c482
IM
4907 if (event->pending_wakeup) {
4908 event->pending_wakeup = 0;
4909 perf_event_wakeup(event);
79f14641 4910 }
d525211f
PZ
4911
4912 if (rctx >= 0)
4913 perf_swevent_put_recursion_context(rctx);
79f14641
PZ
4914}
4915
39447b38
ZY
4916/*
4917 * We assume there is only KVM supporting the callbacks.
4918 * Later on, we might change it to a list if there is
4919 * another virtualization implementation supporting the callbacks.
4920 */
4921struct perf_guest_info_callbacks *perf_guest_cbs;
4922
4923int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4924{
4925 perf_guest_cbs = cbs;
4926 return 0;
4927}
4928EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
4929
4930int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4931{
4932 perf_guest_cbs = NULL;
4933 return 0;
4934}
4935EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
4936
4018994f
JO
4937static void
4938perf_output_sample_regs(struct perf_output_handle *handle,
4939 struct pt_regs *regs, u64 mask)
4940{
4941 int bit;
4942
4943 for_each_set_bit(bit, (const unsigned long *) &mask,
4944 sizeof(mask) * BITS_PER_BYTE) {
4945 u64 val;
4946
4947 val = perf_reg_value(regs, bit);
4948 perf_output_put(handle, val);
4949 }
4950}
4951
60e2364e 4952static void perf_sample_regs_user(struct perf_regs *regs_user,
88a7c26a
AL
4953 struct pt_regs *regs,
4954 struct pt_regs *regs_user_copy)
4018994f 4955{
88a7c26a
AL
4956 if (user_mode(regs)) {
4957 regs_user->abi = perf_reg_abi(current);
2565711f 4958 regs_user->regs = regs;
88a7c26a
AL
4959 } else if (current->mm) {
4960 perf_get_regs_user(regs_user, regs, regs_user_copy);
2565711f
PZ
4961 } else {
4962 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
4963 regs_user->regs = NULL;
4018994f
JO
4964 }
4965}
4966
60e2364e
SE
4967static void perf_sample_regs_intr(struct perf_regs *regs_intr,
4968 struct pt_regs *regs)
4969{
4970 regs_intr->regs = regs;
4971 regs_intr->abi = perf_reg_abi(current);
4972}
4973
4974
c5ebcedb
JO
4975/*
4976 * Get remaining task size from user stack pointer.
4977 *
 4978 * It'd be better to take the stack vma map and limit this more
 4979 * precisely, but there's no way to get it safely under interrupt,
 4980 * so use TASK_SIZE as the limit.
4981 */
4982static u64 perf_ustack_task_size(struct pt_regs *regs)
4983{
4984 unsigned long addr = perf_user_stack_pointer(regs);
4985
4986 if (!addr || addr >= TASK_SIZE)
4987 return 0;
4988
4989 return TASK_SIZE - addr;
4990}
4991
4992static u16
4993perf_sample_ustack_size(u16 stack_size, u16 header_size,
4994 struct pt_regs *regs)
4995{
4996 u64 task_size;
4997
4998 /* No regs, no stack pointer, no dump. */
4999 if (!regs)
5000 return 0;
5001
5002 /*
 5003	 * Check whether the requested stack size fits into:
 5004	 *   - TASK_SIZE
 5005	 *     If it doesn't, we limit the size to TASK_SIZE.
 5006	 *
 5007	 *   - the remaining sample size
 5008	 *     If it doesn't, we shrink the stack size to
 5009	 *     fit into the remaining sample size.
5010 */
5011
5012 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
5013 stack_size = min(stack_size, (u16) task_size);
5014
5015 /* Current header size plus static size and dynamic size. */
5016 header_size += 2 * sizeof(u64);
5017
5018 /* Do we fit in with the current stack dump size? */
5019 if ((u16) (header_size + stack_size) < header_size) {
5020 /*
5021 * If we overflow the maximum size for the sample,
5022 * we customize the stack dump size to fit in.
5023 */
5024 stack_size = USHRT_MAX - header_size - sizeof(u64);
5025 stack_size = round_up(stack_size, sizeof(u64));
5026 }
5027
5028 return stack_size;
5029}
5030
5031static void
5032perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5033 struct pt_regs *regs)
5034{
5035 /* Case of a kernel thread, nothing to dump */
5036 if (!regs) {
5037 u64 size = 0;
5038 perf_output_put(handle, size);
5039 } else {
5040 unsigned long sp;
5041 unsigned int rem;
5042 u64 dyn_size;
5043
5044 /*
5045 * We dump:
5046 * static size
5047 * - the size requested by user or the best one we can fit
5048 * in to the sample max size
5049 * data
5050 * - user stack dump data
5051 * dynamic size
5052 * - the actual dumped size
5053 */
5054
5055 /* Static size. */
5056 perf_output_put(handle, dump_size);
5057
5058 /* Data. */
5059 sp = perf_user_stack_pointer(regs);
5060 rem = __output_copy_user(handle, (void *) sp, dump_size);
5061 dyn_size = dump_size - rem;
5062
5063 perf_output_skip(handle, rem);
5064
5065 /* Dynamic size. */
5066 perf_output_put(handle, dyn_size);
5067 }
5068}
5069
c980d109
ACM
5070static void __perf_event_header__init_id(struct perf_event_header *header,
5071 struct perf_sample_data *data,
5072 struct perf_event *event)
6844c09d
ACM
5073{
5074 u64 sample_type = event->attr.sample_type;
5075
5076 data->type = sample_type;
5077 header->size += event->id_header_size;
5078
5079 if (sample_type & PERF_SAMPLE_TID) {
5080 /* namespace issues */
5081 data->tid_entry.pid = perf_event_pid(event, current);
5082 data->tid_entry.tid = perf_event_tid(event, current);
5083 }
5084
5085 if (sample_type & PERF_SAMPLE_TIME)
34f43927 5086 data->time = perf_event_clock(event);
6844c09d 5087
ff3d527c 5088 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
6844c09d
ACM
5089 data->id = primary_event_id(event);
5090
5091 if (sample_type & PERF_SAMPLE_STREAM_ID)
5092 data->stream_id = event->id;
5093
5094 if (sample_type & PERF_SAMPLE_CPU) {
5095 data->cpu_entry.cpu = raw_smp_processor_id();
5096 data->cpu_entry.reserved = 0;
5097 }
5098}
5099
76369139
FW
5100void perf_event_header__init_id(struct perf_event_header *header,
5101 struct perf_sample_data *data,
5102 struct perf_event *event)
c980d109
ACM
5103{
5104 if (event->attr.sample_id_all)
5105 __perf_event_header__init_id(header, data, event);
5106}
5107
5108static void __perf_event__output_id_sample(struct perf_output_handle *handle,
5109 struct perf_sample_data *data)
5110{
5111 u64 sample_type = data->type;
5112
5113 if (sample_type & PERF_SAMPLE_TID)
5114 perf_output_put(handle, data->tid_entry);
5115
5116 if (sample_type & PERF_SAMPLE_TIME)
5117 perf_output_put(handle, data->time);
5118
5119 if (sample_type & PERF_SAMPLE_ID)
5120 perf_output_put(handle, data->id);
5121
5122 if (sample_type & PERF_SAMPLE_STREAM_ID)
5123 perf_output_put(handle, data->stream_id);
5124
5125 if (sample_type & PERF_SAMPLE_CPU)
5126 perf_output_put(handle, data->cpu_entry);
ff3d527c
AH
5127
5128 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5129 perf_output_put(handle, data->id);
c980d109
ACM
5130}
5131
76369139
FW
5132void perf_event__output_id_sample(struct perf_event *event,
5133 struct perf_output_handle *handle,
5134 struct perf_sample_data *sample)
c980d109
ACM
5135{
5136 if (event->attr.sample_id_all)
5137 __perf_event__output_id_sample(handle, sample);
5138}
5139
3dab77fb 5140static void perf_output_read_one(struct perf_output_handle *handle,
eed01528
SE
5141 struct perf_event *event,
5142 u64 enabled, u64 running)
3dab77fb 5143{
cdd6c482 5144 u64 read_format = event->attr.read_format;
3dab77fb
PZ
5145 u64 values[4];
5146 int n = 0;
5147
b5e58793 5148 values[n++] = perf_event_count(event);
3dab77fb 5149 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
eed01528 5150 values[n++] = enabled +
cdd6c482 5151 atomic64_read(&event->child_total_time_enabled);
3dab77fb
PZ
5152 }
5153 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
eed01528 5154 values[n++] = running +
cdd6c482 5155 atomic64_read(&event->child_total_time_running);
3dab77fb
PZ
5156 }
5157 if (read_format & PERF_FORMAT_ID)
cdd6c482 5158 values[n++] = primary_event_id(event);
3dab77fb 5159
76369139 5160 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
5161}
5162
5163/*
cdd6c482 5164 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3dab77fb
PZ
5165 */
5166static void perf_output_read_group(struct perf_output_handle *handle,
eed01528
SE
5167 struct perf_event *event,
5168 u64 enabled, u64 running)
3dab77fb 5169{
cdd6c482
IM
5170 struct perf_event *leader = event->group_leader, *sub;
5171 u64 read_format = event->attr.read_format;
3dab77fb
PZ
5172 u64 values[5];
5173 int n = 0;
5174
5175 values[n++] = 1 + leader->nr_siblings;
5176
5177 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
eed01528 5178 values[n++] = enabled;
3dab77fb
PZ
5179
5180 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
eed01528 5181 values[n++] = running;
3dab77fb 5182
cdd6c482 5183 if (leader != event)
3dab77fb
PZ
5184 leader->pmu->read(leader);
5185
b5e58793 5186 values[n++] = perf_event_count(leader);
3dab77fb 5187 if (read_format & PERF_FORMAT_ID)
cdd6c482 5188 values[n++] = primary_event_id(leader);
3dab77fb 5189
76369139 5190 __output_copy(handle, values, n * sizeof(u64));
3dab77fb 5191
65abc865 5192 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3dab77fb
PZ
5193 n = 0;
5194
6f5ab001
JO
5195 if ((sub != event) &&
5196 (sub->state == PERF_EVENT_STATE_ACTIVE))
3dab77fb
PZ
5197 sub->pmu->read(sub);
5198
b5e58793 5199 values[n++] = perf_event_count(sub);
3dab77fb 5200 if (read_format & PERF_FORMAT_ID)
cdd6c482 5201 values[n++] = primary_event_id(sub);
3dab77fb 5202
76369139 5203 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
5204 }
5205}
5206
eed01528
SE
5207#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
5208 PERF_FORMAT_TOTAL_TIME_RUNNING)
5209
3dab77fb 5210static void perf_output_read(struct perf_output_handle *handle,
cdd6c482 5211 struct perf_event *event)
3dab77fb 5212{
e3f3541c 5213 u64 enabled = 0, running = 0, now;
eed01528
SE
5214 u64 read_format = event->attr.read_format;
5215
5216 /*
5217 * compute total_time_enabled, total_time_running
5218 * based on snapshot values taken when the event
5219 * was last scheduled in.
5220 *
 5221	 * we cannot simply call update_context_time()
 5222	 * because of locking issues, as we are called in
 5223	 * NMI context
5224 */
c4794295 5225 if (read_format & PERF_FORMAT_TOTAL_TIMES)
e3f3541c 5226 calc_timer_values(event, &now, &enabled, &running);
eed01528 5227
cdd6c482 5228 if (event->attr.read_format & PERF_FORMAT_GROUP)
eed01528 5229 perf_output_read_group(handle, event, enabled, running);
3dab77fb 5230 else
eed01528 5231 perf_output_read_one(handle, event, enabled, running);
3dab77fb
PZ
5232}
5233
5622f295
MM
5234void perf_output_sample(struct perf_output_handle *handle,
5235 struct perf_event_header *header,
5236 struct perf_sample_data *data,
cdd6c482 5237 struct perf_event *event)
5622f295
MM
5238{
5239 u64 sample_type = data->type;
5240
5241 perf_output_put(handle, *header);
5242
ff3d527c
AH
5243 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5244 perf_output_put(handle, data->id);
5245
5622f295
MM
5246 if (sample_type & PERF_SAMPLE_IP)
5247 perf_output_put(handle, data->ip);
5248
5249 if (sample_type & PERF_SAMPLE_TID)
5250 perf_output_put(handle, data->tid_entry);
5251
5252 if (sample_type & PERF_SAMPLE_TIME)
5253 perf_output_put(handle, data->time);
5254
5255 if (sample_type & PERF_SAMPLE_ADDR)
5256 perf_output_put(handle, data->addr);
5257
5258 if (sample_type & PERF_SAMPLE_ID)
5259 perf_output_put(handle, data->id);
5260
5261 if (sample_type & PERF_SAMPLE_STREAM_ID)
5262 perf_output_put(handle, data->stream_id);
5263
5264 if (sample_type & PERF_SAMPLE_CPU)
5265 perf_output_put(handle, data->cpu_entry);
5266
5267 if (sample_type & PERF_SAMPLE_PERIOD)
5268 perf_output_put(handle, data->period);
5269
5270 if (sample_type & PERF_SAMPLE_READ)
cdd6c482 5271 perf_output_read(handle, event);
5622f295
MM
5272
5273 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5274 if (data->callchain) {
5275 int size = 1;
5276
5277 if (data->callchain)
5278 size += data->callchain->nr;
5279
5280 size *= sizeof(u64);
5281
76369139 5282 __output_copy(handle, data->callchain, size);
5622f295
MM
5283 } else {
5284 u64 nr = 0;
5285 perf_output_put(handle, nr);
5286 }
5287 }
5288
5289 if (sample_type & PERF_SAMPLE_RAW) {
5290 if (data->raw) {
5291 perf_output_put(handle, data->raw->size);
76369139
FW
5292 __output_copy(handle, data->raw->data,
5293 data->raw->size);
5622f295
MM
5294 } else {
5295 struct {
5296 u32 size;
5297 u32 data;
5298 } raw = {
5299 .size = sizeof(u32),
5300 .data = 0,
5301 };
5302 perf_output_put(handle, raw);
5303 }
5304 }
a7ac67ea 5305
bce38cd5
SE
5306 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5307 if (data->br_stack) {
5308 size_t size;
5309
5310 size = data->br_stack->nr
5311 * sizeof(struct perf_branch_entry);
5312
5313 perf_output_put(handle, data->br_stack->nr);
5314 perf_output_copy(handle, data->br_stack->entries, size);
5315 } else {
5316 /*
5317 * we always store at least the value of nr
5318 */
5319 u64 nr = 0;
5320 perf_output_put(handle, nr);
5321 }
5322 }
4018994f
JO
5323
5324 if (sample_type & PERF_SAMPLE_REGS_USER) {
5325 u64 abi = data->regs_user.abi;
5326
5327 /*
5328 * If there are no regs to dump, notice it through
5329 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5330 */
5331 perf_output_put(handle, abi);
5332
5333 if (abi) {
5334 u64 mask = event->attr.sample_regs_user;
5335 perf_output_sample_regs(handle,
5336 data->regs_user.regs,
5337 mask);
5338 }
5339 }
c5ebcedb 5340
a5cdd40c 5341 if (sample_type & PERF_SAMPLE_STACK_USER) {
c5ebcedb
JO
5342 perf_output_sample_ustack(handle,
5343 data->stack_user_size,
5344 data->regs_user.regs);
a5cdd40c 5345 }
c3feedf2
AK
5346
5347 if (sample_type & PERF_SAMPLE_WEIGHT)
5348 perf_output_put(handle, data->weight);
d6be9ad6
SE
5349
5350 if (sample_type & PERF_SAMPLE_DATA_SRC)
5351 perf_output_put(handle, data->data_src.val);
a5cdd40c 5352
fdfbbd07
AK
5353 if (sample_type & PERF_SAMPLE_TRANSACTION)
5354 perf_output_put(handle, data->txn);
5355
60e2364e
SE
5356 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5357 u64 abi = data->regs_intr.abi;
5358 /*
 5359		 * If there are no regs to dump, notice it through the
 5360		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5361 */
5362 perf_output_put(handle, abi);
5363
5364 if (abi) {
5365 u64 mask = event->attr.sample_regs_intr;
5366
5367 perf_output_sample_regs(handle,
5368 data->regs_intr.regs,
5369 mask);
5370 }
5371 }
5372
a5cdd40c
PZ
5373 if (!event->attr.watermark) {
5374 int wakeup_events = event->attr.wakeup_events;
5375
5376 if (wakeup_events) {
5377 struct ring_buffer *rb = handle->rb;
5378 int events = local_inc_return(&rb->events);
5379
5380 if (events >= wakeup_events) {
5381 local_sub(wakeup_events, &rb->events);
5382 local_inc(&rb->wakeup);
5383 }
5384 }
5385 }
5622f295
MM
5386}
5387
5388void perf_prepare_sample(struct perf_event_header *header,
5389 struct perf_sample_data *data,
cdd6c482 5390 struct perf_event *event,
5622f295 5391 struct pt_regs *regs)
7b732a75 5392{
cdd6c482 5393 u64 sample_type = event->attr.sample_type;
7b732a75 5394
cdd6c482 5395 header->type = PERF_RECORD_SAMPLE;
c320c7b7 5396 header->size = sizeof(*header) + event->header_size;
5622f295
MM
5397
5398 header->misc = 0;
5399 header->misc |= perf_misc_flags(regs);
6fab0192 5400
c980d109 5401 __perf_event_header__init_id(header, data, event);
6844c09d 5402
c320c7b7 5403 if (sample_type & PERF_SAMPLE_IP)
5622f295
MM
5404 data->ip = perf_instruction_pointer(regs);
5405
b23f3325 5406 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5622f295 5407 int size = 1;
394ee076 5408
e6dab5ff 5409 data->callchain = perf_callchain(event, regs);
5622f295
MM
5410
5411 if (data->callchain)
5412 size += data->callchain->nr;
5413
5414 header->size += size * sizeof(u64);
394ee076
PZ
5415 }
5416
3a43ce68 5417 if (sample_type & PERF_SAMPLE_RAW) {
a044560c
PZ
5418 int size = sizeof(u32);
5419
5420 if (data->raw)
5421 size += data->raw->size;
5422 else
5423 size += sizeof(u32);
5424
5425 WARN_ON_ONCE(size & (sizeof(u64)-1));
5622f295 5426 header->size += size;
7f453c24 5427 }
bce38cd5
SE
5428
5429 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5430 int size = sizeof(u64); /* nr */
5431 if (data->br_stack) {
5432 size += data->br_stack->nr
5433 * sizeof(struct perf_branch_entry);
5434 }
5435 header->size += size;
5436 }
4018994f 5437
2565711f 5438 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
88a7c26a
AL
5439 perf_sample_regs_user(&data->regs_user, regs,
5440 &data->regs_user_copy);
2565711f 5441
4018994f
JO
5442 if (sample_type & PERF_SAMPLE_REGS_USER) {
5443 /* regs dump ABI info */
5444 int size = sizeof(u64);
5445
4018994f
JO
5446 if (data->regs_user.regs) {
5447 u64 mask = event->attr.sample_regs_user;
5448 size += hweight64(mask) * sizeof(u64);
5449 }
5450
5451 header->size += size;
5452 }
c5ebcedb
JO
5453
5454 if (sample_type & PERF_SAMPLE_STACK_USER) {
5455 /*
 5456		 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
 5457		 * processed last, or an additional check must be added when a
 5458		 * new sample type is introduced, because we could eat up
 5459		 * the rest of the sample size.
5460 */
c5ebcedb
JO
5461 u16 stack_size = event->attr.sample_stack_user;
5462 u16 size = sizeof(u64);
5463
c5ebcedb 5464 stack_size = perf_sample_ustack_size(stack_size, header->size,
2565711f 5465 data->regs_user.regs);
c5ebcedb
JO
5466
5467 /*
5468 * If there is something to dump, add space for the dump
5469 * itself and for the field that tells the dynamic size,
 5470		 * which is how many bytes were actually dumped.
5471 */
5472 if (stack_size)
5473 size += sizeof(u64) + stack_size;
5474
5475 data->stack_user_size = stack_size;
5476 header->size += size;
5477 }
60e2364e
SE
5478
5479 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5480 /* regs dump ABI info */
5481 int size = sizeof(u64);
5482
5483 perf_sample_regs_intr(&data->regs_intr, regs);
5484
5485 if (data->regs_intr.regs) {
5486 u64 mask = event->attr.sample_regs_intr;
5487
5488 size += hweight64(mask) * sizeof(u64);
5489 }
5490
5491 header->size += size;
5492 }
5622f295 5493}
7f453c24 5494
21509084
YZ
5495void perf_event_output(struct perf_event *event,
5496 struct perf_sample_data *data,
5497 struct pt_regs *regs)
5622f295
MM
5498{
5499 struct perf_output_handle handle;
5500 struct perf_event_header header;
689802b2 5501
927c7a9e
FW
5502 /* protect the callchain buffers */
5503 rcu_read_lock();
5504
cdd6c482 5505 perf_prepare_sample(&header, data, event, regs);
5c148194 5506
a7ac67ea 5507 if (perf_output_begin(&handle, event, header.size))
927c7a9e 5508 goto exit;
0322cd6e 5509
cdd6c482 5510 perf_output_sample(&handle, &header, data, event);
f413cdb8 5511
8a057d84 5512 perf_output_end(&handle);
927c7a9e
FW
5513
5514exit:
5515 rcu_read_unlock();
0322cd6e
PZ
5516}
5517
38b200d6 5518/*
cdd6c482 5519 * read event_id
38b200d6
PZ
5520 */
5521
5522struct perf_read_event {
5523 struct perf_event_header header;
5524
5525 u32 pid;
5526 u32 tid;
38b200d6
PZ
5527};
5528
5529static void
cdd6c482 5530perf_event_read_event(struct perf_event *event,
38b200d6
PZ
5531 struct task_struct *task)
5532{
5533 struct perf_output_handle handle;
c980d109 5534 struct perf_sample_data sample;
dfc65094 5535 struct perf_read_event read_event = {
38b200d6 5536 .header = {
cdd6c482 5537 .type = PERF_RECORD_READ,
38b200d6 5538 .misc = 0,
c320c7b7 5539 .size = sizeof(read_event) + event->read_size,
38b200d6 5540 },
cdd6c482
IM
5541 .pid = perf_event_pid(event, task),
5542 .tid = perf_event_tid(event, task),
38b200d6 5543 };
3dab77fb 5544 int ret;
38b200d6 5545
c980d109 5546 perf_event_header__init_id(&read_event.header, &sample, event);
a7ac67ea 5547 ret = perf_output_begin(&handle, event, read_event.header.size);
38b200d6
PZ
5548 if (ret)
5549 return;
5550
dfc65094 5551 perf_output_put(&handle, read_event);
cdd6c482 5552 perf_output_read(&handle, event);
c980d109 5553 perf_event__output_id_sample(event, &handle, &sample);
3dab77fb 5554
38b200d6
PZ
5555 perf_output_end(&handle);
5556}
5557
52d857a8
JO
5558typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
5559
5560static void
5561perf_event_aux_ctx(struct perf_event_context *ctx,
52d857a8
JO
5562 perf_event_aux_output_cb output,
5563 void *data)
5564{
5565 struct perf_event *event;
5566
5567 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5568 if (event->state < PERF_EVENT_STATE_INACTIVE)
5569 continue;
5570 if (!event_filter_match(event))
5571 continue;
67516844 5572 output(event, data);
52d857a8
JO
5573 }
5574}
5575
5576static void
67516844 5577perf_event_aux(perf_event_aux_output_cb output, void *data,
52d857a8
JO
5578 struct perf_event_context *task_ctx)
5579{
5580 struct perf_cpu_context *cpuctx;
5581 struct perf_event_context *ctx;
5582 struct pmu *pmu;
5583 int ctxn;
5584
5585 rcu_read_lock();
5586 list_for_each_entry_rcu(pmu, &pmus, entry) {
5587 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
5588 if (cpuctx->unique_pmu != pmu)
5589 goto next;
67516844 5590 perf_event_aux_ctx(&cpuctx->ctx, output, data);
52d857a8
JO
5591 if (task_ctx)
5592 goto next;
5593 ctxn = pmu->task_ctx_nr;
5594 if (ctxn < 0)
5595 goto next;
5596 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
5597 if (ctx)
67516844 5598 perf_event_aux_ctx(ctx, output, data);
52d857a8
JO
5599next:
5600 put_cpu_ptr(pmu->pmu_cpu_context);
5601 }
5602
5603 if (task_ctx) {
5604 preempt_disable();
67516844 5605 perf_event_aux_ctx(task_ctx, output, data);
52d857a8
JO
5606 preempt_enable();
5607 }
5608 rcu_read_unlock();
5609}
5610
60313ebe 5611/*
9f498cc5
PZ
5612 * task tracking -- fork/exit
5613 *
13d7a241 5614 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
60313ebe
PZ
5615 */
5616
9f498cc5 5617struct perf_task_event {
3a80b4a3 5618 struct task_struct *task;
cdd6c482 5619 struct perf_event_context *task_ctx;
60313ebe
PZ
5620
5621 struct {
5622 struct perf_event_header header;
5623
5624 u32 pid;
5625 u32 ppid;
9f498cc5
PZ
5626 u32 tid;
5627 u32 ptid;
393b2ad8 5628 u64 time;
cdd6c482 5629 } event_id;
60313ebe
PZ
5630};
5631
67516844
JO
5632static int perf_event_task_match(struct perf_event *event)
5633{
13d7a241
SE
5634 return event->attr.comm || event->attr.mmap ||
5635 event->attr.mmap2 || event->attr.mmap_data ||
5636 event->attr.task;
67516844
JO
5637}
5638
cdd6c482 5639static void perf_event_task_output(struct perf_event *event,
52d857a8 5640 void *data)
60313ebe 5641{
52d857a8 5642 struct perf_task_event *task_event = data;
60313ebe 5643 struct perf_output_handle handle;
c980d109 5644 struct perf_sample_data sample;
9f498cc5 5645 struct task_struct *task = task_event->task;
c980d109 5646 int ret, size = task_event->event_id.header.size;
8bb39f9a 5647
67516844
JO
5648 if (!perf_event_task_match(event))
5649 return;
5650
c980d109 5651 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
60313ebe 5652
c980d109 5653 ret = perf_output_begin(&handle, event,
a7ac67ea 5654 task_event->event_id.header.size);
ef60777c 5655 if (ret)
c980d109 5656 goto out;
60313ebe 5657
cdd6c482
IM
5658 task_event->event_id.pid = perf_event_pid(event, task);
5659 task_event->event_id.ppid = perf_event_pid(event, current);
60313ebe 5660
cdd6c482
IM
5661 task_event->event_id.tid = perf_event_tid(event, task);
5662 task_event->event_id.ptid = perf_event_tid(event, current);
9f498cc5 5663
34f43927
PZ
5664 task_event->event_id.time = perf_event_clock(event);
5665
cdd6c482 5666 perf_output_put(&handle, task_event->event_id);
393b2ad8 5667
c980d109
ACM
5668 perf_event__output_id_sample(event, &handle, &sample);
5669
60313ebe 5670 perf_output_end(&handle);
c980d109
ACM
5671out:
5672 task_event->event_id.header.size = size;
60313ebe
PZ
5673}
5674
cdd6c482
IM
5675static void perf_event_task(struct task_struct *task,
5676 struct perf_event_context *task_ctx,
3a80b4a3 5677 int new)
60313ebe 5678{
9f498cc5 5679 struct perf_task_event task_event;
60313ebe 5680
cdd6c482
IM
5681 if (!atomic_read(&nr_comm_events) &&
5682 !atomic_read(&nr_mmap_events) &&
5683 !atomic_read(&nr_task_events))
60313ebe
PZ
5684 return;
5685
9f498cc5 5686 task_event = (struct perf_task_event){
3a80b4a3
PZ
5687 .task = task,
5688 .task_ctx = task_ctx,
cdd6c482 5689 .event_id = {
60313ebe 5690 .header = {
cdd6c482 5691 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
573402db 5692 .misc = 0,
cdd6c482 5693 .size = sizeof(task_event.event_id),
60313ebe 5694 },
573402db
PZ
5695 /* .pid */
5696 /* .ppid */
9f498cc5
PZ
5697 /* .tid */
5698 /* .ptid */
34f43927 5699 /* .time */
60313ebe
PZ
5700 },
5701 };
5702
67516844 5703 perf_event_aux(perf_event_task_output,
52d857a8
JO
5704 &task_event,
5705 task_ctx);
9f498cc5
PZ
5706}
5707
cdd6c482 5708void perf_event_fork(struct task_struct *task)
9f498cc5 5709{
cdd6c482 5710 perf_event_task(task, NULL, 1);
60313ebe
PZ
5711}
5712
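The fork/exit records above, like the comm and mmap records that follow, are only delivered to events whose attributes requested them (see perf_event_task_match()). A minimal userspace sketch of requesting this side-band stream with a dummy software event; the helper name is illustrative and error handling is omitted:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper: open an event that only carries side-band records. */
static int open_sideband_fd(pid_t pid)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_SW_DUMMY,
		.task		= 1,	/* PERF_RECORD_FORK / PERF_RECORD_EXIT */
		.comm		= 1,	/* PERF_RECORD_COMM */
		.mmap2		= 1,	/* PERF_RECORD_MMAP2 */
		.sample_id_all	= 1,	/* append pid/tid/time to each record */
	};

	/* Monitor @pid on any CPU; the caller mmap()s the fd to read records. */
	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}

The returned fd is then mmap()ed and the records are consumed from the ring buffer.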
8d1b2d93
PZ
5713/*
5714 * comm tracking
5715 */
5716
5717struct perf_comm_event {
22a4f650
IM
5718 struct task_struct *task;
5719 char *comm;
8d1b2d93
PZ
5720 int comm_size;
5721
5722 struct {
5723 struct perf_event_header header;
5724
5725 u32 pid;
5726 u32 tid;
cdd6c482 5727 } event_id;
8d1b2d93
PZ
5728};
5729
67516844
JO
5730static int perf_event_comm_match(struct perf_event *event)
5731{
5732 return event->attr.comm;
5733}
5734
cdd6c482 5735static void perf_event_comm_output(struct perf_event *event,
52d857a8 5736 void *data)
8d1b2d93 5737{
52d857a8 5738 struct perf_comm_event *comm_event = data;
8d1b2d93 5739 struct perf_output_handle handle;
c980d109 5740 struct perf_sample_data sample;
cdd6c482 5741 int size = comm_event->event_id.header.size;
c980d109
ACM
5742 int ret;
5743
67516844
JO
5744 if (!perf_event_comm_match(event))
5745 return;
5746
c980d109
ACM
5747 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
5748 ret = perf_output_begin(&handle, event,
a7ac67ea 5749 comm_event->event_id.header.size);
8d1b2d93
PZ
5750
5751 if (ret)
c980d109 5752 goto out;
8d1b2d93 5753
cdd6c482
IM
5754 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
5755 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
709e50cf 5756
cdd6c482 5757 perf_output_put(&handle, comm_event->event_id);
76369139 5758 __output_copy(&handle, comm_event->comm,
8d1b2d93 5759 comm_event->comm_size);
c980d109
ACM
5760
5761 perf_event__output_id_sample(event, &handle, &sample);
5762
8d1b2d93 5763 perf_output_end(&handle);
c980d109
ACM
5764out:
5765 comm_event->event_id.header.size = size;
8d1b2d93
PZ
5766}
5767
cdd6c482 5768static void perf_event_comm_event(struct perf_comm_event *comm_event)
8d1b2d93 5769{
413ee3b4 5770 char comm[TASK_COMM_LEN];
8d1b2d93 5771 unsigned int size;
8d1b2d93 5772
413ee3b4 5773 memset(comm, 0, sizeof(comm));
96b02d78 5774 strlcpy(comm, comm_event->task->comm, sizeof(comm));
888fcee0 5775 size = ALIGN(strlen(comm)+1, sizeof(u64));
8d1b2d93
PZ
5776
5777 comm_event->comm = comm;
5778 comm_event->comm_size = size;
5779
cdd6c482 5780 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
8dc85d54 5781
67516844 5782 perf_event_aux(perf_event_comm_output,
52d857a8
JO
5783 comm_event,
5784 NULL);
8d1b2d93
PZ
5785}
5786
82b89778 5787void perf_event_comm(struct task_struct *task, bool exec)
8d1b2d93 5788{
9ee318a7
PZ
5789 struct perf_comm_event comm_event;
5790
cdd6c482 5791 if (!atomic_read(&nr_comm_events))
9ee318a7 5792 return;
a63eaf34 5793
9ee318a7 5794 comm_event = (struct perf_comm_event){
8d1b2d93 5795 .task = task,
573402db
PZ
5796 /* .comm */
5797 /* .comm_size */
cdd6c482 5798 .event_id = {
573402db 5799 .header = {
cdd6c482 5800 .type = PERF_RECORD_COMM,
82b89778 5801 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
573402db
PZ
5802 /* .size */
5803 },
5804 /* .pid */
5805 /* .tid */
8d1b2d93
PZ
5806 },
5807 };
5808
cdd6c482 5809 perf_event_comm_event(&comm_event);
8d1b2d93
PZ
5810}
5811
0a4a9391
PZ
5812/*
5813 * mmap tracking
5814 */
5815
5816struct perf_mmap_event {
089dd79d
PZ
5817 struct vm_area_struct *vma;
5818
5819 const char *file_name;
5820 int file_size;
13d7a241
SE
5821 int maj, min;
5822 u64 ino;
5823 u64 ino_generation;
f972eb63 5824 u32 prot, flags;
0a4a9391
PZ
5825
5826 struct {
5827 struct perf_event_header header;
5828
5829 u32 pid;
5830 u32 tid;
5831 u64 start;
5832 u64 len;
5833 u64 pgoff;
cdd6c482 5834 } event_id;
0a4a9391
PZ
5835};
5836
67516844
JO
5837static int perf_event_mmap_match(struct perf_event *event,
5838 void *data)
5839{
5840 struct perf_mmap_event *mmap_event = data;
5841 struct vm_area_struct *vma = mmap_event->vma;
5842 int executable = vma->vm_flags & VM_EXEC;
5843
5844 return (!executable && event->attr.mmap_data) ||
13d7a241 5845 (executable && (event->attr.mmap || event->attr.mmap2));
67516844
JO
5846}
5847
cdd6c482 5848static void perf_event_mmap_output(struct perf_event *event,
52d857a8 5849 void *data)
0a4a9391 5850{
52d857a8 5851 struct perf_mmap_event *mmap_event = data;
0a4a9391 5852 struct perf_output_handle handle;
c980d109 5853 struct perf_sample_data sample;
cdd6c482 5854 int size = mmap_event->event_id.header.size;
c980d109 5855 int ret;
0a4a9391 5856
67516844
JO
5857 if (!perf_event_mmap_match(event, data))
5858 return;
5859
13d7a241
SE
5860 if (event->attr.mmap2) {
5861 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
5862 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
5863 mmap_event->event_id.header.size += sizeof(mmap_event->min);
5864 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
d008d525 5865 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
f972eb63
PZ
5866 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
5867 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
13d7a241
SE
5868 }
5869
c980d109
ACM
5870 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
5871 ret = perf_output_begin(&handle, event,
a7ac67ea 5872 mmap_event->event_id.header.size);
0a4a9391 5873 if (ret)
c980d109 5874 goto out;
0a4a9391 5875
cdd6c482
IM
5876 mmap_event->event_id.pid = perf_event_pid(event, current);
5877 mmap_event->event_id.tid = perf_event_tid(event, current);
709e50cf 5878
cdd6c482 5879 perf_output_put(&handle, mmap_event->event_id);
13d7a241
SE
5880
5881 if (event->attr.mmap2) {
5882 perf_output_put(&handle, mmap_event->maj);
5883 perf_output_put(&handle, mmap_event->min);
5884 perf_output_put(&handle, mmap_event->ino);
5885 perf_output_put(&handle, mmap_event->ino_generation);
f972eb63
PZ
5886 perf_output_put(&handle, mmap_event->prot);
5887 perf_output_put(&handle, mmap_event->flags);
13d7a241
SE
5888 }
5889
76369139 5890 __output_copy(&handle, mmap_event->file_name,
0a4a9391 5891 mmap_event->file_size);
c980d109
ACM
5892
5893 perf_event__output_id_sample(event, &handle, &sample);
5894
78d613eb 5895 perf_output_end(&handle);
c980d109
ACM
5896out:
5897 mmap_event->event_id.header.size = size;
0a4a9391
PZ
5898}
5899
cdd6c482 5900static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
0a4a9391 5901{
089dd79d
PZ
5902 struct vm_area_struct *vma = mmap_event->vma;
5903 struct file *file = vma->vm_file;
13d7a241
SE
5904 int maj = 0, min = 0;
5905 u64 ino = 0, gen = 0;
f972eb63 5906 u32 prot = 0, flags = 0;
0a4a9391
PZ
5907 unsigned int size;
5908 char tmp[16];
5909 char *buf = NULL;
2c42cfbf 5910 char *name;
413ee3b4 5911
0a4a9391 5912 if (file) {
13d7a241
SE
5913 struct inode *inode;
5914 dev_t dev;
3ea2f2b9 5915
2c42cfbf 5916 buf = kmalloc(PATH_MAX, GFP_KERNEL);
0a4a9391 5917 if (!buf) {
c7e548b4
ON
5918 name = "//enomem";
5919 goto cpy_name;
0a4a9391 5920 }
413ee3b4 5921 /*
3ea2f2b9 5922		 * d_path() works from the end of the buffer backwards, so we
413ee3b4
AB
5923 * need to add enough zero bytes after the string to handle
5924 * the 64bit alignment we do later.
5925 */
9bf39ab2 5926 name = file_path(file, buf, PATH_MAX - sizeof(u64));
0a4a9391 5927 if (IS_ERR(name)) {
c7e548b4
ON
5928 name = "//toolong";
5929 goto cpy_name;
0a4a9391 5930 }
13d7a241
SE
5931 inode = file_inode(vma->vm_file);
5932 dev = inode->i_sb->s_dev;
5933 ino = inode->i_ino;
5934 gen = inode->i_generation;
5935 maj = MAJOR(dev);
5936 min = MINOR(dev);
f972eb63
PZ
5937
5938 if (vma->vm_flags & VM_READ)
5939 prot |= PROT_READ;
5940 if (vma->vm_flags & VM_WRITE)
5941 prot |= PROT_WRITE;
5942 if (vma->vm_flags & VM_EXEC)
5943 prot |= PROT_EXEC;
5944
5945 if (vma->vm_flags & VM_MAYSHARE)
5946 flags = MAP_SHARED;
5947 else
5948 flags = MAP_PRIVATE;
5949
5950 if (vma->vm_flags & VM_DENYWRITE)
5951 flags |= MAP_DENYWRITE;
5952 if (vma->vm_flags & VM_MAYEXEC)
5953 flags |= MAP_EXECUTABLE;
5954 if (vma->vm_flags & VM_LOCKED)
5955 flags |= MAP_LOCKED;
5956 if (vma->vm_flags & VM_HUGETLB)
5957 flags |= MAP_HUGETLB;
5958
c7e548b4 5959 goto got_name;
0a4a9391 5960 } else {
fbe26abe
JO
5961 if (vma->vm_ops && vma->vm_ops->name) {
5962 name = (char *) vma->vm_ops->name(vma);
5963 if (name)
5964 goto cpy_name;
5965 }
5966
2c42cfbf 5967 name = (char *)arch_vma_name(vma);
c7e548b4
ON
5968 if (name)
5969 goto cpy_name;
089dd79d 5970
32c5fb7e 5971 if (vma->vm_start <= vma->vm_mm->start_brk &&
3af9e859 5972 vma->vm_end >= vma->vm_mm->brk) {
c7e548b4
ON
5973 name = "[heap]";
5974 goto cpy_name;
32c5fb7e
ON
5975 }
5976 if (vma->vm_start <= vma->vm_mm->start_stack &&
3af9e859 5977 vma->vm_end >= vma->vm_mm->start_stack) {
c7e548b4
ON
5978 name = "[stack]";
5979 goto cpy_name;
089dd79d
PZ
5980 }
5981
c7e548b4
ON
5982 name = "//anon";
5983 goto cpy_name;
0a4a9391
PZ
5984 }
5985
c7e548b4
ON
5986cpy_name:
5987 strlcpy(tmp, name, sizeof(tmp));
5988 name = tmp;
0a4a9391 5989got_name:
2c42cfbf
PZ
5990 /*
5991 * Since our buffer works in 8 byte units we need to align our string
5992 * size to a multiple of 8. However, we must guarantee the tail end is
5993 * zero'd out to avoid leaking random bits to userspace.
5994 */
5995 size = strlen(name)+1;
5996 while (!IS_ALIGNED(size, sizeof(u64)))
5997 name[size++] = '\0';
0a4a9391
PZ
5998
5999 mmap_event->file_name = name;
6000 mmap_event->file_size = size;
13d7a241
SE
6001 mmap_event->maj = maj;
6002 mmap_event->min = min;
6003 mmap_event->ino = ino;
6004 mmap_event->ino_generation = gen;
f972eb63
PZ
6005 mmap_event->prot = prot;
6006 mmap_event->flags = flags;
0a4a9391 6007
2fe85427
SE
6008 if (!(vma->vm_flags & VM_EXEC))
6009 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
6010
cdd6c482 6011 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
0a4a9391 6012
67516844 6013 perf_event_aux(perf_event_mmap_output,
52d857a8
JO
6014 mmap_event,
6015 NULL);
665c2142 6016
0a4a9391
PZ
6017 kfree(buf);
6018}
6019
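The padding loop under got_name above keeps the copied name a whole number of u64s: "a.out" has strlen()+1 = 6, so two extra NUL bytes are appended and file_size becomes 8, while a 12-character name grows from 13 to 16. The explicit zero-fill matters because the spare bytes would otherwise leak uninitialised buffer contents to userspace.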
3af9e859 6020void perf_event_mmap(struct vm_area_struct *vma)
0a4a9391 6021{
9ee318a7
PZ
6022 struct perf_mmap_event mmap_event;
6023
cdd6c482 6024 if (!atomic_read(&nr_mmap_events))
9ee318a7
PZ
6025 return;
6026
6027 mmap_event = (struct perf_mmap_event){
089dd79d 6028 .vma = vma,
573402db
PZ
6029 /* .file_name */
6030 /* .file_size */
cdd6c482 6031 .event_id = {
573402db 6032 .header = {
cdd6c482 6033 .type = PERF_RECORD_MMAP,
39447b38 6034 .misc = PERF_RECORD_MISC_USER,
573402db
PZ
6035 /* .size */
6036 },
6037 /* .pid */
6038 /* .tid */
089dd79d
PZ
6039 .start = vma->vm_start,
6040 .len = vma->vm_end - vma->vm_start,
3a0304e9 6041 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
0a4a9391 6042 },
13d7a241
SE
6043 /* .maj (attr_mmap2 only) */
6044 /* .min (attr_mmap2 only) */
6045 /* .ino (attr_mmap2 only) */
6046 /* .ino_generation (attr_mmap2 only) */
f972eb63
PZ
6047 /* .prot (attr_mmap2 only) */
6048 /* .flags (attr_mmap2 only) */
0a4a9391
PZ
6049 };
6050
cdd6c482 6051 perf_event_mmap_event(&mmap_event);
0a4a9391
PZ
6052}
6053
68db7e98
AS
6054void perf_event_aux_event(struct perf_event *event, unsigned long head,
6055 unsigned long size, u64 flags)
6056{
6057 struct perf_output_handle handle;
6058 struct perf_sample_data sample;
6059 struct perf_aux_event {
6060 struct perf_event_header header;
6061 u64 offset;
6062 u64 size;
6063 u64 flags;
6064 } rec = {
6065 .header = {
6066 .type = PERF_RECORD_AUX,
6067 .misc = 0,
6068 .size = sizeof(rec),
6069 },
6070 .offset = head,
6071 .size = size,
6072 .flags = flags,
6073 };
6074 int ret;
6075
6076 perf_event_header__init_id(&rec.header, &sample, event);
6077 ret = perf_output_begin(&handle, event, rec.header.size);
6078
6079 if (ret)
6080 return;
6081
6082 perf_output_put(&handle, rec);
6083 perf_event__output_id_sample(event, &handle, &sample);
6084
6085 perf_output_end(&handle);
6086}
6087
f38b0dbb
KL
6088/*
6089 * Lost/dropped samples logging
6090 */
6091void perf_log_lost_samples(struct perf_event *event, u64 lost)
6092{
6093 struct perf_output_handle handle;
6094 struct perf_sample_data sample;
6095 int ret;
6096
6097 struct {
6098 struct perf_event_header header;
6099 u64 lost;
6100 } lost_samples_event = {
6101 .header = {
6102 .type = PERF_RECORD_LOST_SAMPLES,
6103 .misc = 0,
6104 .size = sizeof(lost_samples_event),
6105 },
6106 .lost = lost,
6107 };
6108
6109 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
6110
6111 ret = perf_output_begin(&handle, event,
6112 lost_samples_event.header.size);
6113 if (ret)
6114 return;
6115
6116 perf_output_put(&handle, lost_samples_event);
6117 perf_event__output_id_sample(event, &handle, &sample);
6118 perf_output_end(&handle);
6119}
6120
45ac1403
AH
6121/*
6122 * context_switch tracking
6123 */
6124
6125struct perf_switch_event {
6126 struct task_struct *task;
6127 struct task_struct *next_prev;
6128
6129 struct {
6130 struct perf_event_header header;
6131 u32 next_prev_pid;
6132 u32 next_prev_tid;
6133 } event_id;
6134};
6135
6136static int perf_event_switch_match(struct perf_event *event)
6137{
6138 return event->attr.context_switch;
6139}
6140
6141static void perf_event_switch_output(struct perf_event *event, void *data)
6142{
6143 struct perf_switch_event *se = data;
6144 struct perf_output_handle handle;
6145 struct perf_sample_data sample;
6146 int ret;
6147
6148 if (!perf_event_switch_match(event))
6149 return;
6150
6151 /* Only CPU-wide events are allowed to see next/prev pid/tid */
6152 if (event->ctx->task) {
6153 se->event_id.header.type = PERF_RECORD_SWITCH;
6154 se->event_id.header.size = sizeof(se->event_id.header);
6155 } else {
6156 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
6157 se->event_id.header.size = sizeof(se->event_id);
6158 se->event_id.next_prev_pid =
6159 perf_event_pid(event, se->next_prev);
6160 se->event_id.next_prev_tid =
6161 perf_event_tid(event, se->next_prev);
6162 }
6163
6164 perf_event_header__init_id(&se->event_id.header, &sample, event);
6165
6166 ret = perf_output_begin(&handle, event, se->event_id.header.size);
6167 if (ret)
6168 return;
6169
6170 if (event->ctx->task)
6171 perf_output_put(&handle, se->event_id.header);
6172 else
6173 perf_output_put(&handle, se->event_id);
6174
6175 perf_event__output_id_sample(event, &handle, &sample);
6176
6177 perf_output_end(&handle);
6178}
6179
6180static void perf_event_switch(struct task_struct *task,
6181 struct task_struct *next_prev, bool sched_in)
6182{
6183 struct perf_switch_event switch_event;
6184
6185 /* N.B. caller checks nr_switch_events != 0 */
6186
6187 switch_event = (struct perf_switch_event){
6188 .task = task,
6189 .next_prev = next_prev,
6190 .event_id = {
6191 .header = {
6192 /* .type */
6193 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
6194 /* .size */
6195 },
6196 /* .next_prev_pid */
6197 /* .next_prev_tid */
6198 },
6199 };
6200
6201 perf_event_aux(perf_event_switch_output,
6202 &switch_event,
6203 NULL);
6204}
6205
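perf_event_switch_output() above emits two layouts: a bare header for per-task events (PERF_RECORD_SWITCH) and the header plus next/prev pid and tid for CPU-wide events (PERF_RECORD_SWITCH_CPU_WIDE), with the direction encoded in the misc field. A rough userspace sketch of telling the two apart while walking the ring buffer; the struct mirrors the event_id layout above and any trailing sample_id fields are ignored:

#include <linux/perf_event.h>
#include <stdio.h>

/* Layout of the CPU-wide variant emitted above. */
struct switch_cpu_wide_event {
	struct perf_event_header header;
	__u32 next_prev_pid;
	__u32 next_prev_tid;
};

static void handle_switch_record(const struct perf_event_header *hdr)
{
	int out = hdr->misc & PERF_RECORD_MISC_SWITCH_OUT;

	if (hdr->type == PERF_RECORD_SWITCH_CPU_WIDE) {
		const struct switch_cpu_wide_event *ev = (const void *)hdr;

		printf("switch %s: other task %u/%u\n",
		       out ? "out" : "in", ev->next_prev_pid, ev->next_prev_tid);
	} else if (hdr->type == PERF_RECORD_SWITCH) {
		printf("switch %s\n", out ? "out" : "in");
	}
}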
a78ac325
PZ
6206/*
6207 * IRQ throttle logging
6208 */
6209
cdd6c482 6210static void perf_log_throttle(struct perf_event *event, int enable)
a78ac325
PZ
6211{
6212 struct perf_output_handle handle;
c980d109 6213 struct perf_sample_data sample;
a78ac325
PZ
6214 int ret;
6215
6216 struct {
6217 struct perf_event_header header;
6218 u64 time;
cca3f454 6219 u64 id;
7f453c24 6220 u64 stream_id;
a78ac325
PZ
6221 } throttle_event = {
6222 .header = {
cdd6c482 6223 .type = PERF_RECORD_THROTTLE,
a78ac325
PZ
6224 .misc = 0,
6225 .size = sizeof(throttle_event),
6226 },
34f43927 6227 .time = perf_event_clock(event),
cdd6c482
IM
6228 .id = primary_event_id(event),
6229 .stream_id = event->id,
a78ac325
PZ
6230 };
6231
966ee4d6 6232 if (enable)
cdd6c482 6233 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
966ee4d6 6234
c980d109
ACM
6235 perf_event_header__init_id(&throttle_event.header, &sample, event);
6236
6237 ret = perf_output_begin(&handle, event,
a7ac67ea 6238 throttle_event.header.size);
a78ac325
PZ
6239 if (ret)
6240 return;
6241
6242 perf_output_put(&handle, throttle_event);
c980d109 6243 perf_event__output_id_sample(event, &handle, &sample);
a78ac325
PZ
6244 perf_output_end(&handle);
6245}
6246
ec0d7729
AS
6247static void perf_log_itrace_start(struct perf_event *event)
6248{
6249 struct perf_output_handle handle;
6250 struct perf_sample_data sample;
6251 struct perf_aux_event {
6252 struct perf_event_header header;
6253 u32 pid;
6254 u32 tid;
6255 } rec;
6256 int ret;
6257
6258 if (event->parent)
6259 event = event->parent;
6260
6261 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
6262 event->hw.itrace_started)
6263 return;
6264
ec0d7729
AS
6265 rec.header.type = PERF_RECORD_ITRACE_START;
6266 rec.header.misc = 0;
6267 rec.header.size = sizeof(rec);
6268 rec.pid = perf_event_pid(event, current);
6269 rec.tid = perf_event_tid(event, current);
6270
6271 perf_event_header__init_id(&rec.header, &sample, event);
6272 ret = perf_output_begin(&handle, event, rec.header.size);
6273
6274 if (ret)
6275 return;
6276
6277 perf_output_put(&handle, rec);
6278 perf_event__output_id_sample(event, &handle, &sample);
6279
6280 perf_output_end(&handle);
6281}
6282
f6c7d5fe 6283/*
cdd6c482 6284 * Generic event overflow handling, sampling.
f6c7d5fe
PZ
6285 */
6286
a8b0ca17 6287static int __perf_event_overflow(struct perf_event *event,
5622f295
MM
6288 int throttle, struct perf_sample_data *data,
6289 struct pt_regs *regs)
f6c7d5fe 6290{
cdd6c482
IM
6291 int events = atomic_read(&event->event_limit);
6292 struct hw_perf_event *hwc = &event->hw;
e050e3f0 6293 u64 seq;
79f14641
PZ
6294 int ret = 0;
6295
96398826
PZ
6296 /*
6297 * Non-sampling counters might still use the PMI to fold short
6298 * hardware counters, ignore those.
6299 */
6300 if (unlikely(!is_sampling_event(event)))
6301 return 0;
6302
e050e3f0
SE
6303 seq = __this_cpu_read(perf_throttled_seq);
6304 if (seq != hwc->interrupts_seq) {
6305 hwc->interrupts_seq = seq;
6306 hwc->interrupts = 1;
6307 } else {
6308 hwc->interrupts++;
6309 if (unlikely(throttle
6310 && hwc->interrupts >= max_samples_per_tick)) {
6311 __this_cpu_inc(perf_throttled_count);
163ec435
PZ
6312 hwc->interrupts = MAX_INTERRUPTS;
6313 perf_log_throttle(event, 0);
d84153d6 6314 tick_nohz_full_kick();
a78ac325
PZ
6315 ret = 1;
6316 }
e050e3f0 6317 }
60db5e09 6318
cdd6c482 6319 if (event->attr.freq) {
def0a9b2 6320 u64 now = perf_clock();
abd50713 6321 s64 delta = now - hwc->freq_time_stamp;
bd2b5b12 6322
abd50713 6323 hwc->freq_time_stamp = now;
bd2b5b12 6324
abd50713 6325 if (delta > 0 && delta < 2*TICK_NSEC)
f39d47ff 6326 perf_adjust_period(event, delta, hwc->last_period, true);
bd2b5b12
PZ
6327 }
6328
2023b359
PZ
6329 /*
6330 * XXX event_limit might not quite work as expected on inherited
cdd6c482 6331 * events
2023b359
PZ
6332 */
6333
cdd6c482
IM
6334 event->pending_kill = POLL_IN;
6335 if (events && atomic_dec_and_test(&event->event_limit)) {
79f14641 6336 ret = 1;
cdd6c482 6337 event->pending_kill = POLL_HUP;
a8b0ca17
PZ
6338 event->pending_disable = 1;
6339 irq_work_queue(&event->pending);
79f14641
PZ
6340 }
6341
453f19ee 6342 if (event->overflow_handler)
a8b0ca17 6343 event->overflow_handler(event, data, regs);
453f19ee 6344 else
a8b0ca17 6345 perf_event_output(event, data, regs);
453f19ee 6346
fed66e2c 6347 if (*perf_event_fasync(event) && event->pending_kill) {
a8b0ca17
PZ
6348 event->pending_wakeup = 1;
6349 irq_work_queue(&event->pending);
f506b3dc
PZ
6350 }
6351
79f14641 6352 return ret;
f6c7d5fe
PZ
6353}
6354
a8b0ca17 6355int perf_event_overflow(struct perf_event *event,
5622f295
MM
6356 struct perf_sample_data *data,
6357 struct pt_regs *regs)
850bc73f 6358{
a8b0ca17 6359 return __perf_event_overflow(event, 1, data, regs);
850bc73f
PZ
6360}
6361
15dbf27c 6362/*
cdd6c482 6363 * Generic software event infrastructure
15dbf27c
PZ
6364 */
6365
b28ab83c
PZ
6366struct swevent_htable {
6367 struct swevent_hlist *swevent_hlist;
6368 struct mutex hlist_mutex;
6369 int hlist_refcount;
6370
6371 /* Recursion avoidance in each contexts */
6372 int recursion[PERF_NR_CONTEXTS];
39af6b16
JO
6373
6374 /* Keeps track of cpu being initialized/exited */
6375 bool online;
b28ab83c
PZ
6376};
6377
6378static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
6379
7b4b6658 6380/*
cdd6c482
IM
6381 * We directly increment event->count and keep a second value in
 6382 * event->hw.period_left to count intervals. This period value
7b4b6658
PZ
 6383 * is kept in the range [-sample_period, 0] so that we can use its
 6384 * sign as the overflow trigger.
6385 */
6386
ab573844 6387u64 perf_swevent_set_period(struct perf_event *event)
15dbf27c 6388{
cdd6c482 6389 struct hw_perf_event *hwc = &event->hw;
7b4b6658
PZ
6390 u64 period = hwc->last_period;
6391 u64 nr, offset;
6392 s64 old, val;
6393
6394 hwc->last_period = hwc->sample_period;
15dbf27c
PZ
6395
6396again:
e7850595 6397 old = val = local64_read(&hwc->period_left);
7b4b6658
PZ
6398 if (val < 0)
6399 return 0;
15dbf27c 6400
7b4b6658
PZ
6401 nr = div64_u64(period + val, period);
6402 offset = nr * period;
6403 val -= offset;
e7850595 6404 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7b4b6658 6405 goto again;
15dbf27c 6406
7b4b6658 6407 return nr;
15dbf27c
PZ
6408}
6409
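Worked example, assuming sample_period = 100: after perf_swevent_add() the counter sits at period_left = -100. If a burst of events pushes it up to +130, this function computes nr = (100 + 130) / 100 = 2 and leaves period_left at 130 - 2*100 = -70, so two overflows are reported and the remainder is carried into the next period.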
0cff784a 6410static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
a8b0ca17 6411 struct perf_sample_data *data,
5622f295 6412 struct pt_regs *regs)
15dbf27c 6413{
cdd6c482 6414 struct hw_perf_event *hwc = &event->hw;
850bc73f 6415 int throttle = 0;
15dbf27c 6416
0cff784a
PZ
6417 if (!overflow)
6418 overflow = perf_swevent_set_period(event);
15dbf27c 6419
7b4b6658
PZ
6420 if (hwc->interrupts == MAX_INTERRUPTS)
6421 return;
15dbf27c 6422
7b4b6658 6423 for (; overflow; overflow--) {
a8b0ca17 6424 if (__perf_event_overflow(event, throttle,
5622f295 6425 data, regs)) {
7b4b6658
PZ
6426 /*
6427 * We inhibit the overflow from happening when
6428 * hwc->interrupts == MAX_INTERRUPTS.
6429 */
6430 break;
6431 }
cf450a73 6432 throttle = 1;
7b4b6658 6433 }
15dbf27c
PZ
6434}
6435
a4eaf7f1 6436static void perf_swevent_event(struct perf_event *event, u64 nr,
a8b0ca17 6437 struct perf_sample_data *data,
5622f295 6438 struct pt_regs *regs)
7b4b6658 6439{
cdd6c482 6440 struct hw_perf_event *hwc = &event->hw;
d6d020e9 6441
e7850595 6442 local64_add(nr, &event->count);
d6d020e9 6443
0cff784a
PZ
6444 if (!regs)
6445 return;
6446
6c7e550f 6447 if (!is_sampling_event(event))
7b4b6658 6448 return;
d6d020e9 6449
5d81e5cf
AV
6450 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
6451 data->period = nr;
6452 return perf_swevent_overflow(event, 1, data, regs);
6453 } else
6454 data->period = event->hw.last_period;
6455
0cff784a 6456 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
a8b0ca17 6457 return perf_swevent_overflow(event, 1, data, regs);
0cff784a 6458
e7850595 6459 if (local64_add_negative(nr, &hwc->period_left))
7b4b6658 6460 return;
df1a132b 6461
a8b0ca17 6462 perf_swevent_overflow(event, 0, data, regs);
d6d020e9
PZ
6463}
6464
f5ffe02e
FW
6465static int perf_exclude_event(struct perf_event *event,
6466 struct pt_regs *regs)
6467{
a4eaf7f1 6468 if (event->hw.state & PERF_HES_STOPPED)
91b2f482 6469 return 1;
a4eaf7f1 6470
f5ffe02e
FW
6471 if (regs) {
6472 if (event->attr.exclude_user && user_mode(regs))
6473 return 1;
6474
6475 if (event->attr.exclude_kernel && !user_mode(regs))
6476 return 1;
6477 }
6478
6479 return 0;
6480}
6481
cdd6c482 6482static int perf_swevent_match(struct perf_event *event,
1c432d89 6483 enum perf_type_id type,
6fb2915d
LZ
6484 u32 event_id,
6485 struct perf_sample_data *data,
6486 struct pt_regs *regs)
15dbf27c 6487{
cdd6c482 6488 if (event->attr.type != type)
a21ca2ca 6489 return 0;
f5ffe02e 6490
cdd6c482 6491 if (event->attr.config != event_id)
15dbf27c
PZ
6492 return 0;
6493
f5ffe02e
FW
6494 if (perf_exclude_event(event, regs))
6495 return 0;
15dbf27c
PZ
6496
6497 return 1;
6498}
6499
76e1d904
FW
6500static inline u64 swevent_hash(u64 type, u32 event_id)
6501{
6502 u64 val = event_id | (type << 32);
6503
6504 return hash_64(val, SWEVENT_HLIST_BITS);
6505}
6506
49f135ed
FW
6507static inline struct hlist_head *
6508__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
76e1d904 6509{
49f135ed
FW
6510 u64 hash = swevent_hash(type, event_id);
6511
6512 return &hlist->heads[hash];
6513}
76e1d904 6514
49f135ed
FW
6515/* For the read side: events when they trigger */
6516static inline struct hlist_head *
b28ab83c 6517find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
49f135ed
FW
6518{
6519 struct swevent_hlist *hlist;
76e1d904 6520
b28ab83c 6521 hlist = rcu_dereference(swhash->swevent_hlist);
76e1d904
FW
6522 if (!hlist)
6523 return NULL;
6524
49f135ed
FW
6525 return __find_swevent_head(hlist, type, event_id);
6526}
6527
6528/* For the event head insertion and removal in the hlist */
6529static inline struct hlist_head *
b28ab83c 6530find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
49f135ed
FW
6531{
6532 struct swevent_hlist *hlist;
6533 u32 event_id = event->attr.config;
6534 u64 type = event->attr.type;
6535
6536 /*
6537 * Event scheduling is always serialized against hlist allocation
6538 * and release. Which makes the protected version suitable here.
6539 * The context lock guarantees that.
6540 */
b28ab83c 6541 hlist = rcu_dereference_protected(swhash->swevent_hlist,
49f135ed
FW
6542 lockdep_is_held(&event->ctx->lock));
6543 if (!hlist)
6544 return NULL;
6545
6546 return __find_swevent_head(hlist, type, event_id);
76e1d904
FW
6547}
6548
6549static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
a8b0ca17 6550 u64 nr,
76e1d904
FW
6551 struct perf_sample_data *data,
6552 struct pt_regs *regs)
15dbf27c 6553{
4a32fea9 6554 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 6555 struct perf_event *event;
76e1d904 6556 struct hlist_head *head;
15dbf27c 6557
76e1d904 6558 rcu_read_lock();
b28ab83c 6559 head = find_swevent_head_rcu(swhash, type, event_id);
76e1d904
FW
6560 if (!head)
6561 goto end;
6562
b67bfe0d 6563 hlist_for_each_entry_rcu(event, head, hlist_entry) {
6fb2915d 6564 if (perf_swevent_match(event, type, event_id, data, regs))
a8b0ca17 6565 perf_swevent_event(event, nr, data, regs);
15dbf27c 6566 }
76e1d904
FW
6567end:
6568 rcu_read_unlock();
15dbf27c
PZ
6569}
6570
86038c5e
PZI
6571DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
6572
4ed7c92d 6573int perf_swevent_get_recursion_context(void)
96f6d444 6574{
4a32fea9 6575 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
96f6d444 6576
b28ab83c 6577 return get_recursion_context(swhash->recursion);
96f6d444 6578}
645e8cc0 6579EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
96f6d444 6580
fa9f90be 6581inline void perf_swevent_put_recursion_context(int rctx)
15dbf27c 6582{
4a32fea9 6583 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
927c7a9e 6584
b28ab83c 6585 put_recursion_context(swhash->recursion, rctx);
ce71b9df 6586}
15dbf27c 6587
86038c5e 6588void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
b8e83514 6589{
a4234bfc 6590 struct perf_sample_data data;
4ed7c92d 6591
86038c5e 6592 if (WARN_ON_ONCE(!regs))
4ed7c92d 6593 return;
a4234bfc 6594
fd0d000b 6595 perf_sample_data_init(&data, addr, 0);
a8b0ca17 6596 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
86038c5e
PZI
6597}
6598
6599void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
6600{
6601 int rctx;
6602
6603 preempt_disable_notrace();
6604 rctx = perf_swevent_get_recursion_context();
6605 if (unlikely(rctx < 0))
6606 goto fail;
6607
6608 ___perf_sw_event(event_id, nr, regs, addr);
4ed7c92d
PZ
6609
6610 perf_swevent_put_recursion_context(rctx);
86038c5e 6611fail:
1c024eca 6612 preempt_enable_notrace();
b8e83514
PZ
6613}
6614
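__perf_sw_event() is what the perf_sw_event() inline wrapper in <linux/perf_event.h> falls through to once the static key for the event id is enabled. A rough sketch of a typical kernel-side call site (the function name is hypothetical); architecture page-fault handlers use this same pattern for PERF_COUNT_SW_PAGE_FAULTS:

#include <linux/perf_event.h>
#include <linux/ptrace.h>

/* Hypothetical hook: count one hit of a software event at this point. */
static void count_my_fault(struct pt_regs *regs, unsigned long address)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}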
cdd6c482 6615static void perf_swevent_read(struct perf_event *event)
15dbf27c 6616{
15dbf27c
PZ
6617}
6618
a4eaf7f1 6619static int perf_swevent_add(struct perf_event *event, int flags)
15dbf27c 6620{
4a32fea9 6621 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 6622 struct hw_perf_event *hwc = &event->hw;
76e1d904
FW
6623 struct hlist_head *head;
6624
6c7e550f 6625 if (is_sampling_event(event)) {
7b4b6658 6626 hwc->last_period = hwc->sample_period;
cdd6c482 6627 perf_swevent_set_period(event);
7b4b6658 6628 }
76e1d904 6629
a4eaf7f1
PZ
6630 hwc->state = !(flags & PERF_EF_START);
6631
b28ab83c 6632 head = find_swevent_head(swhash, event);
39af6b16
JO
6633 if (!head) {
6634 /*
6635 * We can race with cpu hotplug code. Do not
6636 * WARN if the cpu just got unplugged.
6637 */
6638 WARN_ON_ONCE(swhash->online);
76e1d904 6639 return -EINVAL;
39af6b16 6640 }
76e1d904
FW
6641
6642 hlist_add_head_rcu(&event->hlist_entry, head);
6a694a60 6643 perf_event_update_userpage(event);
76e1d904 6644
15dbf27c
PZ
6645 return 0;
6646}
6647
a4eaf7f1 6648static void perf_swevent_del(struct perf_event *event, int flags)
15dbf27c 6649{
76e1d904 6650 hlist_del_rcu(&event->hlist_entry);
15dbf27c
PZ
6651}
6652
a4eaf7f1 6653static void perf_swevent_start(struct perf_event *event, int flags)
5c92d124 6654{
a4eaf7f1 6655 event->hw.state = 0;
d6d020e9 6656}
aa9c4c0f 6657
a4eaf7f1 6658static void perf_swevent_stop(struct perf_event *event, int flags)
d6d020e9 6659{
a4eaf7f1 6660 event->hw.state = PERF_HES_STOPPED;
bae43c99
IM
6661}
6662
49f135ed
FW
6663/* Deref the hlist from the update side */
6664static inline struct swevent_hlist *
b28ab83c 6665swevent_hlist_deref(struct swevent_htable *swhash)
49f135ed 6666{
b28ab83c
PZ
6667 return rcu_dereference_protected(swhash->swevent_hlist,
6668 lockdep_is_held(&swhash->hlist_mutex));
49f135ed
FW
6669}
6670
b28ab83c 6671static void swevent_hlist_release(struct swevent_htable *swhash)
76e1d904 6672{
b28ab83c 6673 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
76e1d904 6674
49f135ed 6675 if (!hlist)
76e1d904
FW
6676 return;
6677
70691d4a 6678 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
fa4bbc4c 6679 kfree_rcu(hlist, rcu_head);
76e1d904
FW
6680}
6681
6682static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
6683{
b28ab83c 6684 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904 6685
b28ab83c 6686 mutex_lock(&swhash->hlist_mutex);
76e1d904 6687
b28ab83c
PZ
6688 if (!--swhash->hlist_refcount)
6689 swevent_hlist_release(swhash);
76e1d904 6690
b28ab83c 6691 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
6692}
6693
6694static void swevent_hlist_put(struct perf_event *event)
6695{
6696 int cpu;
6697
76e1d904
FW
6698 for_each_possible_cpu(cpu)
6699 swevent_hlist_put_cpu(event, cpu);
6700}
6701
6702static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
6703{
b28ab83c 6704 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904
FW
6705 int err = 0;
6706
b28ab83c 6707 mutex_lock(&swhash->hlist_mutex);
76e1d904 6708
b28ab83c 6709 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
76e1d904
FW
6710 struct swevent_hlist *hlist;
6711
6712 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
6713 if (!hlist) {
6714 err = -ENOMEM;
6715 goto exit;
6716 }
b28ab83c 6717 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 6718 }
b28ab83c 6719 swhash->hlist_refcount++;
9ed6060d 6720exit:
b28ab83c 6721 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
6722
6723 return err;
6724}
6725
6726static int swevent_hlist_get(struct perf_event *event)
6727{
6728 int err;
6729 int cpu, failed_cpu;
6730
76e1d904
FW
6731 get_online_cpus();
6732 for_each_possible_cpu(cpu) {
6733 err = swevent_hlist_get_cpu(event, cpu);
6734 if (err) {
6735 failed_cpu = cpu;
6736 goto fail;
6737 }
6738 }
6739 put_online_cpus();
6740
6741 return 0;
9ed6060d 6742fail:
76e1d904
FW
6743 for_each_possible_cpu(cpu) {
6744 if (cpu == failed_cpu)
6745 break;
6746 swevent_hlist_put_cpu(event, cpu);
6747 }
6748
6749 put_online_cpus();
6750 return err;
6751}
6752
c5905afb 6753struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
95476b64 6754
b0a873eb
PZ
6755static void sw_perf_event_destroy(struct perf_event *event)
6756{
6757 u64 event_id = event->attr.config;
95476b64 6758
b0a873eb
PZ
6759 WARN_ON(event->parent);
6760
c5905afb 6761 static_key_slow_dec(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
6762 swevent_hlist_put(event);
6763}
6764
6765static int perf_swevent_init(struct perf_event *event)
6766{
8176cced 6767 u64 event_id = event->attr.config;
b0a873eb
PZ
6768
6769 if (event->attr.type != PERF_TYPE_SOFTWARE)
6770 return -ENOENT;
6771
2481c5fa
SE
6772 /*
6773 * no branch sampling for software events
6774 */
6775 if (has_branch_stack(event))
6776 return -EOPNOTSUPP;
6777
b0a873eb
PZ
6778 switch (event_id) {
6779 case PERF_COUNT_SW_CPU_CLOCK:
6780 case PERF_COUNT_SW_TASK_CLOCK:
6781 return -ENOENT;
6782
6783 default:
6784 break;
6785 }
6786
ce677831 6787 if (event_id >= PERF_COUNT_SW_MAX)
b0a873eb
PZ
6788 return -ENOENT;
6789
6790 if (!event->parent) {
6791 int err;
6792
6793 err = swevent_hlist_get(event);
6794 if (err)
6795 return err;
6796
c5905afb 6797 static_key_slow_inc(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
6798 event->destroy = sw_perf_event_destroy;
6799 }
6800
6801 return 0;
6802}
6803
6804static struct pmu perf_swevent = {
89a1e187 6805 .task_ctx_nr = perf_sw_context,
95476b64 6806
34f43927
PZ
6807 .capabilities = PERF_PMU_CAP_NO_NMI,
6808
b0a873eb 6809 .event_init = perf_swevent_init,
a4eaf7f1
PZ
6810 .add = perf_swevent_add,
6811 .del = perf_swevent_del,
6812 .start = perf_swevent_start,
6813 .stop = perf_swevent_stop,
1c024eca 6814 .read = perf_swevent_read,
1c024eca
PZ
6815};
6816
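The perf_swevent PMU above backs the PERF_TYPE_SOFTWARE counters other than the cpu-clock and task-clock events defined further down. A minimal userspace sketch of counting one of them for the calling thread; error handling is omitted:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	sleep(1);			/* the workload of interest goes here */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("context switches: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}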
b0a873eb
PZ
6817#ifdef CONFIG_EVENT_TRACING
6818
1c024eca
PZ
6819static int perf_tp_filter_match(struct perf_event *event,
6820 struct perf_sample_data *data)
6821{
6822 void *record = data->raw->data;
6823
6824 if (likely(!event->filter) || filter_match_preds(event->filter, record))
6825 return 1;
6826 return 0;
6827}
6828
6829static int perf_tp_event_match(struct perf_event *event,
6830 struct perf_sample_data *data,
6831 struct pt_regs *regs)
6832{
a0f7d0f7
FW
6833 if (event->hw.state & PERF_HES_STOPPED)
6834 return 0;
580d607c
PZ
6835 /*
6836 * All tracepoints are from kernel-space.
6837 */
6838 if (event->attr.exclude_kernel)
1c024eca
PZ
6839 return 0;
6840
6841 if (!perf_tp_filter_match(event, data))
6842 return 0;
6843
6844 return 1;
6845}
6846
6847void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
e6dab5ff
AV
6848 struct pt_regs *regs, struct hlist_head *head, int rctx,
6849 struct task_struct *task)
95476b64
FW
6850{
6851 struct perf_sample_data data;
1c024eca 6852 struct perf_event *event;
1c024eca 6853
95476b64
FW
6854 struct perf_raw_record raw = {
6855 .size = entry_size,
6856 .data = record,
6857 };
6858
fd0d000b 6859 perf_sample_data_init(&data, addr, 0);
95476b64
FW
6860 data.raw = &raw;
6861
b67bfe0d 6862 hlist_for_each_entry_rcu(event, head, hlist_entry) {
1c024eca 6863 if (perf_tp_event_match(event, &data, regs))
a8b0ca17 6864 perf_swevent_event(event, count, &data, regs);
4f41c013 6865 }
ecc55f84 6866
e6dab5ff
AV
6867 /*
6868 * If we got specified a target task, also iterate its context and
6869 * deliver this event there too.
6870 */
6871 if (task && task != current) {
6872 struct perf_event_context *ctx;
6873 struct trace_entry *entry = record;
6874
6875 rcu_read_lock();
6876 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
6877 if (!ctx)
6878 goto unlock;
6879
6880 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
6881 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6882 continue;
6883 if (event->attr.config != entry->type)
6884 continue;
6885 if (perf_tp_event_match(event, &data, regs))
6886 perf_swevent_event(event, count, &data, regs);
6887 }
6888unlock:
6889 rcu_read_unlock();
6890 }
6891
ecc55f84 6892 perf_swevent_put_recursion_context(rctx);
95476b64
FW
6893}
6894EXPORT_SYMBOL_GPL(perf_tp_event);
6895
cdd6c482 6896static void tp_perf_event_destroy(struct perf_event *event)
e077df4f 6897{
1c024eca 6898 perf_trace_destroy(event);
e077df4f
PZ
6899}
6900
b0a873eb 6901static int perf_tp_event_init(struct perf_event *event)
e077df4f 6902{
76e1d904
FW
6903 int err;
6904
b0a873eb
PZ
6905 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6906 return -ENOENT;
6907
2481c5fa
SE
6908 /*
6909 * no branch sampling for tracepoint events
6910 */
6911 if (has_branch_stack(event))
6912 return -EOPNOTSUPP;
6913
1c024eca
PZ
6914 err = perf_trace_init(event);
6915 if (err)
b0a873eb 6916 return err;
e077df4f 6917
cdd6c482 6918 event->destroy = tp_perf_event_destroy;
e077df4f 6919
b0a873eb
PZ
6920 return 0;
6921}
6922
6923static struct pmu perf_tracepoint = {
89a1e187
PZ
6924 .task_ctx_nr = perf_sw_context,
6925
b0a873eb 6926 .event_init = perf_tp_event_init,
a4eaf7f1
PZ
6927 .add = perf_trace_add,
6928 .del = perf_trace_del,
6929 .start = perf_swevent_start,
6930 .stop = perf_swevent_stop,
b0a873eb 6931 .read = perf_swevent_read,
b0a873eb
PZ
6932};
6933
6934static inline void perf_tp_register(void)
6935{
2e80a82a 6936 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
e077df4f 6937}
6fb2915d
LZ
6938
6939static int perf_event_set_filter(struct perf_event *event, void __user *arg)
6940{
6941 char *filter_str;
6942 int ret;
6943
6944 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6945 return -EINVAL;
6946
6947 filter_str = strndup_user(arg, PAGE_SIZE);
6948 if (IS_ERR(filter_str))
6949 return PTR_ERR(filter_str);
6950
6951 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
6952
6953 kfree(filter_str);
6954 return ret;
6955}
6956
6957static void perf_event_free_filter(struct perf_event *event)
6958{
6959 ftrace_profile_free_filter(event);
6960}
6961
2541517c
AS
6962static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
6963{
6964 struct bpf_prog *prog;
6965
6966 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6967 return -EINVAL;
6968
6969 if (event->tp_event->prog)
6970 return -EEXIST;
6971
04a22fae
WN
6972 if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE))
6973 /* bpf programs can only be attached to u/kprobes */
2541517c
AS
6974 return -EINVAL;
6975
6976 prog = bpf_prog_get(prog_fd);
6977 if (IS_ERR(prog))
6978 return PTR_ERR(prog);
6979
6c373ca8 6980 if (prog->type != BPF_PROG_TYPE_KPROBE) {
2541517c
AS
6981 /* valid fd, but invalid bpf program type */
6982 bpf_prog_put(prog);
6983 return -EINVAL;
6984 }
6985
6986 event->tp_event->prog = prog;
6987
6988 return 0;
6989}
6990
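perf_event_set_bpf_prog() is reached from the PERF_EVENT_IOC_SET_BPF ioctl. A rough userspace sketch of that step, assuming the caller has already opened a kprobe-backed tracepoint event (perf_fd) and loaded a BPF_PROG_TYPE_KPROBE program (prog_fd) with the bpf() syscall; error handling is omitted:

#include <linux/perf_event.h>
#include <sys/ioctl.h>

/* Hypothetical helper: attach an already-loaded kprobe program to an event. */
static int attach_bpf_to_kprobe(int perf_fd, int prog_fd)
{
	if (ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, prog_fd))
		return -1;

	/* Enable the event so the probe fires and runs the program. */
	return ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);
}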
6991static void perf_event_free_bpf_prog(struct perf_event *event)
6992{
6993 struct bpf_prog *prog;
6994
6995 if (!event->tp_event)
6996 return;
6997
6998 prog = event->tp_event->prog;
6999 if (prog) {
7000 event->tp_event->prog = NULL;
7001 bpf_prog_put(prog);
7002 }
7003}
7004
e077df4f 7005#else
6fb2915d 7006
b0a873eb 7007static inline void perf_tp_register(void)
e077df4f 7008{
e077df4f 7009}
6fb2915d
LZ
7010
7011static int perf_event_set_filter(struct perf_event *event, void __user *arg)
7012{
7013 return -ENOENT;
7014}
7015
7016static void perf_event_free_filter(struct perf_event *event)
7017{
7018}
7019
2541517c
AS
7020static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7021{
7022 return -ENOENT;
7023}
7024
7025static void perf_event_free_bpf_prog(struct perf_event *event)
7026{
7027}
07b139c8 7028#endif /* CONFIG_EVENT_TRACING */
e077df4f 7029
24f1e32c 7030#ifdef CONFIG_HAVE_HW_BREAKPOINT
f5ffe02e 7031void perf_bp_event(struct perf_event *bp, void *data)
24f1e32c 7032{
f5ffe02e
FW
7033 struct perf_sample_data sample;
7034 struct pt_regs *regs = data;
7035
fd0d000b 7036 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
f5ffe02e 7037
a4eaf7f1 7038 if (!bp->hw.state && !perf_exclude_event(bp, regs))
a8b0ca17 7039 perf_swevent_event(bp, 1, &sample, regs);
24f1e32c
FW
7040}
7041#endif
7042
b0a873eb
PZ
7043/*
7044 * hrtimer based swevent callback
7045 */
f29ac756 7046
b0a873eb 7047static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
f29ac756 7048{
b0a873eb
PZ
7049 enum hrtimer_restart ret = HRTIMER_RESTART;
7050 struct perf_sample_data data;
7051 struct pt_regs *regs;
7052 struct perf_event *event;
7053 u64 period;
f29ac756 7054
b0a873eb 7055 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
ba3dd36c
PZ
7056
7057 if (event->state != PERF_EVENT_STATE_ACTIVE)
7058 return HRTIMER_NORESTART;
7059
b0a873eb 7060 event->pmu->read(event);
f344011c 7061
fd0d000b 7062 perf_sample_data_init(&data, 0, event->hw.last_period);
b0a873eb
PZ
7063 regs = get_irq_regs();
7064
7065 if (regs && !perf_exclude_event(event, regs)) {
77aeeebd 7066 if (!(event->attr.exclude_idle && is_idle_task(current)))
33b07b8b 7067 if (__perf_event_overflow(event, 1, &data, regs))
b0a873eb
PZ
7068 ret = HRTIMER_NORESTART;
7069 }
24f1e32c 7070
b0a873eb
PZ
7071 period = max_t(u64, 10000, event->hw.sample_period);
7072 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
24f1e32c 7073
b0a873eb 7074 return ret;
f29ac756
PZ
7075}
7076
b0a873eb 7077static void perf_swevent_start_hrtimer(struct perf_event *event)
5c92d124 7078{
b0a873eb 7079 struct hw_perf_event *hwc = &event->hw;
5d508e82
FBH
7080 s64 period;
7081
7082 if (!is_sampling_event(event))
7083 return;
f5ffe02e 7084
5d508e82
FBH
7085 period = local64_read(&hwc->period_left);
7086 if (period) {
7087 if (period < 0)
7088 period = 10000;
fa407f35 7089
5d508e82
FBH
7090 local64_set(&hwc->period_left, 0);
7091 } else {
7092 period = max_t(u64, 10000, hwc->sample_period);
7093 }
3497d206
TG
7094 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
7095 HRTIMER_MODE_REL_PINNED);
24f1e32c 7096}
b0a873eb
PZ
7097
7098static void perf_swevent_cancel_hrtimer(struct perf_event *event)
24f1e32c 7099{
b0a873eb
PZ
7100 struct hw_perf_event *hwc = &event->hw;
7101
6c7e550f 7102 if (is_sampling_event(event)) {
b0a873eb 7103 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
fa407f35 7104 local64_set(&hwc->period_left, ktime_to_ns(remaining));
b0a873eb
PZ
7105
7106 hrtimer_cancel(&hwc->hrtimer);
7107 }
24f1e32c
FW
7108}
7109
ba3dd36c
PZ
7110static void perf_swevent_init_hrtimer(struct perf_event *event)
7111{
7112 struct hw_perf_event *hwc = &event->hw;
7113
7114 if (!is_sampling_event(event))
7115 return;
7116
7117 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7118 hwc->hrtimer.function = perf_swevent_hrtimer;
7119
7120 /*
7121 * Since hrtimers have a fixed rate, we can do a static freq->period
7122 * mapping and avoid the whole period adjust feedback stuff.
7123 */
7124 if (event->attr.freq) {
7125 long freq = event->attr.sample_freq;
7126
7127 event->attr.sample_period = NSEC_PER_SEC / freq;
7128 hwc->sample_period = event->attr.sample_period;
7129 local64_set(&hwc->period_left, hwc->sample_period);
778141e3 7130 hwc->last_period = hwc->sample_period;
ba3dd36c
PZ
7131 event->attr.freq = 0;
7132 }
7133}
7134
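With this static mapping, for example, attr.sample_freq = 4000 becomes a fixed sample_period of NSEC_PER_SEC / 4000 = 250000 ns, i.e. the hrtimer simply fires every 250 microseconds instead of going through the period-adjust feedback loop.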
b0a873eb
PZ
7135/*
7136 * Software event: cpu wall time clock
7137 */
7138
7139static void cpu_clock_event_update(struct perf_event *event)
24f1e32c 7140{
b0a873eb
PZ
7141 s64 prev;
7142 u64 now;
7143
a4eaf7f1 7144 now = local_clock();
b0a873eb
PZ
7145 prev = local64_xchg(&event->hw.prev_count, now);
7146 local64_add(now - prev, &event->count);
24f1e32c 7147}
24f1e32c 7148
a4eaf7f1 7149static void cpu_clock_event_start(struct perf_event *event, int flags)
b0a873eb 7150{
a4eaf7f1 7151 local64_set(&event->hw.prev_count, local_clock());
b0a873eb 7152 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
7153}
7154
a4eaf7f1 7155static void cpu_clock_event_stop(struct perf_event *event, int flags)
f29ac756 7156{
b0a873eb
PZ
7157 perf_swevent_cancel_hrtimer(event);
7158 cpu_clock_event_update(event);
7159}
f29ac756 7160
a4eaf7f1
PZ
7161static int cpu_clock_event_add(struct perf_event *event, int flags)
7162{
7163 if (flags & PERF_EF_START)
7164 cpu_clock_event_start(event, flags);
6a694a60 7165 perf_event_update_userpage(event);
a4eaf7f1
PZ
7166
7167 return 0;
7168}
7169
7170static void cpu_clock_event_del(struct perf_event *event, int flags)
7171{
7172 cpu_clock_event_stop(event, flags);
7173}
7174
b0a873eb
PZ
7175static void cpu_clock_event_read(struct perf_event *event)
7176{
7177 cpu_clock_event_update(event);
7178}
f344011c 7179
b0a873eb
PZ
7180static int cpu_clock_event_init(struct perf_event *event)
7181{
7182 if (event->attr.type != PERF_TYPE_SOFTWARE)
7183 return -ENOENT;
7184
7185 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
7186 return -ENOENT;
7187
2481c5fa
SE
7188 /*
7189 * no branch sampling for software events
7190 */
7191 if (has_branch_stack(event))
7192 return -EOPNOTSUPP;
7193
ba3dd36c
PZ
7194 perf_swevent_init_hrtimer(event);
7195
b0a873eb 7196 return 0;
f29ac756
PZ
7197}
7198
b0a873eb 7199static struct pmu perf_cpu_clock = {
89a1e187
PZ
7200 .task_ctx_nr = perf_sw_context,
7201
34f43927
PZ
7202 .capabilities = PERF_PMU_CAP_NO_NMI,
7203
b0a873eb 7204 .event_init = cpu_clock_event_init,
a4eaf7f1
PZ
7205 .add = cpu_clock_event_add,
7206 .del = cpu_clock_event_del,
7207 .start = cpu_clock_event_start,
7208 .stop = cpu_clock_event_stop,
b0a873eb
PZ
7209 .read = cpu_clock_event_read,
7210};
7211
7212/*
7213 * Software event: task time clock
7214 */
7215
7216static void task_clock_event_update(struct perf_event *event, u64 now)
5c92d124 7217{
b0a873eb
PZ
7218 u64 prev;
7219 s64 delta;
5c92d124 7220
b0a873eb
PZ
7221 prev = local64_xchg(&event->hw.prev_count, now);
7222 delta = now - prev;
7223 local64_add(delta, &event->count);
7224}
5c92d124 7225
a4eaf7f1 7226static void task_clock_event_start(struct perf_event *event, int flags)
b0a873eb 7227{
a4eaf7f1 7228 local64_set(&event->hw.prev_count, event->ctx->time);
b0a873eb 7229 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
7230}
7231
a4eaf7f1 7232static void task_clock_event_stop(struct perf_event *event, int flags)
b0a873eb
PZ
7233{
7234 perf_swevent_cancel_hrtimer(event);
7235 task_clock_event_update(event, event->ctx->time);
a4eaf7f1
PZ
7236}
7237
7238static int task_clock_event_add(struct perf_event *event, int flags)
7239{
7240 if (flags & PERF_EF_START)
7241 task_clock_event_start(event, flags);
6a694a60 7242 perf_event_update_userpage(event);
b0a873eb 7243
a4eaf7f1
PZ
7244 return 0;
7245}
7246
7247static void task_clock_event_del(struct perf_event *event, int flags)
7248{
7249 task_clock_event_stop(event, PERF_EF_UPDATE);
b0a873eb
PZ
7250}
7251
7252static void task_clock_event_read(struct perf_event *event)
7253{
768a06e2
PZ
7254 u64 now = perf_clock();
7255 u64 delta = now - event->ctx->timestamp;
7256 u64 time = event->ctx->time + delta;
b0a873eb
PZ
7257
7258 task_clock_event_update(event, time);
7259}
7260
7261static int task_clock_event_init(struct perf_event *event)
6fb2915d 7262{
b0a873eb
PZ
7263 if (event->attr.type != PERF_TYPE_SOFTWARE)
7264 return -ENOENT;
7265
7266 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
7267 return -ENOENT;
7268
2481c5fa
SE
7269 /*
7270 * no branch sampling for software events
7271 */
7272 if (has_branch_stack(event))
7273 return -EOPNOTSUPP;
7274
ba3dd36c
PZ
7275 perf_swevent_init_hrtimer(event);
7276
b0a873eb 7277 return 0;
6fb2915d
LZ
7278}
7279
b0a873eb 7280static struct pmu perf_task_clock = {
89a1e187
PZ
7281 .task_ctx_nr = perf_sw_context,
7282
34f43927
PZ
7283 .capabilities = PERF_PMU_CAP_NO_NMI,
7284
b0a873eb 7285 .event_init = task_clock_event_init,
a4eaf7f1
PZ
7286 .add = task_clock_event_add,
7287 .del = task_clock_event_del,
7288 .start = task_clock_event_start,
7289 .stop = task_clock_event_stop,
b0a873eb
PZ
7290 .read = task_clock_event_read,
7291};
6fb2915d 7292
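/*
 * Default method stubs, installed by perf_pmu_register() for PMUs that
 * do not provide their own implementations.  The *_txn helpers bracket
 * a group of ->add() calls with perf_pmu_disable()/perf_pmu_enable()
 * so hardware accesses can be batched (but only for PERF_PMU_TXN_ADD
 * transactions).
 */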
ad5133b7 7293static void perf_pmu_nop_void(struct pmu *pmu)
e077df4f 7294{
e077df4f 7295}
6fb2915d 7296
fbbe0701
SB
7297static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
7298{
7299}
7300
ad5133b7 7301static int perf_pmu_nop_int(struct pmu *pmu)
6fb2915d 7302{
ad5133b7 7303 return 0;
6fb2915d
LZ
7304}
7305
fbbe0701
SB
7306DEFINE_PER_CPU(unsigned int, nop_txn_flags);
7307
7308static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
6fb2915d 7309{
fbbe0701
SB
7310 __this_cpu_write(nop_txn_flags, flags);
7311
7312 if (flags & ~PERF_PMU_TXN_ADD)
7313 return;
7314
ad5133b7 7315 perf_pmu_disable(pmu);
6fb2915d
LZ
7316}
7317
ad5133b7
PZ
7318static int perf_pmu_commit_txn(struct pmu *pmu)
7319{
fbbe0701
SB
7320 unsigned int flags = __this_cpu_read(nop_txn_flags);
7321
7322 __this_cpu_write(nop_txn_flags, 0);
7323
7324 if (flags & ~PERF_PMU_TXN_ADD)
7325 return 0;
7326
ad5133b7
PZ
7327 perf_pmu_enable(pmu);
7328 return 0;
7329}
e077df4f 7330
ad5133b7 7331static void perf_pmu_cancel_txn(struct pmu *pmu)
24f1e32c 7332{
fbbe0701
SB
7333 unsigned int flags = __this_cpu_read(nop_txn_flags);
7334
7335 __this_cpu_write(nop_txn_flags, 0);
7336
7337 if (flags & ~PERF_PMU_TXN_ADD)
7338 return;
7339
ad5133b7 7340 perf_pmu_enable(pmu);
24f1e32c
FW
7341}
7342
35edc2a5
PZ
7343static int perf_event_idx_default(struct perf_event *event)
7344{
c719f560 7345 return 0;
35edc2a5
PZ
7346}
7347
8dc85d54
PZ
7348/*
7349 * Ensures all contexts with the same task_ctx_nr have the same
7350 * pmu_cpu_context too.
7351 */
9e317041 7352static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
24f1e32c 7353{
8dc85d54 7354 struct pmu *pmu;
b326e956 7355
8dc85d54
PZ
7356 if (ctxn < 0)
7357 return NULL;
24f1e32c 7358
8dc85d54
PZ
7359 list_for_each_entry(pmu, &pmus, entry) {
7360 if (pmu->task_ctx_nr == ctxn)
7361 return pmu->pmu_cpu_context;
7362 }
24f1e32c 7363
8dc85d54 7364 return NULL;
24f1e32c
FW
7365}
7366
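/*
 * PMUs with the same task_ctx_nr share a pmu_cpu_context (see
 * find_pmu_context() above).  When one of them is unregistered,
 * update_pmu_context() re-points cpuctx->unique_pmu at a surviving PMU
 * and free_pmu_context() only frees the percpu data once no other PMU
 * still references it.
 */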
51676957 7367static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
24f1e32c 7368{
51676957
PZ
7369 int cpu;
7370
7371 for_each_possible_cpu(cpu) {
7372 struct perf_cpu_context *cpuctx;
7373
7374 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
7375
3f1f3320
PZ
7376 if (cpuctx->unique_pmu == old_pmu)
7377 cpuctx->unique_pmu = pmu;
51676957
PZ
7378 }
7379}
7380
7381static void free_pmu_context(struct pmu *pmu)
7382{
7383 struct pmu *i;
f5ffe02e 7384
8dc85d54 7385 mutex_lock(&pmus_lock);
0475f9ea 7386 /*
8dc85d54 7387 * Like a real lame refcount.
0475f9ea 7388 */
51676957
PZ
7389 list_for_each_entry(i, &pmus, entry) {
7390 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
7391 update_pmu_context(i, pmu);
8dc85d54 7392 goto out;
51676957 7393 }
8dc85d54 7394 }
d6d020e9 7395
51676957 7396 free_percpu(pmu->pmu_cpu_context);
8dc85d54
PZ
7397out:
7398 mutex_unlock(&pmus_lock);
24f1e32c 7399}
2e80a82a 7400static struct idr pmu_idr;
d6d020e9 7401
abe43400
PZ
7402static ssize_t
7403type_show(struct device *dev, struct device_attribute *attr, char *page)
7404{
7405 struct pmu *pmu = dev_get_drvdata(dev);
7406
7407 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
7408}
90826ca7 7409static DEVICE_ATTR_RO(type);
abe43400 7410
62b85639
SE
7411static ssize_t
7412perf_event_mux_interval_ms_show(struct device *dev,
7413 struct device_attribute *attr,
7414 char *page)
7415{
7416 struct pmu *pmu = dev_get_drvdata(dev);
7417
7418 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
7419}
7420
272325c4
PZ
7421static DEFINE_MUTEX(mux_interval_mutex);
7422
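/*
 * Writing perf_event_mux_interval_ms changes the multiplexing hrtimer
 * period for this PMU: the new value (in milliseconds, >= 1) is copied
 * into every online CPU's cpuctx and the hrtimer is restarted on that
 * CPU via cpu_function_call().
 */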
62b85639
SE
7423static ssize_t
7424perf_event_mux_interval_ms_store(struct device *dev,
7425 struct device_attribute *attr,
7426 const char *buf, size_t count)
7427{
7428 struct pmu *pmu = dev_get_drvdata(dev);
7429 int timer, cpu, ret;
7430
7431 ret = kstrtoint(buf, 0, &timer);
7432 if (ret)
7433 return ret;
7434
7435 if (timer < 1)
7436 return -EINVAL;
7437
7438	/* same value, nothing to do */
7439 if (timer == pmu->hrtimer_interval_ms)
7440 return count;
7441
272325c4 7442 mutex_lock(&mux_interval_mutex);
62b85639
SE
7443 pmu->hrtimer_interval_ms = timer;
7444
7445 /* update all cpuctx for this PMU */
272325c4
PZ
7446 get_online_cpus();
7447 for_each_online_cpu(cpu) {
62b85639
SE
7448 struct perf_cpu_context *cpuctx;
7449 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
7450 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
7451
272325c4
PZ
7452 cpu_function_call(cpu,
7453 (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
62b85639 7454 }
272325c4
PZ
7455 put_online_cpus();
7456 mutex_unlock(&mux_interval_mutex);
62b85639
SE
7457
7458 return count;
7459}
90826ca7 7460static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
62b85639 7461
90826ca7
GKH
7462static struct attribute *pmu_dev_attrs[] = {
7463 &dev_attr_type.attr,
7464 &dev_attr_perf_event_mux_interval_ms.attr,
7465 NULL,
abe43400 7466};
90826ca7 7467ATTRIBUTE_GROUPS(pmu_dev);
abe43400
PZ
7468
7469static int pmu_bus_running;
7470static struct bus_type pmu_bus = {
7471 .name = "event_source",
90826ca7 7472 .dev_groups = pmu_dev_groups,
abe43400
PZ
7473};
7474
7475static void pmu_dev_release(struct device *dev)
7476{
7477 kfree(dev);
7478}
7479
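/*
 * Allocate and register the sysfs device for a PMU on the
 * "event_source" bus, exposing the common attributes above ("type",
 * "perf_event_mux_interval_ms") plus any PMU specific attr_groups.
 */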
7480static int pmu_dev_alloc(struct pmu *pmu)
7481{
7482 int ret = -ENOMEM;
7483
7484 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
7485 if (!pmu->dev)
7486 goto out;
7487
0c9d42ed 7488 pmu->dev->groups = pmu->attr_groups;
abe43400
PZ
7489 device_initialize(pmu->dev);
7490 ret = dev_set_name(pmu->dev, "%s", pmu->name);
7491 if (ret)
7492 goto free_dev;
7493
7494 dev_set_drvdata(pmu->dev, pmu);
7495 pmu->dev->bus = &pmu_bus;
7496 pmu->dev->release = pmu_dev_release;
7497 ret = device_add(pmu->dev);
7498 if (ret)
7499 goto free_dev;
7500
7501out:
7502 return ret;
7503
7504free_dev:
7505 put_device(pmu->dev);
7506 goto out;
7507}
7508
547e9fd7 7509static struct lock_class_key cpuctx_mutex;
facc4307 7510static struct lock_class_key cpuctx_lock;
547e9fd7 7511
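/*
 * Register a new PMU: allocate the percpu pmu_disable_count, assign a
 * type id (dynamically via the idr when @type < 0), create the sysfs
 * device if the bus is up, find or allocate the pmu_cpu_context, and
 * fill in default callbacks for any optional methods left NULL.
 */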
03d8e80b 7512int perf_pmu_register(struct pmu *pmu, const char *name, int type)
24f1e32c 7513{
108b02cf 7514 int cpu, ret;
24f1e32c 7515
b0a873eb 7516 mutex_lock(&pmus_lock);
33696fc0
PZ
7517 ret = -ENOMEM;
7518 pmu->pmu_disable_count = alloc_percpu(int);
7519 if (!pmu->pmu_disable_count)
7520 goto unlock;
f29ac756 7521
2e80a82a
PZ
7522 pmu->type = -1;
7523 if (!name)
7524 goto skip_type;
7525 pmu->name = name;
7526
7527 if (type < 0) {
0e9c3be2
TH
7528 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
7529 if (type < 0) {
7530 ret = type;
2e80a82a
PZ
7531 goto free_pdc;
7532 }
7533 }
7534 pmu->type = type;
7535
abe43400
PZ
7536 if (pmu_bus_running) {
7537 ret = pmu_dev_alloc(pmu);
7538 if (ret)
7539 goto free_idr;
7540 }
7541
2e80a82a 7542skip_type:
8dc85d54
PZ
7543 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
7544 if (pmu->pmu_cpu_context)
7545 goto got_cpu_context;
f29ac756 7546
c4814202 7547 ret = -ENOMEM;
108b02cf
PZ
7548 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
7549 if (!pmu->pmu_cpu_context)
abe43400 7550 goto free_dev;
f344011c 7551
108b02cf
PZ
7552 for_each_possible_cpu(cpu) {
7553 struct perf_cpu_context *cpuctx;
7554
7555 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
eb184479 7556 __perf_event_init_context(&cpuctx->ctx);
547e9fd7 7557 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
facc4307 7558 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
108b02cf 7559 cpuctx->ctx.pmu = pmu;
9e630205 7560
272325c4 7561 __perf_mux_hrtimer_init(cpuctx, cpu);
9e630205 7562
3f1f3320 7563 cpuctx->unique_pmu = pmu;
108b02cf 7564 }
76e1d904 7565
8dc85d54 7566got_cpu_context:
ad5133b7
PZ
7567 if (!pmu->start_txn) {
7568 if (pmu->pmu_enable) {
7569 /*
7570 * If we have pmu_enable/pmu_disable calls, install
7572			 * transaction stubs that use them to try and batch
7572 * hardware accesses.
7573 */
7574 pmu->start_txn = perf_pmu_start_txn;
7575 pmu->commit_txn = perf_pmu_commit_txn;
7576 pmu->cancel_txn = perf_pmu_cancel_txn;
7577 } else {
fbbe0701 7578 pmu->start_txn = perf_pmu_nop_txn;
ad5133b7
PZ
7579 pmu->commit_txn = perf_pmu_nop_int;
7580 pmu->cancel_txn = perf_pmu_nop_void;
f344011c 7581 }
5c92d124 7582 }
15dbf27c 7583
ad5133b7
PZ
7584 if (!pmu->pmu_enable) {
7585 pmu->pmu_enable = perf_pmu_nop_void;
7586 pmu->pmu_disable = perf_pmu_nop_void;
7587 }
7588
35edc2a5
PZ
7589 if (!pmu->event_idx)
7590 pmu->event_idx = perf_event_idx_default;
7591
b0a873eb 7592 list_add_rcu(&pmu->entry, &pmus);
bed5b25a 7593 atomic_set(&pmu->exclusive_cnt, 0);
33696fc0
PZ
7594 ret = 0;
7595unlock:
b0a873eb
PZ
7596 mutex_unlock(&pmus_lock);
7597
33696fc0 7598 return ret;
108b02cf 7599
abe43400
PZ
7600free_dev:
7601 device_del(pmu->dev);
7602 put_device(pmu->dev);
7603
2e80a82a
PZ
7604free_idr:
7605 if (pmu->type >= PERF_TYPE_MAX)
7606 idr_remove(&pmu_idr, pmu->type);
7607
108b02cf
PZ
7608free_pdc:
7609 free_percpu(pmu->pmu_disable_count);
7610 goto unlock;
f29ac756 7611}
c464c76e 7612EXPORT_SYMBOL_GPL(perf_pmu_register);
f29ac756 7613
b0a873eb 7614void perf_pmu_unregister(struct pmu *pmu)
5c92d124 7615{
b0a873eb
PZ
7616 mutex_lock(&pmus_lock);
7617 list_del_rcu(&pmu->entry);
7618 mutex_unlock(&pmus_lock);
5c92d124 7619
0475f9ea 7620 /*
cde8e884
PZ
7621 * We dereference the pmu list under both SRCU and regular RCU, so
7622 * synchronize against both of those.
0475f9ea 7623 */
b0a873eb 7624 synchronize_srcu(&pmus_srcu);
cde8e884 7625 synchronize_rcu();
d6d020e9 7626
33696fc0 7627 free_percpu(pmu->pmu_disable_count);
2e80a82a
PZ
7628 if (pmu->type >= PERF_TYPE_MAX)
7629 idr_remove(&pmu_idr, pmu->type);
abe43400
PZ
7630 device_del(pmu->dev);
7631 put_device(pmu->dev);
51676957 7632 free_pmu_context(pmu);
b0a873eb 7633}
c464c76e 7634EXPORT_SYMBOL_GPL(perf_pmu_unregister);
d6d020e9 7635
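/*
 * Try to initialize @event with @pmu.  A reference on the PMU's module
 * is taken up front and dropped again if ->event_init() fails; for
 * non-leader events the group leader's ctx is locked around
 * ->event_init(), see the nesting comment below.
 */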
cc34b98b
MR
7636static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
7637{
ccd41c86 7638 struct perf_event_context *ctx = NULL;
cc34b98b
MR
7639 int ret;
7640
7641 if (!try_module_get(pmu->module))
7642 return -ENODEV;
ccd41c86
PZ
7643
7644 if (event->group_leader != event) {
8b10c5e2
PZ
7645 /*
7646 * This ctx->mutex can nest when we're called through
7647 * inheritance. See the perf_event_ctx_lock_nested() comment.
7648 */
7649 ctx = perf_event_ctx_lock_nested(event->group_leader,
7650 SINGLE_DEPTH_NESTING);
ccd41c86
PZ
7651 BUG_ON(!ctx);
7652 }
7653
cc34b98b
MR
7654 event->pmu = pmu;
7655 ret = pmu->event_init(event);
ccd41c86
PZ
7656
7657 if (ctx)
7658 perf_event_ctx_unlock(event->group_leader, ctx);
7659
cc34b98b
MR
7660 if (ret)
7661 module_put(pmu->module);
7662
7663 return ret;
7664}
7665
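/*
 * Find a PMU willing to accept @event: first a direct idr lookup on
 * event->attr.type, then a walk of the pmus list taking the first PMU
 * whose ->event_init() does not return -ENOENT.
 */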
b0a873eb
PZ
7666struct pmu *perf_init_event(struct perf_event *event)
7667{
7668 struct pmu *pmu = NULL;
7669 int idx;
940c5b29 7670 int ret;
b0a873eb
PZ
7671
7672 idx = srcu_read_lock(&pmus_srcu);
2e80a82a
PZ
7673
7674 rcu_read_lock();
7675 pmu = idr_find(&pmu_idr, event->attr.type);
7676 rcu_read_unlock();
940c5b29 7677 if (pmu) {
cc34b98b 7678 ret = perf_try_init_event(pmu, event);
940c5b29
LM
7679 if (ret)
7680 pmu = ERR_PTR(ret);
2e80a82a 7681 goto unlock;
940c5b29 7682 }
2e80a82a 7683
b0a873eb 7684 list_for_each_entry_rcu(pmu, &pmus, entry) {
cc34b98b 7685 ret = perf_try_init_event(pmu, event);
b0a873eb 7686 if (!ret)
e5f4d339 7687 goto unlock;
76e1d904 7688
b0a873eb
PZ
7689 if (ret != -ENOENT) {
7690 pmu = ERR_PTR(ret);
e5f4d339 7691 goto unlock;
f344011c 7692 }
5c92d124 7693 }
e5f4d339
PZ
7694 pmu = ERR_PTR(-ENOENT);
7695unlock:
b0a873eb 7696 srcu_read_unlock(&pmus_srcu, idx);
15dbf27c 7697
4aeb0b42 7698 return pmu;
5c92d124
IM
7699}
7700
4beb31f3
FW
7701static void account_event_cpu(struct perf_event *event, int cpu)
7702{
7703 if (event->parent)
7704 return;
7705
4beb31f3
FW
7706 if (is_cgroup_event(event))
7707 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
7708}
7709
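/*
 * Account a newly created (non-inherited) event: bump the global
 * event-type counters and, where the event needs context-switch work
 * (per-task, branch stack, cgroup, context_switch records), the
 * perf_sched_events static key.
 */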
766d6c07
FW
7710static void account_event(struct perf_event *event)
7711{
4beb31f3
FW
7712 if (event->parent)
7713 return;
7714
766d6c07
FW
7715 if (event->attach_state & PERF_ATTACH_TASK)
7716 static_key_slow_inc(&perf_sched_events.key);
7717 if (event->attr.mmap || event->attr.mmap_data)
7718 atomic_inc(&nr_mmap_events);
7719 if (event->attr.comm)
7720 atomic_inc(&nr_comm_events);
7721 if (event->attr.task)
7722 atomic_inc(&nr_task_events);
948b26b6
FW
7723 if (event->attr.freq) {
7724 if (atomic_inc_return(&nr_freq_events) == 1)
7725 tick_nohz_full_kick_all();
7726 }
45ac1403
AH
7727 if (event->attr.context_switch) {
7728 atomic_inc(&nr_switch_events);
7729 static_key_slow_inc(&perf_sched_events.key);
7730 }
4beb31f3 7731 if (has_branch_stack(event))
766d6c07 7732 static_key_slow_inc(&perf_sched_events.key);
4beb31f3 7733 if (is_cgroup_event(event))
766d6c07 7734 static_key_slow_inc(&perf_sched_events.key);
4beb31f3
FW
7735
7736 account_event_cpu(event, event->cpu);
766d6c07
FW
7737}
7738
0793a61d 7739/*
cdd6c482 7740 * Allocate and initialize an event structure
0793a61d 7741 */
cdd6c482 7742static struct perf_event *
c3f00c70 7743perf_event_alloc(struct perf_event_attr *attr, int cpu,
d580ff86
PZ
7744 struct task_struct *task,
7745 struct perf_event *group_leader,
7746 struct perf_event *parent_event,
4dc0da86 7747 perf_overflow_handler_t overflow_handler,
79dff51e 7748 void *context, int cgroup_fd)
0793a61d 7749{
51b0fe39 7750 struct pmu *pmu;
cdd6c482
IM
7751 struct perf_event *event;
7752 struct hw_perf_event *hwc;
90983b16 7753 long err = -EINVAL;
0793a61d 7754
66832eb4
ON
7755 if ((unsigned)cpu >= nr_cpu_ids) {
7756 if (!task || cpu != -1)
7757 return ERR_PTR(-EINVAL);
7758 }
7759
c3f00c70 7760 event = kzalloc(sizeof(*event), GFP_KERNEL);
cdd6c482 7761 if (!event)
d5d2bc0d 7762 return ERR_PTR(-ENOMEM);
0793a61d 7763
04289bb9 7764 /*
cdd6c482 7765 * Single events are their own group leaders, with an
04289bb9
IM
7766 * empty sibling list:
7767 */
7768 if (!group_leader)
cdd6c482 7769 group_leader = event;
04289bb9 7770
cdd6c482
IM
7771 mutex_init(&event->child_mutex);
7772 INIT_LIST_HEAD(&event->child_list);
fccc714b 7773
cdd6c482
IM
7774 INIT_LIST_HEAD(&event->group_entry);
7775 INIT_LIST_HEAD(&event->event_entry);
7776 INIT_LIST_HEAD(&event->sibling_list);
10c6db11 7777 INIT_LIST_HEAD(&event->rb_entry);
71ad88ef 7778 INIT_LIST_HEAD(&event->active_entry);
f3ae75de
SE
7779 INIT_HLIST_NODE(&event->hlist_entry);
7780
10c6db11 7781
cdd6c482 7782 init_waitqueue_head(&event->waitq);
e360adbe 7783 init_irq_work(&event->pending, perf_pending_event);
0793a61d 7784
cdd6c482 7785 mutex_init(&event->mmap_mutex);
7b732a75 7786
a6fa941d 7787 atomic_long_set(&event->refcount, 1);
cdd6c482
IM
7788 event->cpu = cpu;
7789 event->attr = *attr;
7790 event->group_leader = group_leader;
7791 event->pmu = NULL;
cdd6c482 7792 event->oncpu = -1;
a96bbc16 7793
cdd6c482 7794 event->parent = parent_event;
b84fbc9f 7795
17cf22c3 7796 event->ns = get_pid_ns(task_active_pid_ns(current));
cdd6c482 7797 event->id = atomic64_inc_return(&perf_event_id);
a96bbc16 7798
cdd6c482 7799 event->state = PERF_EVENT_STATE_INACTIVE;
329d876d 7800
d580ff86
PZ
7801 if (task) {
7802 event->attach_state = PERF_ATTACH_TASK;
d580ff86 7803 /*
50f16a8b
PZ
7804 * XXX pmu::event_init needs to know what task to account to
7805 * and we cannot use the ctx information because we need the
7806 * pmu before we get a ctx.
d580ff86 7807 */
50f16a8b 7808 event->hw.target = task;
d580ff86
PZ
7809 }
7810
34f43927
PZ
7811 event->clock = &local_clock;
7812 if (parent_event)
7813 event->clock = parent_event->clock;
7814
4dc0da86 7815 if (!overflow_handler && parent_event) {
b326e956 7816 overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
7817 context = parent_event->overflow_handler_context;
7818 }
66832eb4 7819
b326e956 7820 event->overflow_handler = overflow_handler;
4dc0da86 7821 event->overflow_handler_context = context;
97eaf530 7822
0231bb53 7823 perf_event__state_init(event);
a86ed508 7824
4aeb0b42 7825 pmu = NULL;
b8e83514 7826
cdd6c482 7827 hwc = &event->hw;
bd2b5b12 7828 hwc->sample_period = attr->sample_period;
0d48696f 7829 if (attr->freq && attr->sample_freq)
bd2b5b12 7830 hwc->sample_period = 1;
eced1dfc 7831 hwc->last_period = hwc->sample_period;
bd2b5b12 7832
e7850595 7833 local64_set(&hwc->period_left, hwc->sample_period);
60db5e09 7834
2023b359 7835 /*
cdd6c482 7836 * we currently do not support PERF_FORMAT_GROUP on inherited events
2023b359 7837 */
3dab77fb 7838 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
90983b16 7839 goto err_ns;
a46a2300
YZ
7840
7841 if (!has_branch_stack(event))
7842 event->attr.branch_sample_type = 0;
2023b359 7843
79dff51e
MF
7844 if (cgroup_fd != -1) {
7845 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
7846 if (err)
7847 goto err_ns;
7848 }
7849
b0a873eb 7850 pmu = perf_init_event(event);
4aeb0b42 7851 if (!pmu)
90983b16
FW
7852 goto err_ns;
7853 else if (IS_ERR(pmu)) {
4aeb0b42 7854 err = PTR_ERR(pmu);
90983b16 7855 goto err_ns;
621a01ea 7856 }
d5d2bc0d 7857
bed5b25a
AS
7858 err = exclusive_event_init(event);
7859 if (err)
7860 goto err_pmu;
7861
cdd6c482 7862 if (!event->parent) {
927c7a9e
FW
7863 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
7864 err = get_callchain_buffers();
90983b16 7865 if (err)
bed5b25a 7866 goto err_per_task;
d010b332 7867 }
f344011c 7868 }
9ee318a7 7869
cdd6c482 7870 return event;
90983b16 7871
bed5b25a
AS
7872err_per_task:
7873 exclusive_event_destroy(event);
7874
90983b16
FW
7875err_pmu:
7876 if (event->destroy)
7877 event->destroy(event);
c464c76e 7878 module_put(pmu->module);
90983b16 7879err_ns:
79dff51e
MF
7880 if (is_cgroup_event(event))
7881 perf_detach_cgroup(event);
90983b16
FW
7882 if (event->ns)
7883 put_pid_ns(event->ns);
7884 kfree(event);
7885
7886 return ERR_PTR(err);
0793a61d
TG
7887}
7888
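/*
 * Copy a perf_event_attr from user space, handling ABI growth in both
 * directions: an older (smaller) attr is zero-extended, while a newer
 * (larger) attr is accepted only if every byte beyond our sizeof(*attr)
 * is zero, so unknown features are never silently ignored.
 */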
cdd6c482
IM
7889static int perf_copy_attr(struct perf_event_attr __user *uattr,
7890 struct perf_event_attr *attr)
974802ea 7891{
974802ea 7892 u32 size;
cdf8073d 7893 int ret;
974802ea
PZ
7894
7895 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
7896 return -EFAULT;
7897
7898 /*
7899 * zero the full structure, so that a short copy will be nice.
7900 */
7901 memset(attr, 0, sizeof(*attr));
7902
7903 ret = get_user(size, &uattr->size);
7904 if (ret)
7905 return ret;
7906
7907 if (size > PAGE_SIZE) /* silly large */
7908 goto err_size;
7909
7910 if (!size) /* abi compat */
7911 size = PERF_ATTR_SIZE_VER0;
7912
7913 if (size < PERF_ATTR_SIZE_VER0)
7914 goto err_size;
7915
7916 /*
7917 * If we're handed a bigger struct than we know of,
cdf8073d
IS
7918 * ensure all the unknown bits are 0 - i.e. new
7919 * user-space does not rely on any kernel feature
7920	 * extensions we don't know about yet.
974802ea
PZ
7921 */
7922 if (size > sizeof(*attr)) {
cdf8073d
IS
7923 unsigned char __user *addr;
7924 unsigned char __user *end;
7925 unsigned char val;
974802ea 7926
cdf8073d
IS
7927 addr = (void __user *)uattr + sizeof(*attr);
7928 end = (void __user *)uattr + size;
974802ea 7929
cdf8073d 7930 for (; addr < end; addr++) {
974802ea
PZ
7931 ret = get_user(val, addr);
7932 if (ret)
7933 return ret;
7934 if (val)
7935 goto err_size;
7936 }
b3e62e35 7937 size = sizeof(*attr);
974802ea
PZ
7938 }
7939
7940 ret = copy_from_user(attr, uattr, size);
7941 if (ret)
7942 return -EFAULT;
7943
cd757645 7944 if (attr->__reserved_1)
974802ea
PZ
7945 return -EINVAL;
7946
7947 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
7948 return -EINVAL;
7949
7950 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
7951 return -EINVAL;
7952
bce38cd5
SE
7953 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
7954 u64 mask = attr->branch_sample_type;
7955
7956 /* only using defined bits */
7957 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
7958 return -EINVAL;
7959
7960 /* at least one branch bit must be set */
7961 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
7962 return -EINVAL;
7963
bce38cd5
SE
7964 /* propagate priv level, when not set for branch */
7965 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
7966
7967 /* exclude_kernel checked on syscall entry */
7968 if (!attr->exclude_kernel)
7969 mask |= PERF_SAMPLE_BRANCH_KERNEL;
7970
7971 if (!attr->exclude_user)
7972 mask |= PERF_SAMPLE_BRANCH_USER;
7973
7974 if (!attr->exclude_hv)
7975 mask |= PERF_SAMPLE_BRANCH_HV;
7976 /*
7977 * adjust user setting (for HW filter setup)
7978 */
7979 attr->branch_sample_type = mask;
7980 }
e712209a
SE
7981 /* privileged levels capture (kernel, hv): check permissions */
7982 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
2b923c8f
SE
7983 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
7984 return -EACCES;
bce38cd5 7985 }
4018994f 7986
c5ebcedb 7987 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
4018994f 7988 ret = perf_reg_validate(attr->sample_regs_user);
c5ebcedb
JO
7989 if (ret)
7990 return ret;
7991 }
7992
7993 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
7994 if (!arch_perf_have_user_stack_dump())
7995 return -ENOSYS;
7996
7997 /*
7998 * We have __u32 type for the size, but so far
7999 * we can only use __u16 as maximum due to the
8000 * __u16 sample size limit.
8001 */
8002 if (attr->sample_stack_user >= USHRT_MAX)
8003 ret = -EINVAL;
8004 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
8005 ret = -EINVAL;
8006 }
4018994f 8007
60e2364e
SE
8008 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
8009 ret = perf_reg_validate(attr->sample_regs_intr);
974802ea
PZ
8010out:
8011 return ret;
8012
8013err_size:
8014 put_user(sizeof(*attr), &uattr->size);
8015 ret = -E2BIG;
8016 goto out;
8017}
8018
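/*
 * Redirect @event's output into @output_event's ring buffer, or detach
 * it when @output_event is NULL.  An event may not redirect to itself,
 * and both events must agree on cpu/task, clock and, when both carry
 * AUX data, on the PMU.
 */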
ac9721f3
PZ
8019static int
8020perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
a4be7c27 8021{
b69cf536 8022 struct ring_buffer *rb = NULL;
a4be7c27
PZ
8023 int ret = -EINVAL;
8024
ac9721f3 8025 if (!output_event)
a4be7c27
PZ
8026 goto set;
8027
ac9721f3
PZ
8028 /* don't allow circular references */
8029 if (event == output_event)
a4be7c27
PZ
8030 goto out;
8031
0f139300
PZ
8032 /*
8033 * Don't allow cross-cpu buffers
8034 */
8035 if (output_event->cpu != event->cpu)
8036 goto out;
8037
8038 /*
76369139 8039	 * If it's not a per-cpu rb, it must be the same task.
0f139300
PZ
8040 */
8041 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
8042 goto out;
8043
34f43927
PZ
8044 /*
8045 * Mixing clocks in the same buffer is trouble you don't need.
8046 */
8047 if (output_event->clock != event->clock)
8048 goto out;
8049
45bfb2e5
PZ
8050 /*
8051 * If both events generate aux data, they must be on the same PMU
8052 */
8053 if (has_aux(event) && has_aux(output_event) &&
8054 event->pmu != output_event->pmu)
8055 goto out;
8056
a4be7c27 8057set:
cdd6c482 8058 mutex_lock(&event->mmap_mutex);
ac9721f3
PZ
8059 /* Can't redirect output if we've got an active mmap() */
8060 if (atomic_read(&event->mmap_count))
8061 goto unlock;
a4be7c27 8062
ac9721f3 8063 if (output_event) {
76369139
FW
8064 /* get the rb we want to redirect to */
8065 rb = ring_buffer_get(output_event);
8066 if (!rb)
ac9721f3 8067 goto unlock;
a4be7c27
PZ
8068 }
8069
b69cf536 8070 ring_buffer_attach(event, rb);
9bb5d40c 8071
a4be7c27 8072 ret = 0;
ac9721f3
PZ
8073unlock:
8074 mutex_unlock(&event->mmap_mutex);
8075
a4be7c27 8076out:
a4be7c27
PZ
8077 return ret;
8078}
8079
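/*
 * Lock two mutexes in a fixed (address) order so that two contexts can
 * always be locked together without risking an ABBA deadlock.
 */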
f63a8daa
PZ
8080static void mutex_lock_double(struct mutex *a, struct mutex *b)
8081{
8082 if (b < a)
8083 swap(a, b);
8084
8085 mutex_lock(a);
8086 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
8087}
8088
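/*
 * Select the clock used to timestamp this event, as requested through
 * attr.use_clockid/attr.clockid.  Clocks that are not NMI-safe are only
 * allowed on PMUs advertising PERF_PMU_CAP_NO_NMI.
 */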
34f43927
PZ
8089static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
8090{
8091 bool nmi_safe = false;
8092
8093 switch (clk_id) {
8094 case CLOCK_MONOTONIC:
8095 event->clock = &ktime_get_mono_fast_ns;
8096 nmi_safe = true;
8097 break;
8098
8099 case CLOCK_MONOTONIC_RAW:
8100 event->clock = &ktime_get_raw_fast_ns;
8101 nmi_safe = true;
8102 break;
8103
8104 case CLOCK_REALTIME:
8105 event->clock = &ktime_get_real_ns;
8106 break;
8107
8108 case CLOCK_BOOTTIME:
8109 event->clock = &ktime_get_boot_ns;
8110 break;
8111
8112 case CLOCK_TAI:
8113 event->clock = &ktime_get_tai_ns;
8114 break;
8115
8116 default:
8117 return -EINVAL;
8118 }
8119
8120 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
8121 return -EINVAL;
8122
8123 return 0;
8124}
8125
0793a61d 8126/**
cdd6c482 8127 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9f66a381 8128 *
cdd6c482 8129 * @attr_uptr: event_id type attributes for monitoring/sampling
0793a61d 8130 * @pid: target pid
9f66a381 8131 * @cpu: target cpu
cdd6c482 8132 * @group_fd: group leader event fd
 * @flags: perf event open flags (PERF_FLAG_*)
0793a61d 8133 */
cdd6c482
IM
8134SYSCALL_DEFINE5(perf_event_open,
8135 struct perf_event_attr __user *, attr_uptr,
2743a5b0 8136 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
0793a61d 8137{
b04243ef
PZ
8138 struct perf_event *group_leader = NULL, *output_event = NULL;
8139 struct perf_event *event, *sibling;
cdd6c482 8140 struct perf_event_attr attr;
f63a8daa 8141 struct perf_event_context *ctx, *uninitialized_var(gctx);
cdd6c482 8142 struct file *event_file = NULL;
2903ff01 8143 struct fd group = {NULL, 0};
38a81da2 8144 struct task_struct *task = NULL;
89a1e187 8145 struct pmu *pmu;
ea635c64 8146 int event_fd;
b04243ef 8147 int move_group = 0;
dc86cabe 8148 int err;
a21b0b35 8149 int f_flags = O_RDWR;
79dff51e 8150 int cgroup_fd = -1;
0793a61d 8151
2743a5b0 8152 /* for future expandability... */
e5d1367f 8153 if (flags & ~PERF_FLAG_ALL)
2743a5b0
PM
8154 return -EINVAL;
8155
dc86cabe
IM
8156 err = perf_copy_attr(attr_uptr, &attr);
8157 if (err)
8158 return err;
eab656ae 8159
0764771d
PZ
8160 if (!attr.exclude_kernel) {
8161 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
8162 return -EACCES;
8163 }
8164
df58ab24 8165 if (attr.freq) {
cdd6c482 8166 if (attr.sample_freq > sysctl_perf_event_sample_rate)
df58ab24 8167 return -EINVAL;
0819b2e3
PZ
8168 } else {
8169 if (attr.sample_period & (1ULL << 63))
8170 return -EINVAL;
df58ab24
PZ
8171 }
8172
e5d1367f
SE
8173 /*
8174 * In cgroup mode, the pid argument is used to pass the fd
8175 * opened to the cgroup directory in cgroupfs. The cpu argument
8176 * designates the cpu on which to monitor threads from that
8177 * cgroup.
8178 */
8179 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
8180 return -EINVAL;
8181
a21b0b35
YD
8182 if (flags & PERF_FLAG_FD_CLOEXEC)
8183 f_flags |= O_CLOEXEC;
8184
8185 event_fd = get_unused_fd_flags(f_flags);
ea635c64
AV
8186 if (event_fd < 0)
8187 return event_fd;
8188
ac9721f3 8189 if (group_fd != -1) {
2903ff01
AV
8190 err = perf_fget_light(group_fd, &group);
8191 if (err)
d14b12d7 8192 goto err_fd;
2903ff01 8193 group_leader = group.file->private_data;
ac9721f3
PZ
8194 if (flags & PERF_FLAG_FD_OUTPUT)
8195 output_event = group_leader;
8196 if (flags & PERF_FLAG_FD_NO_GROUP)
8197 group_leader = NULL;
8198 }
8199
e5d1367f 8200 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
c6be5a5c
PZ
8201 task = find_lively_task_by_vpid(pid);
8202 if (IS_ERR(task)) {
8203 err = PTR_ERR(task);
8204 goto err_group_fd;
8205 }
8206 }
8207
1f4ee503
PZ
8208 if (task && group_leader &&
8209 group_leader->attr.inherit != attr.inherit) {
8210 err = -EINVAL;
8211 goto err_task;
8212 }
8213
fbfc623f
YZ
8214 get_online_cpus();
8215
79dff51e
MF
8216 if (flags & PERF_FLAG_PID_CGROUP)
8217 cgroup_fd = pid;
8218
4dc0da86 8219 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
79dff51e 8220 NULL, NULL, cgroup_fd);
d14b12d7
SE
8221 if (IS_ERR(event)) {
8222 err = PTR_ERR(event);
1f4ee503 8223 goto err_cpus;
d14b12d7
SE
8224 }
8225
53b25335
VW
8226 if (is_sampling_event(event)) {
8227 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
8228 err = -ENOTSUPP;
8229 goto err_alloc;
8230 }
8231 }
8232
766d6c07
FW
8233 account_event(event);
8234
89a1e187
PZ
8235 /*
8236 * Special case software events and allow them to be part of
8237 * any hardware group.
8238 */
8239 pmu = event->pmu;
b04243ef 8240
34f43927
PZ
8241 if (attr.use_clockid) {
8242 err = perf_event_set_clock(event, attr.clockid);
8243 if (err)
8244 goto err_alloc;
8245 }
8246
b04243ef
PZ
8247 if (group_leader &&
8248 (is_software_event(event) != is_software_event(group_leader))) {
8249 if (is_software_event(event)) {
8250 /*
8251 * If event and group_leader are not both a software
8252 * event, and event is, then group leader is not.
8253 *
8254 * Allow the addition of software events to !software
8255 * groups, this is safe because software events never
8256 * fail to schedule.
8257 */
8258 pmu = group_leader->pmu;
8259 } else if (is_software_event(group_leader) &&
8260 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
8261 /*
8262 * In case the group is a pure software group, and we
8263 * try to add a hardware event, move the whole group to
8264 * the hardware context.
8265 */
8266 move_group = 1;
8267 }
8268 }
89a1e187
PZ
8269
8270 /*
8271 * Get the target context (task or percpu):
8272 */
4af57ef2 8273 ctx = find_get_context(pmu, task, event);
89a1e187
PZ
8274 if (IS_ERR(ctx)) {
8275 err = PTR_ERR(ctx);
c6be5a5c 8276 goto err_alloc;
89a1e187
PZ
8277 }
8278
bed5b25a
AS
8279 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
8280 err = -EBUSY;
8281 goto err_context;
8282 }
8283
fd1edb3a
PZ
8284 if (task) {
8285 put_task_struct(task);
8286 task = NULL;
8287 }
8288
ccff286d 8289 /*
cdd6c482 8290 * Look up the group leader (we will attach this event to it):
04289bb9 8291 */
ac9721f3 8292 if (group_leader) {
dc86cabe 8293 err = -EINVAL;
04289bb9 8294
04289bb9 8295 /*
ccff286d
IM
8296 * Do not allow a recursive hierarchy (this new sibling
8297 * becoming part of another group-sibling):
8298 */
8299 if (group_leader->group_leader != group_leader)
c3f00c70 8300 goto err_context;
34f43927
PZ
8301
8302 /* All events in a group should have the same clock */
8303 if (group_leader->clock != event->clock)
8304 goto err_context;
8305
ccff286d
IM
8306 /*
8307 * Do not allow to attach to a group in a different
8308 * task or CPU context:
04289bb9 8309 */
b04243ef 8310 if (move_group) {
c3c87e77
PZ
8311 /*
8312 * Make sure we're both on the same task, or both
8313 * per-cpu events.
8314 */
8315 if (group_leader->ctx->task != ctx->task)
8316 goto err_context;
8317
8318 /*
8319 * Make sure we're both events for the same CPU;
8320 * grouping events for different CPUs is broken; since
8321 * you can never concurrently schedule them anyhow.
8322 */
8323 if (group_leader->cpu != event->cpu)
b04243ef
PZ
8324 goto err_context;
8325 } else {
8326 if (group_leader->ctx != ctx)
8327 goto err_context;
8328 }
8329
3b6f9e5c
PM
8330 /*
8331 * Only a group leader can be exclusive or pinned
8332 */
0d48696f 8333 if (attr.exclusive || attr.pinned)
c3f00c70 8334 goto err_context;
ac9721f3
PZ
8335 }
8336
8337 if (output_event) {
8338 err = perf_event_set_output(event, output_event);
8339 if (err)
c3f00c70 8340 goto err_context;
ac9721f3 8341 }
0793a61d 8342
a21b0b35
YD
8343 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
8344 f_flags);
ea635c64
AV
8345 if (IS_ERR(event_file)) {
8346 err = PTR_ERR(event_file);
c3f00c70 8347 goto err_context;
ea635c64 8348 }
9b51f66d 8349
b04243ef 8350 if (move_group) {
f63a8daa
PZ
8351 gctx = group_leader->ctx;
8352
8353 /*
8354 * See perf_event_ctx_lock() for comments on the details
8355 * of swizzling perf_event::ctx.
8356 */
8357 mutex_lock_double(&gctx->mutex, &ctx->mutex);
b04243ef 8358
46ce0fe9 8359 perf_remove_from_context(group_leader, false);
0231bb53 8360
b04243ef
PZ
8361 list_for_each_entry(sibling, &group_leader->sibling_list,
8362 group_entry) {
46ce0fe9 8363 perf_remove_from_context(sibling, false);
b04243ef
PZ
8364 put_ctx(gctx);
8365 }
f63a8daa
PZ
8366 } else {
8367 mutex_lock(&ctx->mutex);
ea635c64 8368 }
9b51f66d 8369
ad3a37de 8370 WARN_ON_ONCE(ctx->parent_ctx);
b04243ef
PZ
8371
8372 if (move_group) {
f63a8daa
PZ
8373 /*
8374 * Wait for everybody to stop referencing the events through
8375 * the old lists, before installing it on new lists.
8376 */
0cda4c02 8377 synchronize_rcu();
f63a8daa 8378
8f95b435
PZI
8379 /*
8380 * Install the group siblings before the group leader.
8381 *
8382 * Because a group leader will try and install the entire group
8383	 * (through the sibling list, which is still intact), we can
8384 * end up with siblings installed in the wrong context.
8385 *
8386 * By installing siblings first we NO-OP because they're not
8387 * reachable through the group lists.
8388 */
b04243ef
PZ
8389 list_for_each_entry(sibling, &group_leader->sibling_list,
8390 group_entry) {
8f95b435 8391 perf_event__state_init(sibling);
9fc81d87 8392 perf_install_in_context(ctx, sibling, sibling->cpu);
b04243ef
PZ
8393 get_ctx(ctx);
8394 }
8f95b435
PZI
8395
8396 /*
8397	 * Removing from the context ends up with a disabled
8398	 * event. What we want here is the event in its initial
8399	 * startup state, ready to be added into the new context.
8400 */
8401 perf_event__state_init(group_leader);
8402 perf_install_in_context(ctx, group_leader, group_leader->cpu);
8403 get_ctx(ctx);
b04243ef
PZ
8404 }
8405
bed5b25a
AS
8406 if (!exclusive_event_installable(event, ctx)) {
8407 err = -EBUSY;
8408 mutex_unlock(&ctx->mutex);
8409 fput(event_file);
8410 goto err_context;
8411 }
8412
e2d37cd2 8413 perf_install_in_context(ctx, event, event->cpu);
fe4b04fa 8414 perf_unpin_context(ctx);
f63a8daa
PZ
8415
8416 if (move_group) {
8417 mutex_unlock(&gctx->mutex);
8418 put_ctx(gctx);
8419 }
d859e29f 8420 mutex_unlock(&ctx->mutex);
9b51f66d 8421
fbfc623f
YZ
8422 put_online_cpus();
8423
cdd6c482 8424 event->owner = current;
8882135b 8425
cdd6c482
IM
8426 mutex_lock(&current->perf_event_mutex);
8427 list_add_tail(&event->owner_entry, &current->perf_event_list);
8428 mutex_unlock(&current->perf_event_mutex);
082ff5a2 8429
c320c7b7
ACM
8430 /*
8431 * Precalculate sample_data sizes
8432 */
8433 perf_event__header_size(event);
6844c09d 8434 perf_event__id_header_size(event);
c320c7b7 8435
8a49542c
PZ
8436 /*
8437 * Drop the reference on the group_event after placing the
8438 * new event on the sibling_list. This ensures destruction
8439 * of the group leader will find the pointer to itself in
8440 * perf_group_detach().
8441 */
2903ff01 8442 fdput(group);
ea635c64
AV
8443 fd_install(event_fd, event_file);
8444 return event_fd;
0793a61d 8445
c3f00c70 8446err_context:
fe4b04fa 8447 perf_unpin_context(ctx);
ea635c64 8448 put_ctx(ctx);
c6be5a5c 8449err_alloc:
ea635c64 8450 free_event(event);
1f4ee503 8451err_cpus:
fbfc623f 8452 put_online_cpus();
1f4ee503 8453err_task:
e7d0bc04
PZ
8454 if (task)
8455 put_task_struct(task);
89a1e187 8456err_group_fd:
2903ff01 8457 fdput(group);
ea635c64
AV
8458err_fd:
8459 put_unused_fd(event_fd);
dc86cabe 8460 return err;
0793a61d
TG
8461}
8462
fb0459d7
AV
8463/**
8464 * perf_event_create_kernel_counter
8465 *
8466 * @attr: attributes of the counter to create
8467 * @cpu: cpu to which the counter is bound
38a81da2 8468 * @task: task to profile (NULL for percpu)
fb0459d7
AV
8469 */
8470struct perf_event *
8471perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
38a81da2 8472 struct task_struct *task,
4dc0da86
AK
8473 perf_overflow_handler_t overflow_handler,
8474 void *context)
fb0459d7 8475{
fb0459d7 8476 struct perf_event_context *ctx;
c3f00c70 8477 struct perf_event *event;
fb0459d7 8478 int err;
d859e29f 8479
fb0459d7
AV
8480 /*
8481 * Get the target context (task or percpu):
8482 */
d859e29f 8483
4dc0da86 8484 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
79dff51e 8485 overflow_handler, context, -1);
c3f00c70
PZ
8486 if (IS_ERR(event)) {
8487 err = PTR_ERR(event);
8488 goto err;
8489 }
d859e29f 8490
f8697762
JO
8491 /* Mark owner so we could distinguish it from user events. */
8492 event->owner = EVENT_OWNER_KERNEL;
8493
766d6c07
FW
8494 account_event(event);
8495
4af57ef2 8496 ctx = find_get_context(event->pmu, task, event);
c6567f64
FW
8497 if (IS_ERR(ctx)) {
8498 err = PTR_ERR(ctx);
c3f00c70 8499 goto err_free;
d859e29f 8500 }
fb0459d7 8501
fb0459d7
AV
8502 WARN_ON_ONCE(ctx->parent_ctx);
8503 mutex_lock(&ctx->mutex);
bed5b25a
AS
8504 if (!exclusive_event_installable(event, ctx)) {
8505 mutex_unlock(&ctx->mutex);
8506 perf_unpin_context(ctx);
8507 put_ctx(ctx);
8508 err = -EBUSY;
8509 goto err_free;
8510 }
8511
fb0459d7 8512 perf_install_in_context(ctx, event, cpu);
fe4b04fa 8513 perf_unpin_context(ctx);
fb0459d7
AV
8514 mutex_unlock(&ctx->mutex);
8515
fb0459d7
AV
8516 return event;
8517
c3f00c70
PZ
8518err_free:
8519 free_event(event);
8520err:
c6567f64 8521 return ERR_PTR(err);
9b51f66d 8522}
fb0459d7 8523EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
9b51f66d 8524
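/*
 * Move every event on @pmu's @src_cpu context over to @dst_cpu, in two
 * passes (siblings first, then group leaders) so that groups come back
 * up intact on the destination CPU.
 */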
0cda4c02
YZ
8525void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
8526{
8527 struct perf_event_context *src_ctx;
8528 struct perf_event_context *dst_ctx;
8529 struct perf_event *event, *tmp;
8530 LIST_HEAD(events);
8531
8532 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
8533 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
8534
f63a8daa
PZ
8535 /*
8536 * See perf_event_ctx_lock() for comments on the details
8537 * of swizzling perf_event::ctx.
8538 */
8539 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
0cda4c02
YZ
8540 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
8541 event_entry) {
46ce0fe9 8542 perf_remove_from_context(event, false);
9a545de0 8543 unaccount_event_cpu(event, src_cpu);
0cda4c02 8544 put_ctx(src_ctx);
9886167d 8545 list_add(&event->migrate_entry, &events);
0cda4c02 8546 }
0cda4c02 8547
8f95b435
PZI
8548 /*
8549 * Wait for the events to quiesce before re-instating them.
8550 */
0cda4c02
YZ
8551 synchronize_rcu();
8552
8f95b435
PZI
8553 /*
8554 * Re-instate events in 2 passes.
8555 *
8556	 * Skip over group leaders and only install siblings on this first
8557	 * pass; siblings will not get enabled without a leader, however a
8558 * leader will enable its siblings, even if those are still on the old
8559 * context.
8560 */
8561 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
8562 if (event->group_leader == event)
8563 continue;
8564
8565 list_del(&event->migrate_entry);
8566 if (event->state >= PERF_EVENT_STATE_OFF)
8567 event->state = PERF_EVENT_STATE_INACTIVE;
8568 account_event_cpu(event, dst_cpu);
8569 perf_install_in_context(dst_ctx, event, dst_cpu);
8570 get_ctx(dst_ctx);
8571 }
8572
8573 /*
8574 * Once all the siblings are setup properly, install the group leaders
8575 * to make it go.
8576 */
9886167d
PZ
8577 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
8578 list_del(&event->migrate_entry);
0cda4c02
YZ
8579 if (event->state >= PERF_EVENT_STATE_OFF)
8580 event->state = PERF_EVENT_STATE_INACTIVE;
9a545de0 8581 account_event_cpu(event, dst_cpu);
0cda4c02
YZ
8582 perf_install_in_context(dst_ctx, event, dst_cpu);
8583 get_ctx(dst_ctx);
8584 }
8585 mutex_unlock(&dst_ctx->mutex);
f63a8daa 8586 mutex_unlock(&src_ctx->mutex);
0cda4c02
YZ
8587}
8588EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
8589
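/*
 * Fold a dead child event's counts back into its parent: add the
 * child's count and enabled/running times to the parent's child_*
 * totals and unlink the child from the parent's child_list.
 */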
cdd6c482 8590static void sync_child_event(struct perf_event *child_event,
38b200d6 8591 struct task_struct *child)
d859e29f 8592{
cdd6c482 8593 struct perf_event *parent_event = child_event->parent;
8bc20959 8594 u64 child_val;
d859e29f 8595
cdd6c482
IM
8596 if (child_event->attr.inherit_stat)
8597 perf_event_read_event(child_event, child);
38b200d6 8598
b5e58793 8599 child_val = perf_event_count(child_event);
d859e29f
PM
8600
8601 /*
8602 * Add back the child's count to the parent's count:
8603 */
a6e6dea6 8604 atomic64_add(child_val, &parent_event->child_count);
cdd6c482
IM
8605 atomic64_add(child_event->total_time_enabled,
8606 &parent_event->child_total_time_enabled);
8607 atomic64_add(child_event->total_time_running,
8608 &parent_event->child_total_time_running);
d859e29f
PM
8609
8610 /*
cdd6c482 8611 * Remove this event from the parent's list
d859e29f 8612 */
cdd6c482
IM
8613 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
8614 mutex_lock(&parent_event->child_mutex);
8615 list_del_init(&child_event->child_list);
8616 mutex_unlock(&parent_event->child_mutex);
d859e29f 8617
dc633982
JO
8618 /*
8619	 * Make sure the user/parent gets notified that we just
8620	 * lost one event.
8621 */
8622 perf_event_wakeup(parent_event);
8623
d859e29f 8624 /*
cdd6c482 8625 * Release the parent event, if this was the last
d859e29f
PM
8626 * reference to it.
8627 */
a6fa941d 8628 put_event(parent_event);
d859e29f
PM
8629}
8630
9b51f66d 8631static void
cdd6c482
IM
8632__perf_event_exit_task(struct perf_event *child_event,
8633 struct perf_event_context *child_ctx,
38b200d6 8634 struct task_struct *child)
9b51f66d 8635{
1903d50c
PZ
8636 /*
8637 * Do not destroy the 'original' grouping; because of the context
8638 * switch optimization the original events could've ended up in a
8639 * random child task.
8640 *
8641 * If we were to destroy the original group, all group related
8642 * operations would cease to function properly after this random
8643 * child dies.
8644 *
8645 * Do destroy all inherited groups, we don't care about those
8646 * and being thorough is better.
8647 */
8648 perf_remove_from_context(child_event, !!child_event->parent);
0cc0c027 8649
9b51f66d 8650 /*
38b435b1 8651 * It can happen that the parent exits first, and has events
9b51f66d 8652 * that are still around due to the child reference. These
38b435b1 8653 * events need to be zapped.
9b51f66d 8654 */
38b435b1 8655 if (child_event->parent) {
cdd6c482
IM
8656 sync_child_event(child_event, child);
8657 free_event(child_event);
179033b3
JO
8658 } else {
8659 child_event->state = PERF_EVENT_STATE_EXIT;
8660 perf_event_wakeup(child_event);
4bcf349a 8661 }
9b51f66d
IM
8662}
8663
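/*
 * On task exit, detach the task's context for @ctxn: unschedule it,
 * unclone it so it can no longer be swapped to another task, emit the
 * PERF_RECORD_EXIT side-band event and then tear down every child
 * event under ctx->mutex.
 */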
8dc85d54 8664static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9b51f66d 8665{
ebf905fc 8666 struct perf_event *child_event, *next;
211de6eb 8667 struct perf_event_context *child_ctx, *clone_ctx = NULL;
a63eaf34 8668 unsigned long flags;
9b51f66d 8669
8dc85d54 8670 if (likely(!child->perf_event_ctxp[ctxn])) {
cdd6c482 8671 perf_event_task(child, NULL, 0);
9b51f66d 8672 return;
9f498cc5 8673 }
9b51f66d 8674
a63eaf34 8675 local_irq_save(flags);
ad3a37de
PM
8676 /*
8677 * We can't reschedule here because interrupts are disabled,
8678 * and either child is current or it is a task that can't be
8679 * scheduled, so we are now safe from rescheduling changing
8680 * our context.
8681 */
806839b2 8682 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
c93f7669
PM
8683
8684 /*
8685 * Take the context lock here so that if find_get_context is
cdd6c482 8686 * reading child->perf_event_ctxp, we wait until it has
c93f7669
PM
8687 * incremented the context's refcount before we do put_ctx below.
8688 */
e625cce1 8689 raw_spin_lock(&child_ctx->lock);
04dc2dbb 8690 task_ctx_sched_out(child_ctx);
8dc85d54 8691 child->perf_event_ctxp[ctxn] = NULL;
4a1c0f26 8692
71a851b4
PZ
8693 /*
8694 * If this context is a clone; unclone it so it can't get
8695 * swapped to another process while we're removing all
cdd6c482 8696 * the events from it.
71a851b4 8697 */
211de6eb 8698 clone_ctx = unclone_ctx(child_ctx);
5e942bb3 8699 update_context_time(child_ctx);
e625cce1 8700 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
9f498cc5 8701
211de6eb
PZ
8702 if (clone_ctx)
8703 put_ctx(clone_ctx);
4a1c0f26 8704
9f498cc5 8705 /*
cdd6c482
IM
8706 * Report the task dead after unscheduling the events so that we
8707 * won't get any samples after PERF_RECORD_EXIT. We can however still
8708 * get a few PERF_RECORD_READ events.
9f498cc5 8709 */
cdd6c482 8710 perf_event_task(child, child_ctx, 0);
a63eaf34 8711
66fff224
PZ
8712 /*
8713 * We can recurse on the same lock type through:
8714 *
cdd6c482
IM
8715 * __perf_event_exit_task()
8716 * sync_child_event()
a6fa941d
AV
8717 * put_event()
8718 * mutex_lock(&ctx->mutex)
66fff224
PZ
8719 *
8721	 * But since it's the parent context it won't be the same instance.
8721 */
a0507c84 8722 mutex_lock(&child_ctx->mutex);
a63eaf34 8723
ebf905fc 8724 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
cdd6c482 8725 __perf_event_exit_task(child_event, child_ctx, child);
8bc20959 8726
a63eaf34
PM
8727 mutex_unlock(&child_ctx->mutex);
8728
8729 put_ctx(child_ctx);
9b51f66d
IM
8730}
8731
8dc85d54
PZ
8732/*
8733 * When a child task exits, feed back event values to parent events.
8734 */
8735void perf_event_exit_task(struct task_struct *child)
8736{
8882135b 8737 struct perf_event *event, *tmp;
8dc85d54
PZ
8738 int ctxn;
8739
8882135b
PZ
8740 mutex_lock(&child->perf_event_mutex);
8741 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
8742 owner_entry) {
8743 list_del_init(&event->owner_entry);
8744
8745 /*
8746 * Ensure the list deletion is visible before we clear
8747 * the owner, closes a race against perf_release() where
8748 * we need to serialize on the owner->perf_event_mutex.
8749 */
8750 smp_wmb();
8751 event->owner = NULL;
8752 }
8753 mutex_unlock(&child->perf_event_mutex);
8754
8dc85d54
PZ
8755 for_each_task_context_nr(ctxn)
8756 perf_event_exit_task_context(child, ctxn);
8757}
8758
889ff015
FW
8759static void perf_free_event(struct perf_event *event,
8760 struct perf_event_context *ctx)
8761{
8762 struct perf_event *parent = event->parent;
8763
8764 if (WARN_ON_ONCE(!parent))
8765 return;
8766
8767 mutex_lock(&parent->child_mutex);
8768 list_del_init(&event->child_list);
8769 mutex_unlock(&parent->child_mutex);
8770
a6fa941d 8771 put_event(parent);
889ff015 8772
652884fe 8773 raw_spin_lock_irq(&ctx->lock);
8a49542c 8774 perf_group_detach(event);
889ff015 8775 list_del_event(event, ctx);
652884fe 8776 raw_spin_unlock_irq(&ctx->lock);
889ff015
FW
8777 free_event(event);
8778}
8779
bbbee908 8780/*
652884fe 8781 * Free an unexposed, unused context as created by inheritance by
8dc85d54 8782 * perf_event_init_task below, used by fork() in case of fail.
652884fe
PZ
8783 *
8784 * Not all locks are strictly required, but take them anyway to be nice and
8785 * help out with the lockdep assertions.
bbbee908 8786 */
cdd6c482 8787void perf_event_free_task(struct task_struct *task)
bbbee908 8788{
8dc85d54 8789 struct perf_event_context *ctx;
cdd6c482 8790 struct perf_event *event, *tmp;
8dc85d54 8791 int ctxn;
bbbee908 8792
8dc85d54
PZ
8793 for_each_task_context_nr(ctxn) {
8794 ctx = task->perf_event_ctxp[ctxn];
8795 if (!ctx)
8796 continue;
bbbee908 8797
8dc85d54 8798 mutex_lock(&ctx->mutex);
bbbee908 8799again:
8dc85d54
PZ
8800 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
8801 group_entry)
8802 perf_free_event(event, ctx);
bbbee908 8803
8dc85d54
PZ
8804 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
8805 group_entry)
8806 perf_free_event(event, ctx);
bbbee908 8807
8dc85d54
PZ
8808 if (!list_empty(&ctx->pinned_groups) ||
8809 !list_empty(&ctx->flexible_groups))
8810 goto again;
bbbee908 8811
8dc85d54 8812 mutex_unlock(&ctx->mutex);
bbbee908 8813
8dc85d54
PZ
8814 put_ctx(ctx);
8815 }
889ff015
FW
8816}
8817
4e231c79
PZ
8818void perf_event_delayed_put(struct task_struct *task)
8819{
8820 int ctxn;
8821
8822 for_each_task_context_nr(ctxn)
8823 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
8824}
8825
ffe8690c
KX
8826struct perf_event *perf_event_get(unsigned int fd)
8827{
8828 int err;
8829 struct fd f;
8830 struct perf_event *event;
8831
8832 err = perf_fget_light(fd, &f);
8833 if (err)
8834 return ERR_PTR(err);
8835
8836 event = f.file->private_data;
8837 atomic_long_inc(&event->refcount);
8838 fdput(f);
8839
8840 return event;
8841}
8842
8843const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
8844{
8845 if (!event)
8846 return ERR_PTR(-EINVAL);
8847
8848 return &event->attr;
8849}
8850
97dee4f3
PZ
8851/*
8852 * inherit an event from parent task to child task:
8853 */
8854static struct perf_event *
8855inherit_event(struct perf_event *parent_event,
8856 struct task_struct *parent,
8857 struct perf_event_context *parent_ctx,
8858 struct task_struct *child,
8859 struct perf_event *group_leader,
8860 struct perf_event_context *child_ctx)
8861{
1929def9 8862 enum perf_event_active_state parent_state = parent_event->state;
97dee4f3 8863 struct perf_event *child_event;
cee010ec 8864 unsigned long flags;
97dee4f3
PZ
8865
8866 /*
8867 * Instead of creating recursive hierarchies of events,
8868 * we link inherited events back to the original parent,
8869 * which has a filp for sure, which we use as the reference
8870 * count:
8871 */
8872 if (parent_event->parent)
8873 parent_event = parent_event->parent;
8874
8875 child_event = perf_event_alloc(&parent_event->attr,
8876 parent_event->cpu,
d580ff86 8877 child,
97dee4f3 8878 group_leader, parent_event,
79dff51e 8879 NULL, NULL, -1);
97dee4f3
PZ
8880 if (IS_ERR(child_event))
8881 return child_event;
a6fa941d 8882
fadfe7be
JO
8883 if (is_orphaned_event(parent_event) ||
8884 !atomic_long_inc_not_zero(&parent_event->refcount)) {
a6fa941d
AV
8885 free_event(child_event);
8886 return NULL;
8887 }
8888
97dee4f3
PZ
8889 get_ctx(child_ctx);
8890
8891 /*
8892 * Make the child state follow the state of the parent event,
8893 * not its attr.disabled bit. We hold the parent's mutex,
8894 * so we won't race with perf_event_{en, dis}able_family.
8895 */
1929def9 8896 if (parent_state >= PERF_EVENT_STATE_INACTIVE)
97dee4f3
PZ
8897 child_event->state = PERF_EVENT_STATE_INACTIVE;
8898 else
8899 child_event->state = PERF_EVENT_STATE_OFF;
8900
8901 if (parent_event->attr.freq) {
8902 u64 sample_period = parent_event->hw.sample_period;
8903 struct hw_perf_event *hwc = &child_event->hw;
8904
8905 hwc->sample_period = sample_period;
8906 hwc->last_period = sample_period;
8907
8908 local64_set(&hwc->period_left, sample_period);
8909 }
8910
8911 child_event->ctx = child_ctx;
8912 child_event->overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
8913 child_event->overflow_handler_context
8914 = parent_event->overflow_handler_context;
97dee4f3 8915
614b6780
TG
8916 /*
8917 * Precalculate sample_data sizes
8918 */
8919 perf_event__header_size(child_event);
6844c09d 8920 perf_event__id_header_size(child_event);
614b6780 8921
97dee4f3
PZ
8922 /*
8923 * Link it up in the child's context:
8924 */
cee010ec 8925 raw_spin_lock_irqsave(&child_ctx->lock, flags);
97dee4f3 8926 add_event_to_ctx(child_event, child_ctx);
cee010ec 8927 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
97dee4f3 8928
97dee4f3
PZ
8929 /*
8930 * Link this into the parent event's child list
8931 */
8932 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
8933 mutex_lock(&parent_event->child_mutex);
8934 list_add_tail(&child_event->child_list, &parent_event->child_list);
8935 mutex_unlock(&parent_event->child_mutex);
8936
8937 return child_event;
8938}
8939
8940static int inherit_group(struct perf_event *parent_event,
8941 struct task_struct *parent,
8942 struct perf_event_context *parent_ctx,
8943 struct task_struct *child,
8944 struct perf_event_context *child_ctx)
8945{
8946 struct perf_event *leader;
8947 struct perf_event *sub;
8948 struct perf_event *child_ctr;
8949
8950 leader = inherit_event(parent_event, parent, parent_ctx,
8951 child, NULL, child_ctx);
8952 if (IS_ERR(leader))
8953 return PTR_ERR(leader);
8954 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
8955 child_ctr = inherit_event(sub, parent, parent_ctx,
8956 child, leader, child_ctx);
8957 if (IS_ERR(child_ctr))
8958 return PTR_ERR(child_ctr);
8959 }
8960 return 0;
889ff015
FW
8961}
8962
8963static int
8964inherit_task_group(struct perf_event *event, struct task_struct *parent,
8965 struct perf_event_context *parent_ctx,
8dc85d54 8966 struct task_struct *child, int ctxn,
889ff015
FW
8967 int *inherited_all)
8968{
8969 int ret;
8dc85d54 8970 struct perf_event_context *child_ctx;
889ff015
FW
8971
8972 if (!event->attr.inherit) {
8973 *inherited_all = 0;
8974 return 0;
bbbee908
PZ
8975 }
8976
fe4b04fa 8977 child_ctx = child->perf_event_ctxp[ctxn];
889ff015
FW
8978 if (!child_ctx) {
8979 /*
8980 * This is executed from the parent task context, so
8981 * inherit events that have been marked for cloning.
8982 * First allocate and initialize a context for the
8983 * child.
8984 */
bbbee908 8985
734df5ab 8986 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
889ff015
FW
8987 if (!child_ctx)
8988 return -ENOMEM;
bbbee908 8989
8dc85d54 8990 child->perf_event_ctxp[ctxn] = child_ctx;
889ff015
FW
8991 }
8992
8993 ret = inherit_group(event, parent, parent_ctx,
8994 child, child_ctx);
8995
8996 if (ret)
8997 *inherited_all = 0;
8998
8999 return ret;
bbbee908
PZ
9000}
9001
9b51f66d 9002/*
cdd6c482 9003 * Initialize the perf_event context in task_struct
9b51f66d 9004 */
985c8dcb 9005static int perf_event_init_context(struct task_struct *child, int ctxn)
9b51f66d 9006{
889ff015 9007 struct perf_event_context *child_ctx, *parent_ctx;
cdd6c482
IM
9008 struct perf_event_context *cloned_ctx;
9009 struct perf_event *event;
9b51f66d 9010 struct task_struct *parent = current;
564c2b21 9011 int inherited_all = 1;
dddd3379 9012 unsigned long flags;
6ab423e0 9013 int ret = 0;
9b51f66d 9014
8dc85d54 9015 if (likely(!parent->perf_event_ctxp[ctxn]))
6ab423e0
PZ
9016 return 0;
9017
ad3a37de 9018 /*
25346b93
PM
9019 * If the parent's context is a clone, pin it so it won't get
9020 * swapped under us.
ad3a37de 9021 */
8dc85d54 9022 parent_ctx = perf_pin_task_context(parent, ctxn);
ffb4ef21
PZ
9023 if (!parent_ctx)
9024 return 0;
25346b93 9025
ad3a37de
PM
9026 /*
9027 * No need to check if parent_ctx != NULL here; since we saw
9028 * it non-NULL earlier, the only reason for it to become NULL
9029 * is if we exit, and since we're currently in the middle of
9030 * a fork we can't be exiting at the same time.
9031 */
ad3a37de 9032
9b51f66d
IM
9033 /*
9034 * Lock the parent list. No need to lock the child - not PID
9035 * hashed yet and not running, so nobody can access it.
9036 */
d859e29f 9037 mutex_lock(&parent_ctx->mutex);
9b51f66d
IM
9038
9039 /*
9040	 * We don't have to disable NMIs - we are only looking at
9041 * the list, not manipulating it:
9042 */
889ff015 9043 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
8dc85d54
PZ
9044 ret = inherit_task_group(event, parent, parent_ctx,
9045 child, ctxn, &inherited_all);
889ff015
FW
9046 if (ret)
9047 break;
9048 }
b93f7978 9049
dddd3379
TG
9050 /*
9051 * We can't hold ctx->lock when iterating the ->flexible_group list due
9052 * to allocations, but we need to prevent rotation because
9053 * rotate_ctx() will change the list from interrupt context.
9054 */
9055 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
9056 parent_ctx->rotate_disable = 1;
9057 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
9058
889ff015 9059 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
8dc85d54
PZ
9060 ret = inherit_task_group(event, parent, parent_ctx,
9061 child, ctxn, &inherited_all);
889ff015 9062 if (ret)
9b51f66d 9063 break;
564c2b21
PM
9064 }
9065
dddd3379
TG
9066 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
9067 parent_ctx->rotate_disable = 0;
dddd3379 9068
8dc85d54 9069 child_ctx = child->perf_event_ctxp[ctxn];
889ff015 9070
05cbaa28 9071 if (child_ctx && inherited_all) {
564c2b21
PM
9072 /*
9073 * Mark the child context as a clone of the parent
9074 * context, or of whatever the parent is a clone of.
c5ed5145
PZ
9075 *
9076 * Note that if the parent is a clone, the holding of
9077	 * parent_ctx->lock prevents it from being uncloned.
564c2b21 9078 */
c5ed5145 9079 cloned_ctx = parent_ctx->parent_ctx;
ad3a37de
PM
9080 if (cloned_ctx) {
9081 child_ctx->parent_ctx = cloned_ctx;
25346b93 9082 child_ctx->parent_gen = parent_ctx->parent_gen;
564c2b21
PM
9083 } else {
9084 child_ctx->parent_ctx = parent_ctx;
9085 child_ctx->parent_gen = parent_ctx->generation;
9086 }
9087 get_ctx(child_ctx->parent_ctx);
9b51f66d
IM
9088 }
9089
c5ed5145 9090 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
d859e29f 9091 mutex_unlock(&parent_ctx->mutex);
6ab423e0 9092
25346b93 9093 perf_unpin_context(parent_ctx);
fe4b04fa 9094 put_ctx(parent_ctx);
ad3a37de 9095
6ab423e0 9096 return ret;
9b51f66d
IM
9097}
9098
8dc85d54
PZ
9099/*
9100 * Initialize the perf_event context in task_struct
9101 */
9102int perf_event_init_task(struct task_struct *child)
9103{
9104 int ctxn, ret;
9105
8550d7cb
ON
9106 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
9107 mutex_init(&child->perf_event_mutex);
9108 INIT_LIST_HEAD(&child->perf_event_list);
9109
8dc85d54
PZ
9110 for_each_task_context_nr(ctxn) {
9111 ret = perf_event_init_context(child, ctxn);
6c72e350
PZ
9112 if (ret) {
9113 perf_event_free_task(child);
8dc85d54 9114 return ret;
6c72e350 9115 }
8dc85d54
PZ
9116 }
9117
9118 return 0;
9119}
9120
220b140b
PM
9121static void __init perf_event_init_all_cpus(void)
9122{
b28ab83c 9123 struct swevent_htable *swhash;
220b140b 9124 int cpu;
220b140b
PM
9125
9126 for_each_possible_cpu(cpu) {
b28ab83c
PZ
9127 swhash = &per_cpu(swevent_htable, cpu);
9128 mutex_init(&swhash->hlist_mutex);
2fde4f94 9129 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
220b140b
PM
9130 }
9131}
9132
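/*
 * CPU hotplug (CPU_UP_PREPARE / CPU_DOWN_FAILED): mark the per-CPU
 * software-event state online and, if hlist references are already held,
 * allocate the swevent hash list this CPU will need.
 */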
0db0628d 9133static void perf_event_init_cpu(int cpu)
0793a61d 9134{
108b02cf 9135 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
0793a61d 9136
b28ab83c 9137 mutex_lock(&swhash->hlist_mutex);
39af6b16 9138 swhash->online = true;
4536e4d1 9139 if (swhash->hlist_refcount > 0) {
76e1d904
FW
9140 struct swevent_hlist *hlist;
9141
b28ab83c
PZ
9142 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
9143 WARN_ON(!hlist);
9144 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 9145 }
b28ab83c 9146 mutex_unlock(&swhash->hlist_mutex);
0793a61d
TG
9147}
9148
2965faa5 9149#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
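/*
 * Remove every event from a CPU context; runs on the outgoing CPU via
 * smp_call_function_single() below. The .detach_group flag additionally
 * breaks up event groups as they are torn down.
 */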
108b02cf 9150static void __perf_event_exit_context(void *__info)
0793a61d 9151{
226424ee 9152 struct remove_event re = { .detach_group = true };
108b02cf 9153 struct perf_event_context *ctx = __info;
0793a61d 9154
e3703f8c 9155 rcu_read_lock();
46ce0fe9
PZ
9156 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
9157 __perf_remove_from_context(&re);
e3703f8c 9158 rcu_read_unlock();
0793a61d 9159}
108b02cf
PZ
9160
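/*
 * For each registered PMU, take its per-CPU context mutex and run
 * __perf_event_exit_context() on the outgoing CPU, so events are removed
 * on the CPU they are scheduled on.
 */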
9161static void perf_event_exit_cpu_context(int cpu)
9162{
9163 struct perf_event_context *ctx;
9164 struct pmu *pmu;
9165 int idx;
9166
9167 idx = srcu_read_lock(&pmus_srcu);
9168 list_for_each_entry_rcu(pmu, &pmus, entry) {
917bdd1c 9169 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
108b02cf
PZ
9170
9171 mutex_lock(&ctx->mutex);
9172 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
9173 mutex_unlock(&ctx->mutex);
9174 }
9175 srcu_read_unlock(&pmus_srcu, idx);
108b02cf
PZ
9176}
9177
cdd6c482 9178static void perf_event_exit_cpu(int cpu)
0793a61d 9179{
b28ab83c 9180 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
d859e29f 9181
e3703f8c
PZ
9182 perf_event_exit_cpu_context(cpu);
9183
b28ab83c 9184 mutex_lock(&swhash->hlist_mutex);
39af6b16 9185 swhash->online = false;
b28ab83c
PZ
9186 swevent_hlist_release(swhash);
9187 mutex_unlock(&swhash->hlist_mutex);
0793a61d
TG
9188}
9189#else
cdd6c482 9190static inline void perf_event_exit_cpu(int cpu) { }
0793a61d
TG
9191#endif
9192
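/*
 * Reboot notifier: reuse the CPU-hotplug teardown for every online CPU so
 * outstanding hardware events (including, presumably, the perf-based NMI
 * watchdog's) are quiesced before the machine reboots; see the priority
 * note below.
 */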
c277443c
PZ
9193static int
9194perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
9195{
9196 int cpu;
9197
9198 for_each_online_cpu(cpu)
9199 perf_event_exit_cpu(cpu);
9200
9201 return NOTIFY_OK;
9202}
9203
9204/*
9205 * Run the perf reboot notifier at the very last possible moment so that
9206 * the generic watchdog code runs as long as possible.
9207 */
9208static struct notifier_block perf_reboot_notifier = {
9209 .notifier_call = perf_reboot,
9210 .priority = INT_MIN,
9211};
9212
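/*
 * CPU hotplug notifier: bring per-CPU perf state up when a CPU comes
 * online (or an offline attempt fails) and tear it down before a CPU
 * goes away.
 */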
0db0628d 9213static int
0793a61d
TG
9214perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
9215{
9216 unsigned int cpu = (long)hcpu;
9217
4536e4d1 9218 switch (action & ~CPU_TASKS_FROZEN) {
0793a61d
TG
9219
9220 case CPU_UP_PREPARE:
5e11637e 9221 case CPU_DOWN_FAILED:
cdd6c482 9222 perf_event_init_cpu(cpu);
0793a61d
TG
9223 break;
9224
5e11637e 9225 case CPU_UP_CANCELED:
0793a61d 9226 case CPU_DOWN_PREPARE:
cdd6c482 9227 perf_event_exit_cpu(cpu);
0793a61d 9228 break;
0793a61d
TG
9229 default:
9230 break;
9231 }
9232
9233 return NOTIFY_OK;
9234}
9235
cdd6c482 9236void __init perf_event_init(void)
0793a61d 9237{
3c502e7a
JW
9238 int ret;
9239
2e80a82a
PZ
9240 idr_init(&pmu_idr);
9241
220b140b 9242 perf_event_init_all_cpus();
b0a873eb 9243 init_srcu_struct(&pmus_srcu);
2e80a82a
PZ
9244 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
9245 perf_pmu_register(&perf_cpu_clock, NULL, -1);
9246 perf_pmu_register(&perf_task_clock, NULL, -1);
b0a873eb
PZ
9247 perf_tp_register();
9248 perf_cpu_notifier(perf_cpu_notify);
c277443c 9249 register_reboot_notifier(&perf_reboot_notifier);
3c502e7a
JW
9250
9251 ret = init_hw_breakpoint();
9252 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
b2029520
GN
9253
9254 /* do not patch jump label more than once per second */
9255 jump_label_rate_limit(&perf_sched_events, HZ);
b01c3a00
JO
9256
9257 /*
9258 * Build time assertion that we keep the data_head at the intended
9259 * location. IOW, validation we got the __reserved[] size right.
9260 */
9261 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
9262 != 1024);
0793a61d 9263}
abe43400 9264
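/*
 * sysfs show() helper for PMU event attributes: PMU drivers that describe
 * events with a perf_pmu_events_attr string are expected to use this as
 * their show() method; it emits the event string, or an empty file when
 * none was supplied.
 */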
fd979c01
CS
9265ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
9266 char *page)
9267{
9268 struct perf_pmu_events_attr *pmu_attr =
9269 container_of(attr, struct perf_pmu_events_attr, attr);
9270
9271 if (pmu_attr->event_str)
9272 return sprintf(page, "%s\n", pmu_attr->event_str);
9273
9274 return 0;
9275}
9276
abe43400
PZ
9277static int __init perf_event_sysfs_init(void)
9278{
9279 struct pmu *pmu;
9280 int ret;
9281
9282 mutex_lock(&pmus_lock);
9283
9284 ret = bus_register(&pmu_bus);
9285 if (ret)
9286 goto unlock;
9287
9288 list_for_each_entry(pmu, &pmus, entry) {
9289 if (!pmu->name || pmu->type < 0)
9290 continue;
9291
9292 ret = pmu_dev_alloc(pmu);
9293 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
9294 }
9295 pmu_bus_running = 1;
9296 ret = 0;
9297
9298unlock:
9299 mutex_unlock(&pmus_lock);
9300
9301 return ret;
9302}
9303device_initcall(perf_event_sysfs_init);
e5d1367f
SE
9304
9305#ifdef CONFIG_CGROUP_PERF
eb95419b
TH
9306static struct cgroup_subsys_state *
9307perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
e5d1367f
SE
9308{
9309 struct perf_cgroup *jc;
e5d1367f 9310
1b15d055 9311 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
e5d1367f
SE
9312 if (!jc)
9313 return ERR_PTR(-ENOMEM);
9314
e5d1367f
SE
9315 jc->info = alloc_percpu(struct perf_cgroup_info);
9316 if (!jc->info) {
9317 kfree(jc);
9318 return ERR_PTR(-ENOMEM);
9319 }
9320
e5d1367f
SE
9321 return &jc->css;
9322}
9323
eb95419b 9324static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
e5d1367f 9325{
eb95419b
TH
9326 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
9327
e5d1367f
SE
9328 free_percpu(jc->info);
9329 kfree(jc);
9330}
9331
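/*
 * Runs on the CPU the task is currently executing on (via
 * task_function_call()): switch perf cgroup state out and back in so the
 * task is accounted against its new cgroup immediately.
 */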
9332static int __perf_cgroup_move(void *info)
9333{
9334 struct task_struct *task = info;
9335 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
9336 return 0;
9337}
9338
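/*
 * cgroup migration hooks: both attach and exit re-run __perf_cgroup_move()
 * on the affected task(s) so their events are charged to the correct
 * cgroup from that point on.
 */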
eb95419b
TH
9339static void perf_cgroup_attach(struct cgroup_subsys_state *css,
9340 struct cgroup_taskset *tset)
e5d1367f 9341{
bb9d97b6
TH
9342 struct task_struct *task;
9343
924f0d9a 9344 cgroup_taskset_for_each(task, tset)
bb9d97b6 9345 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
9346}
9347
eb95419b
TH
9348static void perf_cgroup_exit(struct cgroup_subsys_state *css,
9349 struct cgroup_subsys_state *old_css,
761b3ef5 9350 struct task_struct *task)
e5d1367f 9351{
bb9d97b6 9352 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
9353}
9354
073219e9 9355struct cgroup_subsys perf_event_cgrp_subsys = {
92fb9748
TH
9356 .css_alloc = perf_cgroup_css_alloc,
9357 .css_free = perf_cgroup_css_free,
e7e7ee2e 9358 .exit = perf_cgroup_exit,
bb9d97b6 9359 .attach = perf_cgroup_attach,
e5d1367f
SE
9360};
9361#endif /* CONFIG_CGROUP_PERF */