perf/x86: Use extended offcore mask on Haswell
[linux-2.6-block.git] / kernel/events/core.c
0793a61d 1/*
57c0c15b 2 * Performance events core code:
0793a61d 3 *
98144511 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
e7e7ee2e
IM
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
d36b6910 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
7b732a75 8 *
57c0c15b 9 * For licensing details see kernel-base/COPYING
0793a61d
TG
10 */
11
12#include <linux/fs.h>
b9cacc7b 13#include <linux/mm.h>
0793a61d
TG
14#include <linux/cpu.h>
15#include <linux/smp.h>
2e80a82a 16#include <linux/idr.h>
04289bb9 17#include <linux/file.h>
0793a61d 18#include <linux/poll.h>
5a0e3ad6 19#include <linux/slab.h>
76e1d904 20#include <linux/hash.h>
12351ef8 21#include <linux/tick.h>
0793a61d 22#include <linux/sysfs.h>
22a4f650 23#include <linux/dcache.h>
0793a61d 24#include <linux/percpu.h>
22a4f650 25#include <linux/ptrace.h>
c277443c 26#include <linux/reboot.h>
b9cacc7b 27#include <linux/vmstat.h>
abe43400 28#include <linux/device.h>
6e5fdeed 29#include <linux/export.h>
906010b2 30#include <linux/vmalloc.h>
b9cacc7b
PZ
31#include <linux/hardirq.h>
32#include <linux/rculist.h>
0793a61d
TG
33#include <linux/uaccess.h>
34#include <linux/syscalls.h>
35#include <linux/anon_inodes.h>
aa9c4c0f 36#include <linux/kernel_stat.h>
cdd6c482 37#include <linux/perf_event.h>
6fb2915d 38#include <linux/ftrace_event.h>
3c502e7a 39#include <linux/hw_breakpoint.h>
c5ebcedb 40#include <linux/mm_types.h>
877c6856 41#include <linux/cgroup.h>
c464c76e 42#include <linux/module.h>
f972eb63 43#include <linux/mman.h>
0793a61d 44
76369139
FW
45#include "internal.h"
46
4e193bd4
TB
47#include <asm/irq_regs.h>
48
fadfe7be
JO
49static struct workqueue_struct *perf_wq;
50
fe4b04fa 51struct remote_function_call {
e7e7ee2e
IM
52 struct task_struct *p;
53 int (*func)(void *info);
54 void *info;
55 int ret;
fe4b04fa
PZ
56};
57
58static void remote_function(void *data)
59{
60 struct remote_function_call *tfc = data;
61 struct task_struct *p = tfc->p;
62
63 if (p) {
64 tfc->ret = -EAGAIN;
65 if (task_cpu(p) != smp_processor_id() || !task_curr(p))
66 return;
67 }
68
69 tfc->ret = tfc->func(tfc->info);
70}
71
72/**
73 * task_function_call - call a function on the cpu on which a task runs
74 * @p: the task to evaluate
75 * @func: the function to be called
76 * @info: the function call argument
77 *
78 * Calls the function @func when the task is currently running. This might
79 * be on the current CPU, in which case the function is called directly.
80 *
81 * returns: @func return value, or
82 * -ESRCH - when the process isn't running
83 * -EAGAIN - when the process moved away
84 */
85static int
86task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
87{
88 struct remote_function_call data = {
e7e7ee2e
IM
89 .p = p,
90 .func = func,
91 .info = info,
92 .ret = -ESRCH, /* No such (running) process */
fe4b04fa
PZ
93 };
94
95 if (task_curr(p))
96 smp_call_function_single(task_cpu(p), remote_function, &data, 1);
97
98 return data.ret;
99}
100
101/**
102 * cpu_function_call - call a function on the cpu
103 * @func: the function to be called
104 * @info: the function call argument
105 *
106 * Calls the function @func on the remote cpu.
107 *
108 * returns: @func return value or -ENXIO when the cpu is offline
109 */
110static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
111{
112 struct remote_function_call data = {
e7e7ee2e
IM
113 .p = NULL,
114 .func = func,
115 .info = info,
116 .ret = -ENXIO, /* No such CPU */
fe4b04fa
PZ
117 };
118
119 smp_call_function_single(cpu, remote_function, &data, 1);
120
121 return data.ret;
122}
123
f8697762
JO
124#define EVENT_OWNER_KERNEL ((void *) -1)
125
126static bool is_kernel_event(struct perf_event *event)
127{
128 return event->owner == EVENT_OWNER_KERNEL;
129}
130
e5d1367f
SE
131#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
132 PERF_FLAG_FD_OUTPUT |\
a21b0b35
YD
133 PERF_FLAG_PID_CGROUP |\
134 PERF_FLAG_FD_CLOEXEC)
e5d1367f 135
bce38cd5
SE
136/*
137 * branch priv levels that need permission checks
138 */
139#define PERF_SAMPLE_BRANCH_PERM_PLM \
140 (PERF_SAMPLE_BRANCH_KERNEL |\
141 PERF_SAMPLE_BRANCH_HV)
142
0b3fcf17
SE
143enum event_type_t {
144 EVENT_FLEXIBLE = 0x1,
145 EVENT_PINNED = 0x2,
146 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
147};
148
e5d1367f
SE
149/*
150 * perf_sched_events : >0 events exist
151 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
152 */
c5905afb 153struct static_key_deferred perf_sched_events __read_mostly;
e5d1367f 154static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
d010b332 155static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
e5d1367f 156
cdd6c482
IM
157static atomic_t nr_mmap_events __read_mostly;
158static atomic_t nr_comm_events __read_mostly;
159static atomic_t nr_task_events __read_mostly;
948b26b6 160static atomic_t nr_freq_events __read_mostly;
9ee318a7 161
108b02cf
PZ
162static LIST_HEAD(pmus);
163static DEFINE_MUTEX(pmus_lock);
164static struct srcu_struct pmus_srcu;
165
0764771d 166/*
cdd6c482 167 * perf event paranoia level:
0fbdea19
IM
168 * -1 - not paranoid at all
169 * 0 - disallow raw tracepoint access for unpriv
cdd6c482 170 * 1 - disallow cpu events for unpriv
0fbdea19 171 * 2 - disallow kernel profiling for unpriv
0764771d 172 */
cdd6c482 173int sysctl_perf_event_paranoid __read_mostly = 1;
0764771d 174
20443384
FW
175/* Minimum for 512 kiB + 1 user control page */
176int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
df58ab24
PZ
177
178/*
cdd6c482 179 * max perf event sample rate
df58ab24 180 */
14c63f17
DH
181#define DEFAULT_MAX_SAMPLE_RATE 100000
182#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
183#define DEFAULT_CPU_TIME_MAX_PERCENT 25
184
185int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
186
187static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
188static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
189
d9494cb4
PZ
190static int perf_sample_allowed_ns __read_mostly =
191 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
14c63f17
DH
192
193void update_perf_cpu_limits(void)
194{
195 u64 tmp = perf_sample_period_ns;
196
197 tmp *= sysctl_perf_cpu_time_max_percent;
e5302920 198 do_div(tmp, 100);
d9494cb4 199 ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
14c63f17 200}
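/*
 * Illustrative sketch, not part of the kernel source: with the defaults
 * above (sample rate 100000 Hz, perf_cpu_time_max_percent 25), the sample
 * period is NSEC_PER_SEC / 100000 = 10000ns and update_perf_cpu_limits()
 * allows 10000 * 25 / 100 = 2500ns of interrupt time per sample.  A
 * user-space model of the same arithmetic:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sample_rate = 100000;			/* kernel.perf_event_max_sample_rate */
	uint64_t period_ns   = 1000000000ULL / sample_rate;
	uint64_t max_percent = 25;			/* kernel.perf_cpu_time_max_percent */
	uint64_t allowed_ns  = period_ns * max_percent / 100;

	/* prints: period 10000 ns, allowed 2500 ns per sample */
	printf("period %llu ns, allowed %llu ns per sample\n",
	       (unsigned long long)period_ns, (unsigned long long)allowed_ns);
	return 0;
}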
163ec435 201
9e630205
SE
202static int perf_rotate_context(struct perf_cpu_context *cpuctx);
203
163ec435
PZ
204int perf_proc_update_handler(struct ctl_table *table, int write,
205 void __user *buffer, size_t *lenp,
206 loff_t *ppos)
207{
723478c8 208 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
163ec435
PZ
209
210 if (ret || !write)
211 return ret;
212
213 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
14c63f17
DH
214 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
215 update_perf_cpu_limits();
216
217 return 0;
218}
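/*
 * Worked example (illustrative, not part of the kernel source): writing
 * 100000 to kernel.perf_event_max_sample_rate on a kernel with, say,
 * HZ = 1000 gives max_samples_per_tick = DIV_ROUND_UP(100000, 1000) = 100
 * samples per tick and perf_sample_period_ns = 10^9 / 100000 = 10000ns,
 * after which update_perf_cpu_limits() recomputes the per-sample budget.
 */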
219
220int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
221
222int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
223 void __user *buffer, size_t *lenp,
224 loff_t *ppos)
225{
226 int ret = proc_dointvec(table, write, buffer, lenp, ppos);
227
228 if (ret || !write)
229 return ret;
230
231 update_perf_cpu_limits();
163ec435
PZ
232
233 return 0;
234}
1ccd1549 235
14c63f17
DH
236/*
237 * perf samples are done in some very critical code paths (NMIs).
238 * If they take too much CPU time, the system can lock up and not
239 * get any real work done. This will drop the sample rate when
240 * we detect that events are taking too long.
241 */
242#define NR_ACCUMULATED_SAMPLES 128
d9494cb4 243static DEFINE_PER_CPU(u64, running_sample_length);
14c63f17 244
6a02ad66 245static void perf_duration_warn(struct irq_work *w)
14c63f17 246{
6a02ad66 247 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
14c63f17 248 u64 avg_local_sample_len;
e5302920 249 u64 local_samples_len;
6a02ad66
PZ
250
251 local_samples_len = __get_cpu_var(running_sample_length);
252 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
253
254 printk_ratelimited(KERN_WARNING
255 "perf interrupt took too long (%lld > %lld), lowering "
256 "kernel.perf_event_max_sample_rate to %d\n",
cd578abb 257 avg_local_sample_len, allowed_ns >> 1,
6a02ad66
PZ
258 sysctl_perf_event_sample_rate);
259}
260
261static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
262
263void perf_sample_event_took(u64 sample_len_ns)
264{
d9494cb4 265 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
6a02ad66
PZ
266 u64 avg_local_sample_len;
267 u64 local_samples_len;
14c63f17 268
d9494cb4 269 if (allowed_ns == 0)
14c63f17
DH
270 return;
271
272 /* decay the counter by 1 average sample */
273 local_samples_len = __get_cpu_var(running_sample_length);
274 local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
275 local_samples_len += sample_len_ns;
276 __get_cpu_var(running_sample_length) = local_samples_len;
277
278 /*
279 * note: this will be biased artificially low until we have
280 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
281 * from having to maintain a count.
282 */
283 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
284
d9494cb4 285 if (avg_local_sample_len <= allowed_ns)
14c63f17
DH
286 return;
287
288 if (max_samples_per_tick <= 1)
289 return;
290
291 max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
292 sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
293 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
294
14c63f17 295 update_perf_cpu_limits();
6a02ad66 296
cd578abb
PZ
297 if (!irq_work_queue(&perf_duration_work)) {
298 early_printk("perf interrupt took too long (%lld > %lld), lowering "
299 "kernel.perf_event_max_sample_rate to %d\n",
300 avg_local_sample_len, allowed_ns >> 1,
301 sysctl_perf_event_sample_rate);
302 }
14c63f17
DH
303}
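/*
 * Illustrative sketch, not part of the kernel source: the
 * running_sample_length update above is an exponential moving average.
 * Each new sample displaces 1/NR_ACCUMULATED_SAMPLES (1/128) of the
 * accumulated value, so no explicit sample count is needed.  A user-space
 * model showing the average converging on a constant 3000ns sample:
 */
#include <stdint.h>
#include <stdio.h>

#define NR_ACCUMULATED_SAMPLES 128

int main(void)
{
	uint64_t running_len = 0;
	int i;

	for (i = 0; i < 2000; i++) {
		running_len -= running_len / NR_ACCUMULATED_SAMPLES;	/* decay by one average sample */
		running_len += 3000;					/* add the new sample */
	}

	/* prints an average of roughly 3000 ns */
	printf("avg sample length ~%llu ns\n",
	       (unsigned long long)(running_len / NR_ACCUMULATED_SAMPLES));
	return 0;
}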
304
cdd6c482 305static atomic64_t perf_event_id;
a96bbc16 306
0b3fcf17
SE
307static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
308 enum event_type_t event_type);
309
310static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
311 enum event_type_t event_type,
312 struct task_struct *task);
313
314static void update_context_time(struct perf_event_context *ctx);
315static u64 perf_event_time(struct perf_event *event);
0b3fcf17 316
cdd6c482 317void __weak perf_event_print_debug(void) { }
0793a61d 318
84c79910 319extern __weak const char *perf_pmu_name(void)
0793a61d 320{
84c79910 321 return "pmu";
0793a61d
TG
322}
323
0b3fcf17
SE
324static inline u64 perf_clock(void)
325{
326 return local_clock();
327}
328
e5d1367f
SE
329static inline struct perf_cpu_context *
330__get_cpu_context(struct perf_event_context *ctx)
331{
332 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
333}
334
facc4307
PZ
335static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
336 struct perf_event_context *ctx)
337{
338 raw_spin_lock(&cpuctx->ctx.lock);
339 if (ctx)
340 raw_spin_lock(&ctx->lock);
341}
342
343static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
344 struct perf_event_context *ctx)
345{
346 if (ctx)
347 raw_spin_unlock(&ctx->lock);
348 raw_spin_unlock(&cpuctx->ctx.lock);
349}
350
e5d1367f
SE
351#ifdef CONFIG_CGROUP_PERF
352
877c6856
LZ
353/*
354 * perf_cgroup_info keeps track of time_enabled for a cgroup.
355 * This is a per-cpu dynamically allocated data structure.
356 */
357struct perf_cgroup_info {
358 u64 time;
359 u64 timestamp;
360};
361
362struct perf_cgroup {
363 struct cgroup_subsys_state css;
86e213e1 364 struct perf_cgroup_info __percpu *info;
877c6856
LZ
365};
366
3f7cce3c
SE
367/*
368 * Must ensure cgroup is pinned (css_get) before calling
369 * this function. In other words, we cannot call this function
370 * if there is no cgroup event for the current CPU context.
371 */
e5d1367f
SE
372static inline struct perf_cgroup *
373perf_cgroup_from_task(struct task_struct *task)
374{
073219e9 375 return container_of(task_css(task, perf_event_cgrp_id),
8af01f56 376 struct perf_cgroup, css);
e5d1367f
SE
377}
378
379static inline bool
380perf_cgroup_match(struct perf_event *event)
381{
382 struct perf_event_context *ctx = event->ctx;
383 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
384
ef824fa1
TH
385 /* @event doesn't care about cgroup */
386 if (!event->cgrp)
387 return true;
388
389 /* wants specific cgroup scope but @cpuctx isn't associated with any */
390 if (!cpuctx->cgrp)
391 return false;
392
393 /*
394 * Cgroup scoping is recursive. An event enabled for a cgroup is
395 * also enabled for all its descendant cgroups. If @cpuctx's
396 * cgroup is a descendant of @event's (the test covers identity
397 * case), it's a match.
398 */
399 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
400 event->cgrp->css.cgroup);
e5d1367f
SE
401}
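/*
 * Illustrative sketch, not part of the kernel source: the recursive scope
 * check above reduces to "is the CPU's current cgroup equal to, or a
 * descendant of, the event's cgroup?".  A toy model using parent pointers:
 */
#include <stdbool.h>

struct toy_cgroup {
	struct toy_cgroup *parent;	/* NULL for the root */
};

static bool toy_is_descendant(struct toy_cgroup *child, struct toy_cgroup *ancestor)
{
	for (; child; child = child->parent)
		if (child == ancestor)
			return true;	/* also covers the identity case */
	return false;
}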
402
e5d1367f
SE
403static inline void perf_put_cgroup(struct perf_event *event)
404{
405 css_put(&event->cgrp->css);
406}
407
408static inline void perf_detach_cgroup(struct perf_event *event)
409{
410 perf_put_cgroup(event);
411 event->cgrp = NULL;
412}
413
414static inline int is_cgroup_event(struct perf_event *event)
415{
416 return event->cgrp != NULL;
417}
418
419static inline u64 perf_cgroup_event_time(struct perf_event *event)
420{
421 struct perf_cgroup_info *t;
422
423 t = per_cpu_ptr(event->cgrp->info, event->cpu);
424 return t->time;
425}
426
427static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
428{
429 struct perf_cgroup_info *info;
430 u64 now;
431
432 now = perf_clock();
433
434 info = this_cpu_ptr(cgrp->info);
435
436 info->time += now - info->timestamp;
437 info->timestamp = now;
438}
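/*
 * Illustrative sketch, not part of the kernel source: __update_cgrp_time()
 * is a lazy "stopwatch" accumulator -- credit everything elapsed since the
 * last look, then remember when we looked.  The same pattern standalone:
 */
#include <stdint.h>

struct toy_clock {
	uint64_t time;		/* total accumulated nanoseconds */
	uint64_t timestamp;	/* when we last sampled the clock */
};

static void toy_clock_update(struct toy_clock *c, uint64_t now)
{
	c->time += now - c->timestamp;	/* credit the interval since the last update */
	c->timestamp = now;		/* and start a new interval */
}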
439
440static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
441{
442 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
443 if (cgrp_out)
444 __update_cgrp_time(cgrp_out);
445}
446
447static inline void update_cgrp_time_from_event(struct perf_event *event)
448{
3f7cce3c
SE
449 struct perf_cgroup *cgrp;
450
e5d1367f 451 /*
3f7cce3c
SE
452 * ensure we access cgroup data only when needed and
453 * when we know the cgroup is pinned (css_get)
e5d1367f 454 */
3f7cce3c 455 if (!is_cgroup_event(event))
e5d1367f
SE
456 return;
457
3f7cce3c
SE
458 cgrp = perf_cgroup_from_task(current);
459 /*
460 * Do not update time when cgroup is not active
461 */
462 if (cgrp == event->cgrp)
463 __update_cgrp_time(event->cgrp);
e5d1367f
SE
464}
465
466static inline void
3f7cce3c
SE
467perf_cgroup_set_timestamp(struct task_struct *task,
468 struct perf_event_context *ctx)
e5d1367f
SE
469{
470 struct perf_cgroup *cgrp;
471 struct perf_cgroup_info *info;
472
3f7cce3c
SE
473 /*
474 * ctx->lock held by caller
475 * ensure we do not access cgroup data
476 * unless we have the cgroup pinned (css_get)
477 */
478 if (!task || !ctx->nr_cgroups)
e5d1367f
SE
479 return;
480
481 cgrp = perf_cgroup_from_task(task);
482 info = this_cpu_ptr(cgrp->info);
3f7cce3c 483 info->timestamp = ctx->timestamp;
e5d1367f
SE
484}
485
486#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
487#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
488
489/*
490 * reschedule events based on the cgroup constraint of task.
491 *
492 * mode SWOUT : schedule out everything
493 * mode SWIN : schedule in based on cgroup for next
494 */
495void perf_cgroup_switch(struct task_struct *task, int mode)
496{
497 struct perf_cpu_context *cpuctx;
498 struct pmu *pmu;
499 unsigned long flags;
500
501 /*
502 * disable interrupts to avoid getting nr_cgroups
503 * changes via __perf_event_disable(). Also
504 * avoids preemption.
505 */
506 local_irq_save(flags);
507
508 /*
509 * we reschedule only in the presence of cgroup
510 * constrained events.
511 */
512 rcu_read_lock();
513
514 list_for_each_entry_rcu(pmu, &pmus, entry) {
e5d1367f 515 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
95cf59ea
PZ
516 if (cpuctx->unique_pmu != pmu)
517 continue; /* ensure we process each cpuctx once */
e5d1367f 518
e5d1367f
SE
519 /*
520 * perf_cgroup_events says at least one
521 * context on this CPU has cgroup events.
522 *
523 * ctx->nr_cgroups reports the number of cgroup
524 * events for a context.
525 */
526 if (cpuctx->ctx.nr_cgroups > 0) {
facc4307
PZ
527 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
528 perf_pmu_disable(cpuctx->ctx.pmu);
e5d1367f
SE
529
530 if (mode & PERF_CGROUP_SWOUT) {
531 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
532 /*
533 * must not be done before ctxswout due
534 * to event_filter_match() in event_sched_out()
535 */
536 cpuctx->cgrp = NULL;
537 }
538
539 if (mode & PERF_CGROUP_SWIN) {
e566b76e 540 WARN_ON_ONCE(cpuctx->cgrp);
95cf59ea
PZ
541 /*
542 * set cgrp before ctxsw in to allow
543 * event_filter_match() to not have to pass
544 * task around
e5d1367f
SE
545 */
546 cpuctx->cgrp = perf_cgroup_from_task(task);
547 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
548 }
facc4307
PZ
549 perf_pmu_enable(cpuctx->ctx.pmu);
550 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
e5d1367f 551 }
e5d1367f
SE
552 }
553
554 rcu_read_unlock();
555
556 local_irq_restore(flags);
557}
558
a8d757ef
SE
559static inline void perf_cgroup_sched_out(struct task_struct *task,
560 struct task_struct *next)
e5d1367f 561{
a8d757ef
SE
562 struct perf_cgroup *cgrp1;
563 struct perf_cgroup *cgrp2 = NULL;
564
565 /*
566 * we come here when we know perf_cgroup_events > 0
567 */
568 cgrp1 = perf_cgroup_from_task(task);
569
570 /*
571 * next is NULL when called from perf_event_enable_on_exec()
572 * that will systematically cause a cgroup_switch()
573 */
574 if (next)
575 cgrp2 = perf_cgroup_from_task(next);
576
577 /*
578 * only schedule out current cgroup events if we know
579 * that we are switching to a different cgroup. Otherwise,
580 * do not touch the cgroup events.
581 */
582 if (cgrp1 != cgrp2)
583 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
e5d1367f
SE
584}
585
a8d757ef
SE
586static inline void perf_cgroup_sched_in(struct task_struct *prev,
587 struct task_struct *task)
e5d1367f 588{
a8d757ef
SE
589 struct perf_cgroup *cgrp1;
590 struct perf_cgroup *cgrp2 = NULL;
591
592 /*
593 * we come here when we know perf_cgroup_events > 0
594 */
595 cgrp1 = perf_cgroup_from_task(task);
596
597 /* prev can never be NULL */
598 cgrp2 = perf_cgroup_from_task(prev);
599
600 /*
601 * only need to schedule in cgroup events if we are changing
602 * cgroup during ctxsw. Cgroup events were not scheduled
603 * out during ctxsw if that was not the case.
604 */
605 if (cgrp1 != cgrp2)
606 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
e5d1367f
SE
607}
608
609static inline int perf_cgroup_connect(int fd, struct perf_event *event,
610 struct perf_event_attr *attr,
611 struct perf_event *group_leader)
612{
613 struct perf_cgroup *cgrp;
614 struct cgroup_subsys_state *css;
2903ff01
AV
615 struct fd f = fdget(fd);
616 int ret = 0;
e5d1367f 617
2903ff01 618 if (!f.file)
e5d1367f
SE
619 return -EBADF;
620
ec903c0c
TH
621 css = css_tryget_online_from_dir(f.file->f_dentry,
622 &perf_event_cgrp_subsys);
3db272c0
LZ
623 if (IS_ERR(css)) {
624 ret = PTR_ERR(css);
625 goto out;
626 }
e5d1367f
SE
627
628 cgrp = container_of(css, struct perf_cgroup, css);
629 event->cgrp = cgrp;
630
631 /*
632 * all events in a group must monitor
633 * the same cgroup because a task belongs
634 * to only one perf cgroup at a time
635 */
636 if (group_leader && group_leader->cgrp != cgrp) {
637 perf_detach_cgroup(event);
638 ret = -EINVAL;
e5d1367f 639 }
3db272c0 640out:
2903ff01 641 fdput(f);
e5d1367f
SE
642 return ret;
643}
644
645static inline void
646perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
647{
648 struct perf_cgroup_info *t;
649 t = per_cpu_ptr(event->cgrp->info, event->cpu);
650 event->shadow_ctx_time = now - t->timestamp;
651}
652
653static inline void
654perf_cgroup_defer_enabled(struct perf_event *event)
655{
656 /*
657 * when the current task's perf cgroup does not match
658 * the event's, we need to remember to call the
659 * perf_mark_enable() function the first time a task with
660 * a matching perf cgroup is scheduled in.
661 */
662 if (is_cgroup_event(event) && !perf_cgroup_match(event))
663 event->cgrp_defer_enabled = 1;
664}
665
666static inline void
667perf_cgroup_mark_enabled(struct perf_event *event,
668 struct perf_event_context *ctx)
669{
670 struct perf_event *sub;
671 u64 tstamp = perf_event_time(event);
672
673 if (!event->cgrp_defer_enabled)
674 return;
675
676 event->cgrp_defer_enabled = 0;
677
678 event->tstamp_enabled = tstamp - event->total_time_enabled;
679 list_for_each_entry(sub, &event->sibling_list, group_entry) {
680 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
681 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
682 sub->cgrp_defer_enabled = 0;
683 }
684 }
685}
686#else /* !CONFIG_CGROUP_PERF */
687
688static inline bool
689perf_cgroup_match(struct perf_event *event)
690{
691 return true;
692}
693
694static inline void perf_detach_cgroup(struct perf_event *event)
695{}
696
697static inline int is_cgroup_event(struct perf_event *event)
698{
699 return 0;
700}
701
702static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
703{
704 return 0;
705}
706
707static inline void update_cgrp_time_from_event(struct perf_event *event)
708{
709}
710
711static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
712{
713}
714
a8d757ef
SE
715static inline void perf_cgroup_sched_out(struct task_struct *task,
716 struct task_struct *next)
e5d1367f
SE
717{
718}
719
a8d757ef
SE
720static inline void perf_cgroup_sched_in(struct task_struct *prev,
721 struct task_struct *task)
e5d1367f
SE
722{
723}
724
725static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
726 struct perf_event_attr *attr,
727 struct perf_event *group_leader)
728{
729 return -EINVAL;
730}
731
732static inline void
3f7cce3c
SE
733perf_cgroup_set_timestamp(struct task_struct *task,
734 struct perf_event_context *ctx)
e5d1367f
SE
735{
736}
737
738void
739perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
740{
741}
742
743static inline void
744perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
745{
746}
747
748static inline u64 perf_cgroup_event_time(struct perf_event *event)
749{
750 return 0;
751}
752
753static inline void
754perf_cgroup_defer_enabled(struct perf_event *event)
755{
756}
757
758static inline void
759perf_cgroup_mark_enabled(struct perf_event *event,
760 struct perf_event_context *ctx)
761{
762}
763#endif
764
9e630205
SE
765/*
766 * set default to be dependent on timer tick just
767 * like original code
768 */
769#define PERF_CPU_HRTIMER (1000 / HZ)
770/*
771 * function must be called with interrupts disabled
772 */
773static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
774{
775 struct perf_cpu_context *cpuctx;
776 enum hrtimer_restart ret = HRTIMER_NORESTART;
777 int rotations = 0;
778
779 WARN_ON(!irqs_disabled());
780
781 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
782
783 rotations = perf_rotate_context(cpuctx);
784
785 /*
786 * arm timer if needed
787 */
788 if (rotations) {
789 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
790 ret = HRTIMER_RESTART;
791 }
792
793 return ret;
794}
795
796/* CPU is going down */
797void perf_cpu_hrtimer_cancel(int cpu)
798{
799 struct perf_cpu_context *cpuctx;
800 struct pmu *pmu;
801 unsigned long flags;
802
803 if (WARN_ON(cpu != smp_processor_id()))
804 return;
805
806 local_irq_save(flags);
807
808 rcu_read_lock();
809
810 list_for_each_entry_rcu(pmu, &pmus, entry) {
811 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
812
813 if (pmu->task_ctx_nr == perf_sw_context)
814 continue;
815
816 hrtimer_cancel(&cpuctx->hrtimer);
817 }
818
819 rcu_read_unlock();
820
821 local_irq_restore(flags);
822}
823
824static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
825{
826 struct hrtimer *hr = &cpuctx->hrtimer;
827 struct pmu *pmu = cpuctx->ctx.pmu;
62b85639 828 int timer;
9e630205
SE
829
830 /* no multiplexing needed for SW PMU */
831 if (pmu->task_ctx_nr == perf_sw_context)
832 return;
833
62b85639
SE
834 /*
835 * check default is sane, if not set then force to
836 * default interval (1/tick)
837 */
838 timer = pmu->hrtimer_interval_ms;
839 if (timer < 1)
840 timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
841
842 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
9e630205
SE
843
844 hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
845 hr->function = perf_cpu_hrtimer_handler;
846}
847
848static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
849{
850 struct hrtimer *hr = &cpuctx->hrtimer;
851 struct pmu *pmu = cpuctx->ctx.pmu;
852
853 /* not for SW PMU */
854 if (pmu->task_ctx_nr == perf_sw_context)
855 return;
856
857 if (hrtimer_active(hr))
858 return;
859
860 if (!hrtimer_callback_running(hr))
861 __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
862 0, HRTIMER_MODE_REL_PINNED, 0);
863}
864
33696fc0 865void perf_pmu_disable(struct pmu *pmu)
9e35ad38 866{
33696fc0
PZ
867 int *count = this_cpu_ptr(pmu->pmu_disable_count);
868 if (!(*count)++)
869 pmu->pmu_disable(pmu);
9e35ad38 870}
9e35ad38 871
33696fc0 872void perf_pmu_enable(struct pmu *pmu)
9e35ad38 873{
33696fc0
PZ
874 int *count = this_cpu_ptr(pmu->pmu_disable_count);
875 if (!--(*count))
876 pmu->pmu_enable(pmu);
9e35ad38 877}
9e35ad38 878
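/*
 * Illustrative sketch, not part of the kernel source: perf_pmu_disable()
 * and perf_pmu_enable() nest by keeping a per-cpu depth counter, so only
 * the outermost disable and the matching final enable reach the hardware
 * callbacks.  The shape of the pattern in plain C:
 */
static int toy_disable_depth;

static void toy_hw_disable(void) { /* would stop the counters here */ }
static void toy_hw_enable(void)  { /* would restart the counters here */ }

static void toy_pmu_disable(void)
{
	if (!toy_disable_depth++)	/* 0 -> 1: really disable */
		toy_hw_disable();
}

static void toy_pmu_enable(void)
{
	if (!--toy_disable_depth)	/* 1 -> 0: really re-enable */
		toy_hw_enable();
}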
e9d2b064
PZ
879static DEFINE_PER_CPU(struct list_head, rotation_list);
880
881/*
882 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
883 * because they're strictly cpu affine and rotate_start is called with IRQs
884 * disabled, while rotate_context is called from IRQ context.
885 */
108b02cf 886static void perf_pmu_rotate_start(struct pmu *pmu)
9e35ad38 887{
108b02cf 888 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
e9d2b064 889 struct list_head *head = &__get_cpu_var(rotation_list);
b5ab4cd5 890
e9d2b064 891 WARN_ON(!irqs_disabled());
b5ab4cd5 892
d84153d6 893 if (list_empty(&cpuctx->rotation_list))
e9d2b064 894 list_add(&cpuctx->rotation_list, head);
9e35ad38 895}
9e35ad38 896
cdd6c482 897static void get_ctx(struct perf_event_context *ctx)
a63eaf34 898{
e5289d4a 899 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
a63eaf34
PM
900}
901
cdd6c482 902static void put_ctx(struct perf_event_context *ctx)
a63eaf34 903{
564c2b21
PM
904 if (atomic_dec_and_test(&ctx->refcount)) {
905 if (ctx->parent_ctx)
906 put_ctx(ctx->parent_ctx);
c93f7669
PM
907 if (ctx->task)
908 put_task_struct(ctx->task);
cb796ff3 909 kfree_rcu(ctx, rcu_head);
564c2b21 910 }
a63eaf34
PM
911}
912
cdd6c482 913static void unclone_ctx(struct perf_event_context *ctx)
71a851b4
PZ
914{
915 if (ctx->parent_ctx) {
916 put_ctx(ctx->parent_ctx);
917 ctx->parent_ctx = NULL;
918 }
5a3126d4 919 ctx->generation++;
71a851b4
PZ
920}
921
6844c09d
ACM
922static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
923{
924 /*
925 * only top level events have the pid namespace they were created in
926 */
927 if (event->parent)
928 event = event->parent;
929
930 return task_tgid_nr_ns(p, event->ns);
931}
932
933static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
934{
935 /*
936 * only top level events have the pid namespace they were created in
937 */
938 if (event->parent)
939 event = event->parent;
940
941 return task_pid_nr_ns(p, event->ns);
942}
943
7f453c24 944/*
cdd6c482 945 * If we inherit events we want to return the parent event id
7f453c24
PZ
946 * to userspace.
947 */
cdd6c482 948static u64 primary_event_id(struct perf_event *event)
7f453c24 949{
cdd6c482 950 u64 id = event->id;
7f453c24 951
cdd6c482
IM
952 if (event->parent)
953 id = event->parent->id;
7f453c24
PZ
954
955 return id;
956}
957
25346b93 958/*
cdd6c482 959 * Get the perf_event_context for a task and lock it.
25346b93
PM
960 * This has to cope with the fact that until it is locked,
961 * the context could get moved to another task.
962 */
cdd6c482 963static struct perf_event_context *
8dc85d54 964perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
25346b93 965{
cdd6c482 966 struct perf_event_context *ctx;
25346b93 967
9ed6060d 968retry:
058ebd0e
PZ
969 /*
970 * One of the few rules of preemptible RCU is that one cannot do
971 * rcu_read_unlock() while holding a scheduler (or nested) lock when
972 * part of the read side critical section was preemptible -- see
973 * rcu_read_unlock_special().
974 *
975 * Since ctx->lock nests under rq->lock we must ensure the entire read
976 * side critical section is non-preemptible.
977 */
978 preempt_disable();
979 rcu_read_lock();
8dc85d54 980 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
25346b93
PM
981 if (ctx) {
982 /*
983 * If this context is a clone of another, it might
984 * get swapped for another underneath us by
cdd6c482 985 * perf_event_task_sched_out, though the
25346b93
PM
986 * rcu_read_lock() protects us from any context
987 * getting freed. Lock the context and check if it
988 * got swapped before we could get the lock, and retry
989 * if so. If we locked the right context, then it
990 * can't get swapped on us any more.
991 */
e625cce1 992 raw_spin_lock_irqsave(&ctx->lock, *flags);
8dc85d54 993 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
e625cce1 994 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
058ebd0e
PZ
995 rcu_read_unlock();
996 preempt_enable();
25346b93
PM
997 goto retry;
998 }
b49a9e7e
PZ
999
1000 if (!atomic_inc_not_zero(&ctx->refcount)) {
e625cce1 1001 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
b49a9e7e
PZ
1002 ctx = NULL;
1003 }
25346b93
PM
1004 }
1005 rcu_read_unlock();
058ebd0e 1006 preempt_enable();
25346b93
PM
1007 return ctx;
1008}
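/*
 * Illustrative sketch, not part of the kernel source: the retry loop above
 * is the classic "read the pointer locklessly, take the object's lock,
 * re-check the pointer, retry if it moved" pattern.  A simplified model
 * with a mutex standing in for ctx->lock; note that the real code also
 * relies on RCU to keep the context from being freed between the load and
 * the lock, which this sketch does not model:
 */
#include <pthread.h>
#include <stddef.h>

struct toy_ctx {
	pthread_mutex_t lock;
};

struct toy_task {
	struct toy_ctx *ctx;	/* may be swapped by a concurrent writer */
};

static struct toy_ctx *toy_lock_ctx(struct toy_task *task)
{
	struct toy_ctx *ctx;

retry:
	ctx = __atomic_load_n(&task->ctx, __ATOMIC_ACQUIRE);
	if (!ctx)
		return NULL;
	pthread_mutex_lock(&ctx->lock);
	if (ctx != __atomic_load_n(&task->ctx, __ATOMIC_ACQUIRE)) {
		/* it was swapped before we got the lock -- try again */
		pthread_mutex_unlock(&ctx->lock);
		goto retry;
	}
	return ctx;		/* locked, and still this task's context */
}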
1009
1010/*
1011 * Get the context for a task and increment its pin_count so it
1012 * can't get swapped to another task. This also increments its
1013 * reference count so that the context can't get freed.
1014 */
8dc85d54
PZ
1015static struct perf_event_context *
1016perf_pin_task_context(struct task_struct *task, int ctxn)
25346b93 1017{
cdd6c482 1018 struct perf_event_context *ctx;
25346b93
PM
1019 unsigned long flags;
1020
8dc85d54 1021 ctx = perf_lock_task_context(task, ctxn, &flags);
25346b93
PM
1022 if (ctx) {
1023 ++ctx->pin_count;
e625cce1 1024 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1025 }
1026 return ctx;
1027}
1028
cdd6c482 1029static void perf_unpin_context(struct perf_event_context *ctx)
25346b93
PM
1030{
1031 unsigned long flags;
1032
e625cce1 1033 raw_spin_lock_irqsave(&ctx->lock, flags);
25346b93 1034 --ctx->pin_count;
e625cce1 1035 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1036}
1037
f67218c3
PZ
1038/*
1039 * Update the record of the current time in a context.
1040 */
1041static void update_context_time(struct perf_event_context *ctx)
1042{
1043 u64 now = perf_clock();
1044
1045 ctx->time += now - ctx->timestamp;
1046 ctx->timestamp = now;
1047}
1048
4158755d
SE
1049static u64 perf_event_time(struct perf_event *event)
1050{
1051 struct perf_event_context *ctx = event->ctx;
e5d1367f
SE
1052
1053 if (is_cgroup_event(event))
1054 return perf_cgroup_event_time(event);
1055
4158755d
SE
1056 return ctx ? ctx->time : 0;
1057}
1058
f67218c3
PZ
1059/*
1060 * Update the total_time_enabled and total_time_running fields for a event.
b7526f0c 1061 * The caller of this function needs to hold the ctx->lock.
f67218c3
PZ
1062 */
1063static void update_event_times(struct perf_event *event)
1064{
1065 struct perf_event_context *ctx = event->ctx;
1066 u64 run_end;
1067
1068 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1069 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1070 return;
e5d1367f
SE
1071 /*
1072 * in cgroup mode, time_enabled represents
1073 * the time the event was enabled AND active
1074 * tasks were in the monitored cgroup. This is
1075 * independent of the activity of the context as
1076 * there may be a mix of cgroup and non-cgroup events.
1077 *
1078 * That is why we treat cgroup events differently
1079 * here.
1080 */
1081 if (is_cgroup_event(event))
46cd6a7f 1082 run_end = perf_cgroup_event_time(event);
e5d1367f
SE
1083 else if (ctx->is_active)
1084 run_end = ctx->time;
acd1d7c1
PZ
1085 else
1086 run_end = event->tstamp_stopped;
1087
1088 event->total_time_enabled = run_end - event->tstamp_enabled;
f67218c3
PZ
1089
1090 if (event->state == PERF_EVENT_STATE_INACTIVE)
1091 run_end = event->tstamp_stopped;
1092 else
4158755d 1093 run_end = perf_event_time(event);
f67218c3
PZ
1094
1095 event->total_time_running = run_end - event->tstamp_running;
e5d1367f 1096
f67218c3
PZ
1097}
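/*
 * Illustrative sketch, not part of the kernel source: for a plain
 * (non-cgroup) event the two timings above are just subtractions against
 * the context clock; tstamp_enabled and tstamp_running are maintained by
 * the sched in/out paths so that the same subtraction counts only the time
 * the event was enabled, or the time it was actually on the PMU,
 * respectively.  Worked example for an event enabled at ctx->time = 100 and
 * first scheduled in at 150, never scheduled out since:
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t tstamp_enabled = 100, tstamp_running = 150;
	uint64_t ctx_time = 400;				/* "now", event still active */

	uint64_t time_enabled = ctx_time - tstamp_enabled;	/* 300 */
	uint64_t time_running = ctx_time - tstamp_running;	/* 250 */

	assert(time_enabled == 300 && time_running == 250);
	return 0;
}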
1098
96c21a46
PZ
1099/*
1100 * Update total_time_enabled and total_time_running for all events in a group.
1101 */
1102static void update_group_times(struct perf_event *leader)
1103{
1104 struct perf_event *event;
1105
1106 update_event_times(leader);
1107 list_for_each_entry(event, &leader->sibling_list, group_entry)
1108 update_event_times(event);
1109}
1110
889ff015
FW
1111static struct list_head *
1112ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1113{
1114 if (event->attr.pinned)
1115 return &ctx->pinned_groups;
1116 else
1117 return &ctx->flexible_groups;
1118}
1119
fccc714b 1120/*
cdd6c482 1121 * Add an event to the lists for its context.
fccc714b
PZ
1122 * Must be called with ctx->mutex and ctx->lock held.
1123 */
04289bb9 1124static void
cdd6c482 1125list_add_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1126{
8a49542c
PZ
1127 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1128 event->attach_state |= PERF_ATTACH_CONTEXT;
04289bb9
IM
1129
1130 /*
8a49542c
PZ
1131 * If we're a stand alone event or group leader, we go to the context
1132 * list, group events are kept attached to the group so that
1133 * perf_group_detach can, at all times, locate all siblings.
04289bb9 1134 */
8a49542c 1135 if (event->group_leader == event) {
889ff015
FW
1136 struct list_head *list;
1137
d6f962b5
FW
1138 if (is_software_event(event))
1139 event->group_flags |= PERF_GROUP_SOFTWARE;
1140
889ff015
FW
1141 list = ctx_group_list(event, ctx);
1142 list_add_tail(&event->group_entry, list);
5c148194 1143 }
592903cd 1144
08309379 1145 if (is_cgroup_event(event))
e5d1367f 1146 ctx->nr_cgroups++;
e5d1367f 1147
d010b332
SE
1148 if (has_branch_stack(event))
1149 ctx->nr_branch_stack++;
1150
cdd6c482 1151 list_add_rcu(&event->event_entry, &ctx->event_list);
b5ab4cd5 1152 if (!ctx->nr_events)
108b02cf 1153 perf_pmu_rotate_start(ctx->pmu);
cdd6c482
IM
1154 ctx->nr_events++;
1155 if (event->attr.inherit_stat)
bfbd3381 1156 ctx->nr_stat++;
5a3126d4
PZ
1157
1158 ctx->generation++;
04289bb9
IM
1159}
1160
0231bb53
JO
1161/*
1162 * Initialize event state based on the perf_event_attr::disabled.
1163 */
1164static inline void perf_event__state_init(struct perf_event *event)
1165{
1166 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1167 PERF_EVENT_STATE_INACTIVE;
1168}
1169
c320c7b7
ACM
1170/*
1171 * Called at perf_event creation and when events are attached/detached from a
1172 * group.
1173 */
1174static void perf_event__read_size(struct perf_event *event)
1175{
1176 int entry = sizeof(u64); /* value */
1177 int size = 0;
1178 int nr = 1;
1179
1180 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1181 size += sizeof(u64);
1182
1183 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1184 size += sizeof(u64);
1185
1186 if (event->attr.read_format & PERF_FORMAT_ID)
1187 entry += sizeof(u64);
1188
1189 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1190 nr += event->group_leader->nr_siblings;
1191 size += sizeof(u64);
1192 }
1193
1194 size += entry * nr;
1195 event->read_size = size;
1196}
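/*
 * Worked example (illustrative, not part of the kernel source): with
 * read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
 * | PERF_FORMAT_ID | PERF_FORMAT_GROUP and a leader with two siblings, each
 * entry is 8 (value) + 8 (id) = 16 bytes, the header is 8 (nr) +
 * 8 (time_enabled) + 8 (time_running) = 24 bytes, and nr = 3 entries, so the
 * computed read_size is 24 + 3 * 16 = 72 bytes.
 */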
1197
1198static void perf_event__header_size(struct perf_event *event)
1199{
1200 struct perf_sample_data *data;
1201 u64 sample_type = event->attr.sample_type;
1202 u16 size = 0;
1203
1204 perf_event__read_size(event);
1205
1206 if (sample_type & PERF_SAMPLE_IP)
1207 size += sizeof(data->ip);
1208
6844c09d
ACM
1209 if (sample_type & PERF_SAMPLE_ADDR)
1210 size += sizeof(data->addr);
1211
1212 if (sample_type & PERF_SAMPLE_PERIOD)
1213 size += sizeof(data->period);
1214
c3feedf2
AK
1215 if (sample_type & PERF_SAMPLE_WEIGHT)
1216 size += sizeof(data->weight);
1217
6844c09d
ACM
1218 if (sample_type & PERF_SAMPLE_READ)
1219 size += event->read_size;
1220
d6be9ad6
SE
1221 if (sample_type & PERF_SAMPLE_DATA_SRC)
1222 size += sizeof(data->data_src.val);
1223
fdfbbd07
AK
1224 if (sample_type & PERF_SAMPLE_TRANSACTION)
1225 size += sizeof(data->txn);
1226
6844c09d
ACM
1227 event->header_size = size;
1228}
1229
1230static void perf_event__id_header_size(struct perf_event *event)
1231{
1232 struct perf_sample_data *data;
1233 u64 sample_type = event->attr.sample_type;
1234 u16 size = 0;
1235
c320c7b7
ACM
1236 if (sample_type & PERF_SAMPLE_TID)
1237 size += sizeof(data->tid_entry);
1238
1239 if (sample_type & PERF_SAMPLE_TIME)
1240 size += sizeof(data->time);
1241
ff3d527c
AH
1242 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1243 size += sizeof(data->id);
1244
c320c7b7
ACM
1245 if (sample_type & PERF_SAMPLE_ID)
1246 size += sizeof(data->id);
1247
1248 if (sample_type & PERF_SAMPLE_STREAM_ID)
1249 size += sizeof(data->stream_id);
1250
1251 if (sample_type & PERF_SAMPLE_CPU)
1252 size += sizeof(data->cpu_entry);
1253
6844c09d 1254 event->id_header_size = size;
c320c7b7
ACM
1255}
1256
8a49542c
PZ
1257static void perf_group_attach(struct perf_event *event)
1258{
c320c7b7 1259 struct perf_event *group_leader = event->group_leader, *pos;
8a49542c 1260
74c3337c
PZ
1261 /*
1262 * We can have double attach due to group movement in perf_event_open.
1263 */
1264 if (event->attach_state & PERF_ATTACH_GROUP)
1265 return;
1266
8a49542c
PZ
1267 event->attach_state |= PERF_ATTACH_GROUP;
1268
1269 if (group_leader == event)
1270 return;
1271
1272 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1273 !is_software_event(event))
1274 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1275
1276 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1277 group_leader->nr_siblings++;
c320c7b7
ACM
1278
1279 perf_event__header_size(group_leader);
1280
1281 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1282 perf_event__header_size(pos);
8a49542c
PZ
1283}
1284
a63eaf34 1285/*
cdd6c482 1286 * Remove an event from the lists for its context.
fccc714b 1287 * Must be called with ctx->mutex and ctx->lock held.
a63eaf34 1288 */
04289bb9 1289static void
cdd6c482 1290list_del_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1291{
68cacd29 1292 struct perf_cpu_context *cpuctx;
8a49542c
PZ
1293 /*
1294 * We can have double detach due to exit/hot-unplug + close.
1295 */
1296 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
a63eaf34 1297 return;
8a49542c
PZ
1298
1299 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1300
68cacd29 1301 if (is_cgroup_event(event)) {
e5d1367f 1302 ctx->nr_cgroups--;
68cacd29
SE
1303 cpuctx = __get_cpu_context(ctx);
1304 /*
1305 * if there are no more cgroup events
1306 * then clear cgrp to avoid a stale pointer
1307 * in update_cgrp_time_from_cpuctx()
1308 */
1309 if (!ctx->nr_cgroups)
1310 cpuctx->cgrp = NULL;
1311 }
e5d1367f 1312
d010b332
SE
1313 if (has_branch_stack(event))
1314 ctx->nr_branch_stack--;
1315
cdd6c482
IM
1316 ctx->nr_events--;
1317 if (event->attr.inherit_stat)
bfbd3381 1318 ctx->nr_stat--;
8bc20959 1319
cdd6c482 1320 list_del_rcu(&event->event_entry);
04289bb9 1321
8a49542c
PZ
1322 if (event->group_leader == event)
1323 list_del_init(&event->group_entry);
5c148194 1324
96c21a46 1325 update_group_times(event);
b2e74a26
SE
1326
1327 /*
1328 * If event was in error state, then keep it
1329 * that way, otherwise bogus counts will be
1330 * returned on read(). The only way to get out
1331 * of error state is by explicit re-enabling
1332 * of the event
1333 */
1334 if (event->state > PERF_EVENT_STATE_OFF)
1335 event->state = PERF_EVENT_STATE_OFF;
5a3126d4
PZ
1336
1337 ctx->generation++;
050735b0
PZ
1338}
1339
8a49542c 1340static void perf_group_detach(struct perf_event *event)
050735b0
PZ
1341{
1342 struct perf_event *sibling, *tmp;
8a49542c
PZ
1343 struct list_head *list = NULL;
1344
1345 /*
1346 * We can have double detach due to exit/hot-unplug + close.
1347 */
1348 if (!(event->attach_state & PERF_ATTACH_GROUP))
1349 return;
1350
1351 event->attach_state &= ~PERF_ATTACH_GROUP;
1352
1353 /*
1354 * If this is a sibling, remove it from its group.
1355 */
1356 if (event->group_leader != event) {
1357 list_del_init(&event->group_entry);
1358 event->group_leader->nr_siblings--;
c320c7b7 1359 goto out;
8a49542c
PZ
1360 }
1361
1362 if (!list_empty(&event->group_entry))
1363 list = &event->group_entry;
2e2af50b 1364
04289bb9 1365 /*
cdd6c482
IM
1366 * If this was a group event with sibling events then
1367 * upgrade the siblings to singleton events by adding them
8a49542c 1368 * to whatever list we are on.
04289bb9 1369 */
cdd6c482 1370 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
8a49542c
PZ
1371 if (list)
1372 list_move_tail(&sibling->group_entry, list);
04289bb9 1373 sibling->group_leader = sibling;
d6f962b5
FW
1374
1375 /* Inherit group flags from the previous leader */
1376 sibling->group_flags = event->group_flags;
04289bb9 1377 }
c320c7b7
ACM
1378
1379out:
1380 perf_event__header_size(event->group_leader);
1381
1382 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1383 perf_event__header_size(tmp);
04289bb9
IM
1384}
1385
fadfe7be
JO
1386/*
1387 * User event without the task.
1388 */
1389static bool is_orphaned_event(struct perf_event *event)
1390{
1391 return event && !is_kernel_event(event) && !event->owner;
1392}
1393
1394/*
1395 * Event has a parent but the parent's task has finished and it is
1396 * alive only because children hold a reference.
1397 */
1398static bool is_orphaned_child(struct perf_event *event)
1399{
1400 return is_orphaned_event(event->parent);
1401}
1402
1403static void orphans_remove_work(struct work_struct *work);
1404
1405static void schedule_orphans_remove(struct perf_event_context *ctx)
1406{
1407 if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
1408 return;
1409
1410 if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
1411 get_ctx(ctx);
1412 ctx->orphans_remove_sched = true;
1413 }
1414}
1415
1416static int __init perf_workqueue_init(void)
1417{
1418 perf_wq = create_singlethread_workqueue("perf");
1419 WARN(!perf_wq, "failed to create perf workqueue\n");
1420 return perf_wq ? 0 : -1;
1421}
1422
1423core_initcall(perf_workqueue_init);
1424
fa66f07a
SE
1425static inline int
1426event_filter_match(struct perf_event *event)
1427{
e5d1367f
SE
1428 return (event->cpu == -1 || event->cpu == smp_processor_id())
1429 && perf_cgroup_match(event);
fa66f07a
SE
1430}
1431
9ffcfa6f
SE
1432static void
1433event_sched_out(struct perf_event *event,
3b6f9e5c 1434 struct perf_cpu_context *cpuctx,
cdd6c482 1435 struct perf_event_context *ctx)
3b6f9e5c 1436{
4158755d 1437 u64 tstamp = perf_event_time(event);
fa66f07a
SE
1438 u64 delta;
1439 /*
1440 * An event which could not be activated because of
1441 * filter mismatch still needs to have its timings
1442 * maintained, otherwise bogus information is returned
1443 * via read() for time_enabled, time_running:
1444 */
1445 if (event->state == PERF_EVENT_STATE_INACTIVE
1446 && !event_filter_match(event)) {
e5d1367f 1447 delta = tstamp - event->tstamp_stopped;
fa66f07a 1448 event->tstamp_running += delta;
4158755d 1449 event->tstamp_stopped = tstamp;
fa66f07a
SE
1450 }
1451
cdd6c482 1452 if (event->state != PERF_EVENT_STATE_ACTIVE)
9ffcfa6f 1453 return;
3b6f9e5c 1454
44377277
AS
1455 perf_pmu_disable(event->pmu);
1456
cdd6c482
IM
1457 event->state = PERF_EVENT_STATE_INACTIVE;
1458 if (event->pending_disable) {
1459 event->pending_disable = 0;
1460 event->state = PERF_EVENT_STATE_OFF;
970892a9 1461 }
4158755d 1462 event->tstamp_stopped = tstamp;
a4eaf7f1 1463 event->pmu->del(event, 0);
cdd6c482 1464 event->oncpu = -1;
3b6f9e5c 1465
cdd6c482 1466 if (!is_software_event(event))
3b6f9e5c
PM
1467 cpuctx->active_oncpu--;
1468 ctx->nr_active--;
0f5a2601
PZ
1469 if (event->attr.freq && event->attr.sample_freq)
1470 ctx->nr_freq--;
cdd6c482 1471 if (event->attr.exclusive || !cpuctx->active_oncpu)
3b6f9e5c 1472 cpuctx->exclusive = 0;
44377277 1473
fadfe7be
JO
1474 if (is_orphaned_child(event))
1475 schedule_orphans_remove(ctx);
1476
44377277 1477 perf_pmu_enable(event->pmu);
3b6f9e5c
PM
1478}
1479
d859e29f 1480static void
cdd6c482 1481group_sched_out(struct perf_event *group_event,
d859e29f 1482 struct perf_cpu_context *cpuctx,
cdd6c482 1483 struct perf_event_context *ctx)
d859e29f 1484{
cdd6c482 1485 struct perf_event *event;
fa66f07a 1486 int state = group_event->state;
d859e29f 1487
cdd6c482 1488 event_sched_out(group_event, cpuctx, ctx);
d859e29f
PM
1489
1490 /*
1491 * Schedule out siblings (if any):
1492 */
cdd6c482
IM
1493 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1494 event_sched_out(event, cpuctx, ctx);
d859e29f 1495
fa66f07a 1496 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
d859e29f
PM
1497 cpuctx->exclusive = 0;
1498}
1499
46ce0fe9
PZ
1500struct remove_event {
1501 struct perf_event *event;
1502 bool detach_group;
1503};
1504
0793a61d 1505/*
cdd6c482 1506 * Cross CPU call to remove a performance event
0793a61d 1507 *
cdd6c482 1508 * We disable the event on the hardware level first. After that we
0793a61d
TG
1509 * remove it from the context list.
1510 */
fe4b04fa 1511static int __perf_remove_from_context(void *info)
0793a61d 1512{
46ce0fe9
PZ
1513 struct remove_event *re = info;
1514 struct perf_event *event = re->event;
cdd6c482 1515 struct perf_event_context *ctx = event->ctx;
108b02cf 1516 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
0793a61d 1517
e625cce1 1518 raw_spin_lock(&ctx->lock);
cdd6c482 1519 event_sched_out(event, cpuctx, ctx);
46ce0fe9
PZ
1520 if (re->detach_group)
1521 perf_group_detach(event);
cdd6c482 1522 list_del_event(event, ctx);
64ce3126
PZ
1523 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1524 ctx->is_active = 0;
1525 cpuctx->task_ctx = NULL;
1526 }
e625cce1 1527 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
1528
1529 return 0;
0793a61d
TG
1530}
1531
1532
1533/*
cdd6c482 1534 * Remove the event from a task's (or a CPU's) list of events.
0793a61d 1535 *
cdd6c482 1536 * CPU events are removed with a smp call. For task events we only
0793a61d 1537 * call when the task is on a CPU.
c93f7669 1538 *
cdd6c482
IM
1539 * If event->ctx is a cloned context, callers must make sure that
1540 * every task struct that event->ctx->task could possibly point to
c93f7669
PM
1541 * remains valid. This is OK when called from perf_release since
1542 * that only calls us on the top-level context, which can't be a clone.
cdd6c482 1543 * When called from perf_event_exit_task, it's OK because the
c93f7669 1544 * context has been detached from its task.
0793a61d 1545 */
46ce0fe9 1546static void perf_remove_from_context(struct perf_event *event, bool detach_group)
0793a61d 1547{
cdd6c482 1548 struct perf_event_context *ctx = event->ctx;
0793a61d 1549 struct task_struct *task = ctx->task;
46ce0fe9
PZ
1550 struct remove_event re = {
1551 .event = event,
1552 .detach_group = detach_group,
1553 };
0793a61d 1554
fe4b04fa
PZ
1555 lockdep_assert_held(&ctx->mutex);
1556
0793a61d
TG
1557 if (!task) {
1558 /*
cdd6c482 1559 * Per cpu events are removed via an smp call and
af901ca1 1560 * the removal is always successful.
0793a61d 1561 */
46ce0fe9 1562 cpu_function_call(event->cpu, __perf_remove_from_context, &re);
0793a61d
TG
1563 return;
1564 }
1565
1566retry:
46ce0fe9 1567 if (!task_function_call(task, __perf_remove_from_context, &re))
fe4b04fa 1568 return;
0793a61d 1569
e625cce1 1570 raw_spin_lock_irq(&ctx->lock);
0793a61d 1571 /*
fe4b04fa
PZ
1572 * If we failed to find a running task, but find the context active now
1573 * that we've acquired the ctx->lock, retry.
0793a61d 1574 */
fe4b04fa 1575 if (ctx->is_active) {
e625cce1 1576 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1577 goto retry;
1578 }
1579
1580 /*
fe4b04fa
PZ
1581 * Since the task isn't running, it's safe to remove the event; our
1582 * holding the ctx->lock ensures the task won't get scheduled in.
0793a61d 1583 */
46ce0fe9
PZ
1584 if (detach_group)
1585 perf_group_detach(event);
fe4b04fa 1586 list_del_event(event, ctx);
e625cce1 1587 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1588}
1589
d859e29f 1590/*
cdd6c482 1591 * Cross CPU call to disable a performance event
d859e29f 1592 */
500ad2d8 1593int __perf_event_disable(void *info)
d859e29f 1594{
cdd6c482 1595 struct perf_event *event = info;
cdd6c482 1596 struct perf_event_context *ctx = event->ctx;
108b02cf 1597 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
d859e29f
PM
1598
1599 /*
cdd6c482
IM
1600 * If this is a per-task event, need to check whether this
1601 * event's task is the current task on this cpu.
fe4b04fa
PZ
1602 *
1603 * Can trigger due to concurrent perf_event_context_sched_out()
1604 * flipping contexts around.
d859e29f 1605 */
665c2142 1606 if (ctx->task && cpuctx->task_ctx != ctx)
fe4b04fa 1607 return -EINVAL;
d859e29f 1608
e625cce1 1609 raw_spin_lock(&ctx->lock);
d859e29f
PM
1610
1611 /*
cdd6c482 1612 * If the event is on, turn it off.
d859e29f
PM
1613 * If it is in error state, leave it in error state.
1614 */
cdd6c482 1615 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
4af4998b 1616 update_context_time(ctx);
e5d1367f 1617 update_cgrp_time_from_event(event);
cdd6c482
IM
1618 update_group_times(event);
1619 if (event == event->group_leader)
1620 group_sched_out(event, cpuctx, ctx);
d859e29f 1621 else
cdd6c482
IM
1622 event_sched_out(event, cpuctx, ctx);
1623 event->state = PERF_EVENT_STATE_OFF;
d859e29f
PM
1624 }
1625
e625cce1 1626 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
1627
1628 return 0;
d859e29f
PM
1629}
1630
1631/*
cdd6c482 1632 * Disable a event.
c93f7669 1633 *
cdd6c482
IM
1634 * If event->ctx is a cloned context, callers must make sure that
1635 * every task struct that event->ctx->task could possibly point to
c93f7669 1636 * remains valid. This condition is satisifed when called through
cdd6c482
IM
1637 * perf_event_for_each_child or perf_event_for_each because they
1638 * hold the top-level event's child_mutex, so any descendant that
1639 * goes to exit will block in sync_child_event.
1640 * When called from perf_pending_event it's OK because event->ctx
c93f7669 1641 * is the current context on this CPU and preemption is disabled,
cdd6c482 1642 * hence we can't get into perf_event_task_sched_out for this context.
d859e29f 1643 */
44234adc 1644void perf_event_disable(struct perf_event *event)
d859e29f 1645{
cdd6c482 1646 struct perf_event_context *ctx = event->ctx;
d859e29f
PM
1647 struct task_struct *task = ctx->task;
1648
1649 if (!task) {
1650 /*
cdd6c482 1651 * Disable the event on the cpu that it's on
d859e29f 1652 */
fe4b04fa 1653 cpu_function_call(event->cpu, __perf_event_disable, event);
d859e29f
PM
1654 return;
1655 }
1656
9ed6060d 1657retry:
fe4b04fa
PZ
1658 if (!task_function_call(task, __perf_event_disable, event))
1659 return;
d859e29f 1660
e625cce1 1661 raw_spin_lock_irq(&ctx->lock);
d859e29f 1662 /*
cdd6c482 1663 * If the event is still active, we need to retry the cross-call.
d859e29f 1664 */
cdd6c482 1665 if (event->state == PERF_EVENT_STATE_ACTIVE) {
e625cce1 1666 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa
PZ
1667 /*
1668 * Reload the task pointer, it might have been changed by
1669 * a concurrent perf_event_context_sched_out().
1670 */
1671 task = ctx->task;
d859e29f
PM
1672 goto retry;
1673 }
1674
1675 /*
1676 * Since we have the lock this context can't be scheduled
1677 * in, so we can change the state safely.
1678 */
cdd6c482
IM
1679 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1680 update_group_times(event);
1681 event->state = PERF_EVENT_STATE_OFF;
53cfbf59 1682 }
e625cce1 1683 raw_spin_unlock_irq(&ctx->lock);
d859e29f 1684}
dcfce4a0 1685EXPORT_SYMBOL_GPL(perf_event_disable);
d859e29f 1686
e5d1367f
SE
1687static void perf_set_shadow_time(struct perf_event *event,
1688 struct perf_event_context *ctx,
1689 u64 tstamp)
1690{
1691 /*
1692 * use the correct time source for the time snapshot
1693 *
1694 * We could get by without this by leveraging the
1695 * fact that to get to this function, the caller
1696 * has most likely already called update_context_time()
1697 * and update_cgrp_time_xx() and thus both timestamp
1698 * are identical (or very close). Given that tstamp is,
1699 * already adjusted for cgroup, we could say that:
1700 * tstamp - ctx->timestamp
1701 * is equivalent to
1702 * tstamp - cgrp->timestamp.
1703 *
1704 * Then, in perf_output_read(), the calculation would
1705 * work with no changes because:
1706 * - event is guaranteed scheduled in
1707 * - no scheduled out in between
1708 * - thus the timestamp would be the same
1709 *
1710 * But this is a bit hairy.
1711 *
1712 * So instead, we have an explicit cgroup call to remain
1713 * within the time source all along. We believe it
1714 * is cleaner and simpler to understand.
1715 */
1716 if (is_cgroup_event(event))
1717 perf_cgroup_set_shadow_time(event, tstamp);
1718 else
1719 event->shadow_ctx_time = tstamp - ctx->timestamp;
1720}
1721
4fe757dd
PZ
1722#define MAX_INTERRUPTS (~0ULL)
1723
1724static void perf_log_throttle(struct perf_event *event, int enable);
1725
235c7fc7 1726static int
9ffcfa6f 1727event_sched_in(struct perf_event *event,
235c7fc7 1728 struct perf_cpu_context *cpuctx,
6e37738a 1729 struct perf_event_context *ctx)
235c7fc7 1730{
4158755d 1731 u64 tstamp = perf_event_time(event);
44377277 1732 int ret = 0;
4158755d 1733
63342411
PZ
1734 lockdep_assert_held(&ctx->lock);
1735
cdd6c482 1736 if (event->state <= PERF_EVENT_STATE_OFF)
235c7fc7
IM
1737 return 0;
1738
cdd6c482 1739 event->state = PERF_EVENT_STATE_ACTIVE;
6e37738a 1740 event->oncpu = smp_processor_id();
4fe757dd
PZ
1741
1742 /*
1743 * Unthrottle events, since we scheduled we might have missed several
1744 * ticks already, also for a heavily scheduling task there is little
1745 * guarantee it'll get a tick in a timely manner.
1746 */
1747 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1748 perf_log_throttle(event, 1);
1749 event->hw.interrupts = 0;
1750 }
1751
235c7fc7
IM
1752 /*
1753 * The new state must be visible before we turn it on in the hardware:
1754 */
1755 smp_wmb();
1756
44377277
AS
1757 perf_pmu_disable(event->pmu);
1758
a4eaf7f1 1759 if (event->pmu->add(event, PERF_EF_START)) {
cdd6c482
IM
1760 event->state = PERF_EVENT_STATE_INACTIVE;
1761 event->oncpu = -1;
44377277
AS
1762 ret = -EAGAIN;
1763 goto out;
235c7fc7
IM
1764 }
1765
4158755d 1766 event->tstamp_running += tstamp - event->tstamp_stopped;
9ffcfa6f 1767
e5d1367f 1768 perf_set_shadow_time(event, ctx, tstamp);
eed01528 1769
cdd6c482 1770 if (!is_software_event(event))
3b6f9e5c 1771 cpuctx->active_oncpu++;
235c7fc7 1772 ctx->nr_active++;
0f5a2601
PZ
1773 if (event->attr.freq && event->attr.sample_freq)
1774 ctx->nr_freq++;
235c7fc7 1775
cdd6c482 1776 if (event->attr.exclusive)
3b6f9e5c
PM
1777 cpuctx->exclusive = 1;
1778
fadfe7be
JO
1779 if (is_orphaned_child(event))
1780 schedule_orphans_remove(ctx);
1781
44377277
AS
1782out:
1783 perf_pmu_enable(event->pmu);
1784
1785 return ret;
235c7fc7
IM
1786}
1787
6751b71e 1788static int
cdd6c482 1789group_sched_in(struct perf_event *group_event,
6751b71e 1790 struct perf_cpu_context *cpuctx,
6e37738a 1791 struct perf_event_context *ctx)
6751b71e 1792{
6bde9b6c 1793 struct perf_event *event, *partial_group = NULL;
4a234593 1794 struct pmu *pmu = ctx->pmu;
d7842da4
SE
1795 u64 now = ctx->time;
1796 bool simulate = false;
6751b71e 1797
cdd6c482 1798 if (group_event->state == PERF_EVENT_STATE_OFF)
6751b71e
PM
1799 return 0;
1800
ad5133b7 1801 pmu->start_txn(pmu);
6bde9b6c 1802
9ffcfa6f 1803 if (event_sched_in(group_event, cpuctx, ctx)) {
ad5133b7 1804 pmu->cancel_txn(pmu);
9e630205 1805 perf_cpu_hrtimer_restart(cpuctx);
6751b71e 1806 return -EAGAIN;
90151c35 1807 }
6751b71e
PM
1808
1809 /*
1810 * Schedule in siblings as one group (if any):
1811 */
cdd6c482 1812 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
9ffcfa6f 1813 if (event_sched_in(event, cpuctx, ctx)) {
cdd6c482 1814 partial_group = event;
6751b71e
PM
1815 goto group_error;
1816 }
1817 }
1818
9ffcfa6f 1819 if (!pmu->commit_txn(pmu))
6e85158c 1820 return 0;
9ffcfa6f 1821
6751b71e
PM
1822group_error:
1823 /*
1824 * Groups can be scheduled in as one unit only, so undo any
1825 * partial group before returning:
d7842da4
SE
1826 * The events up to the failed event are scheduled out normally,
1827 * tstamp_stopped will be updated.
1828 *
1829 * The failed events and the remaining siblings need to have
1830 * their timings updated as if they had gone through event_sched_in()
1831 * and event_sched_out(). This is required to get consistent timings
1832 * across the group. This also takes care of the case where the group
1833 * could never be scheduled by ensuring tstamp_stopped is set to mark
1834 * the time the event was actually stopped, such that time delta
1835 * calculation in update_event_times() is correct.
6751b71e 1836 */
cdd6c482
IM
1837 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1838 if (event == partial_group)
d7842da4
SE
1839 simulate = true;
1840
1841 if (simulate) {
1842 event->tstamp_running += now - event->tstamp_stopped;
1843 event->tstamp_stopped = now;
1844 } else {
1845 event_sched_out(event, cpuctx, ctx);
1846 }
6751b71e 1847 }
9ffcfa6f 1848 event_sched_out(group_event, cpuctx, ctx);
6751b71e 1849
ad5133b7 1850 pmu->cancel_txn(pmu);
90151c35 1851
9e630205
SE
1852 perf_cpu_hrtimer_restart(cpuctx);
1853
6751b71e
PM
1854 return -EAGAIN;
1855}
1856
3b6f9e5c 1857/*
cdd6c482 1858 * Work out whether we can put this event group on the CPU now.
3b6f9e5c 1859 */
cdd6c482 1860static int group_can_go_on(struct perf_event *event,
3b6f9e5c
PM
1861 struct perf_cpu_context *cpuctx,
1862 int can_add_hw)
1863{
1864 /*
cdd6c482 1865 * Groups consisting entirely of software events can always go on.
3b6f9e5c 1866 */
d6f962b5 1867 if (event->group_flags & PERF_GROUP_SOFTWARE)
3b6f9e5c
PM
1868 return 1;
1869 /*
1870 * If an exclusive group is already on, no other hardware
cdd6c482 1871 * events can go on.
3b6f9e5c
PM
1872 */
1873 if (cpuctx->exclusive)
1874 return 0;
1875 /*
1876 * If this group is exclusive and there are already
cdd6c482 1877 * events on the CPU, it can't go on.
3b6f9e5c 1878 */
cdd6c482 1879 if (event->attr.exclusive && cpuctx->active_oncpu)
3b6f9e5c
PM
1880 return 0;
1881 /*
1882 * Otherwise, try to add it if all previous groups were able
1883 * to go on.
1884 */
1885 return can_add_hw;
1886}
1887
cdd6c482
IM
1888static void add_event_to_ctx(struct perf_event *event,
1889 struct perf_event_context *ctx)
53cfbf59 1890{
4158755d
SE
1891 u64 tstamp = perf_event_time(event);
1892
cdd6c482 1893 list_add_event(event, ctx);
8a49542c 1894 perf_group_attach(event);
4158755d
SE
1895 event->tstamp_enabled = tstamp;
1896 event->tstamp_running = tstamp;
1897 event->tstamp_stopped = tstamp;
53cfbf59
PM
1898}
1899
2c29ef0f
PZ
1900static void task_ctx_sched_out(struct perf_event_context *ctx);
1901static void
1902ctx_sched_in(struct perf_event_context *ctx,
1903 struct perf_cpu_context *cpuctx,
1904 enum event_type_t event_type,
1905 struct task_struct *task);
fe4b04fa 1906
dce5855b
PZ
1907static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1908 struct perf_event_context *ctx,
1909 struct task_struct *task)
1910{
1911 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1912 if (ctx)
1913 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1914 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1915 if (ctx)
1916 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1917}
1918
0793a61d 1919/*
cdd6c482 1920 * Cross CPU call to install and enable a performance event
682076ae
PZ
1921 *
1922 * Must be called with ctx->mutex held
0793a61d 1923 */
fe4b04fa 1924static int __perf_install_in_context(void *info)
0793a61d 1925{
cdd6c482
IM
1926 struct perf_event *event = info;
1927 struct perf_event_context *ctx = event->ctx;
108b02cf 1928 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2c29ef0f
PZ
1929 struct perf_event_context *task_ctx = cpuctx->task_ctx;
1930 struct task_struct *task = current;
1931
b58f6b0d 1932 perf_ctx_lock(cpuctx, task_ctx);
2c29ef0f 1933 perf_pmu_disable(cpuctx->ctx.pmu);
0793a61d
TG
1934
1935 /*
2c29ef0f 1936 * If there was an active task_ctx, schedule it out.
0793a61d 1937 */
b58f6b0d 1938 if (task_ctx)
2c29ef0f 1939 task_ctx_sched_out(task_ctx);
b58f6b0d
PZ
1940
1941 /*
1942 * If the context we're installing events in is not the
1943 * active task_ctx, flip them.
1944 */
1945 if (ctx->task && task_ctx != ctx) {
1946 if (task_ctx)
1947 raw_spin_unlock(&task_ctx->lock);
1948 raw_spin_lock(&ctx->lock);
1949 task_ctx = ctx;
1950 }
1951
1952 if (task_ctx) {
1953 cpuctx->task_ctx = task_ctx;
2c29ef0f
PZ
1954 task = task_ctx->task;
1955 }
b58f6b0d 1956
2c29ef0f 1957 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
0793a61d 1958
4af4998b 1959 update_context_time(ctx);
e5d1367f
SE
1960 /*
1961 * update cgrp time only if current cgrp
1962 * matches event->cgrp. Must be done before
1963 * calling add_event_to_ctx()
1964 */
1965 update_cgrp_time_from_event(event);
0793a61d 1966
cdd6c482 1967 add_event_to_ctx(event, ctx);
0793a61d 1968
d859e29f 1969 /*
2c29ef0f 1970 * Schedule everything back in
d859e29f 1971 */
dce5855b 1972 perf_event_sched_in(cpuctx, task_ctx, task);
2c29ef0f
PZ
1973
1974 perf_pmu_enable(cpuctx->ctx.pmu);
1975 perf_ctx_unlock(cpuctx, task_ctx);
fe4b04fa
PZ
1976
1977 return 0;
0793a61d
TG
1978}
1979
1980/*
cdd6c482 1981 * Attach a performance event to a context
0793a61d 1982 *
cdd6c482
IM
1983 * First we add the event to the list with the hardware enable bit
1984 * in event->hw_config cleared.
0793a61d 1985 *
cdd6c482 1986 * If the event is attached to a task which is on a CPU we use a smp
0793a61d
TG
1987 * call to enable it in the task context. The task might have been
1988 * scheduled away, but we check this in the smp call again.
1989 */
1990static void
cdd6c482
IM
1991perf_install_in_context(struct perf_event_context *ctx,
1992 struct perf_event *event,
0793a61d
TG
1993 int cpu)
1994{
1995 struct task_struct *task = ctx->task;
1996
fe4b04fa
PZ
1997 lockdep_assert_held(&ctx->mutex);
1998
c3f00c70 1999 event->ctx = ctx;
0cda4c02
YZ
2000 if (event->cpu != -1)
2001 event->cpu = cpu;
c3f00c70 2002
0793a61d
TG
2003 if (!task) {
2004 /*
cdd6c482 2005 * Per cpu events are installed via an smp call and
af901ca1 2006 * the install is always successful.
0793a61d 2007 */
fe4b04fa 2008 cpu_function_call(cpu, __perf_install_in_context, event);
0793a61d
TG
2009 return;
2010 }
2011
0793a61d 2012retry:
fe4b04fa
PZ
2013 if (!task_function_call(task, __perf_install_in_context, event))
2014 return;
0793a61d 2015
e625cce1 2016 raw_spin_lock_irq(&ctx->lock);
0793a61d 2017 /*
fe4b04fa
PZ
2018 * If we failed to find a running task, but find the context active now
2019 * that we've acquired the ctx->lock, retry.
0793a61d 2020 */
fe4b04fa 2021 if (ctx->is_active) {
e625cce1 2022 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
2023 goto retry;
2024 }
2025
2026 /*
fe4b04fa
PZ
2027 * Since the task isn't running, it's safe to add the event; our holding
2028 * the ctx->lock ensures the task won't get scheduled in.
0793a61d 2029 */
fe4b04fa 2030 add_event_to_ctx(event, ctx);
e625cce1 2031 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
2032}
2033
fa289bec 2034/*
cdd6c482 2035 * Put an event into inactive state and update time fields.
fa289bec
PM
2036 * Enabling the leader of a group effectively enables all
2037 * the group members that aren't explicitly disabled, so we
2038 * have to update their ->tstamp_enabled also.
2039 * Note: this works for group members as well as group leaders
2040 * since the non-leader members' sibling_lists will be empty.
2041 */
1d9b482e 2042static void __perf_event_mark_enabled(struct perf_event *event)
fa289bec 2043{
cdd6c482 2044 struct perf_event *sub;
4158755d 2045 u64 tstamp = perf_event_time(event);
fa289bec 2046
cdd6c482 2047 event->state = PERF_EVENT_STATE_INACTIVE;
4158755d 2048 event->tstamp_enabled = tstamp - event->total_time_enabled;
9ed6060d 2049 list_for_each_entry(sub, &event->sibling_list, group_entry) {
4158755d
SE
2050 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
2051 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
9ed6060d 2052 }
fa289bec
PM
2053}
2054
d859e29f 2055/*
cdd6c482 2056 * Cross CPU call to enable a performance event
d859e29f 2057 */
fe4b04fa 2058static int __perf_event_enable(void *info)
04289bb9 2059{
cdd6c482 2060 struct perf_event *event = info;
cdd6c482
IM
2061 struct perf_event_context *ctx = event->ctx;
2062 struct perf_event *leader = event->group_leader;
108b02cf 2063 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
d859e29f 2064 int err;
04289bb9 2065
06f41796
JO
2066 /*
2067 * There's a time window between the 'ctx->is_active' check
2068 * in the perf_event_enable() function and this place having:
2069 * - IRQs on
2070 * - ctx->lock unlocked
2071 *
2072 * where the task could be killed and 'ctx' deactivated
2073 * by perf_event_exit_task.
2074 */
2075 if (!ctx->is_active)
fe4b04fa 2076 return -EINVAL;
3cbed429 2077
e625cce1 2078 raw_spin_lock(&ctx->lock);
4af4998b 2079 update_context_time(ctx);
d859e29f 2080
cdd6c482 2081 if (event->state >= PERF_EVENT_STATE_INACTIVE)
d859e29f 2082 goto unlock;
e5d1367f
SE
2083
2084 /*
2085 * set current task's cgroup time reference point
2086 */
3f7cce3c 2087 perf_cgroup_set_timestamp(current, ctx);
e5d1367f 2088
1d9b482e 2089 __perf_event_mark_enabled(event);
04289bb9 2090
e5d1367f
SE
2091 if (!event_filter_match(event)) {
2092 if (is_cgroup_event(event))
2093 perf_cgroup_defer_enabled(event);
f4c4176f 2094 goto unlock;
e5d1367f 2095 }
f4c4176f 2096
04289bb9 2097 /*
cdd6c482 2098 * If the event is in a group and isn't the group leader,
d859e29f 2099 * then don't put it on unless the group is on.
04289bb9 2100 */
cdd6c482 2101 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
d859e29f 2102 goto unlock;
3b6f9e5c 2103
cdd6c482 2104 if (!group_can_go_on(event, cpuctx, 1)) {
d859e29f 2105 err = -EEXIST;
e758a33d 2106 } else {
cdd6c482 2107 if (event == leader)
6e37738a 2108 err = group_sched_in(event, cpuctx, ctx);
e758a33d 2109 else
6e37738a 2110 err = event_sched_in(event, cpuctx, ctx);
e758a33d 2111 }
d859e29f
PM
2112
2113 if (err) {
2114 /*
cdd6c482 2115 * If this event can't go on and it's part of a
d859e29f
PM
2116 * group, then the whole group has to come off.
2117 */
9e630205 2118 if (leader != event) {
d859e29f 2119 group_sched_out(leader, cpuctx, ctx);
9e630205
SE
2120 perf_cpu_hrtimer_restart(cpuctx);
2121 }
0d48696f 2122 if (leader->attr.pinned) {
53cfbf59 2123 update_group_times(leader);
cdd6c482 2124 leader->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2125 }
d859e29f
PM
2126 }
2127
9ed6060d 2128unlock:
e625cce1 2129 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
2130
2131 return 0;
d859e29f
PM
2132}
2133
2134/*
cdd6c482 2135 * Enable an event.
c93f7669 2136 *
cdd6c482
IM
2137 * If event->ctx is a cloned context, callers must make sure that
2138 * every task struct that event->ctx->task could possibly point to
c93f7669 2139 * remains valid. This condition is satisfied when called through
cdd6c482
IM
2140 * perf_event_for_each_child or perf_event_for_each as described
2141 * for perf_event_disable.
d859e29f 2142 */
44234adc 2143void perf_event_enable(struct perf_event *event)
d859e29f 2144{
cdd6c482 2145 struct perf_event_context *ctx = event->ctx;
d859e29f
PM
2146 struct task_struct *task = ctx->task;
2147
2148 if (!task) {
2149 /*
cdd6c482 2150 * Enable the event on the cpu that it's on
d859e29f 2151 */
fe4b04fa 2152 cpu_function_call(event->cpu, __perf_event_enable, event);
d859e29f
PM
2153 return;
2154 }
2155
e625cce1 2156 raw_spin_lock_irq(&ctx->lock);
cdd6c482 2157 if (event->state >= PERF_EVENT_STATE_INACTIVE)
d859e29f
PM
2158 goto out;
2159
2160 /*
cdd6c482
IM
2161 * If the event is in error state, clear that first.
2162 * That way, if we see the event in error state below, we
d859e29f
PM
2163 * know that it has gone back into error state, as distinct
2164 * from the task having been scheduled away before the
2165 * cross-call arrived.
2166 */
cdd6c482
IM
2167 if (event->state == PERF_EVENT_STATE_ERROR)
2168 event->state = PERF_EVENT_STATE_OFF;
d859e29f 2169
9ed6060d 2170retry:
fe4b04fa 2171 if (!ctx->is_active) {
1d9b482e 2172 __perf_event_mark_enabled(event);
fe4b04fa
PZ
2173 goto out;
2174 }
2175
e625cce1 2176 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa
PZ
2177
2178 if (!task_function_call(task, __perf_event_enable, event))
2179 return;
d859e29f 2180
e625cce1 2181 raw_spin_lock_irq(&ctx->lock);
d859e29f
PM
2182
2183 /*
cdd6c482 2184 * If the context is active and the event is still off,
d859e29f
PM
2185 * we need to retry the cross-call.
2186 */
fe4b04fa
PZ
2187 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
2188 /*
2189 * task could have been flipped by a concurrent
2190 * perf_event_context_sched_out()
2191 */
2192 task = ctx->task;
d859e29f 2193 goto retry;
fe4b04fa 2194 }
fa289bec 2195
9ed6060d 2196out:
e625cce1 2197 raw_spin_unlock_irq(&ctx->lock);
d859e29f 2198}
dcfce4a0 2199EXPORT_SYMBOL_GPL(perf_event_enable);
d859e29f 2200
26ca5c11 2201int perf_event_refresh(struct perf_event *event, int refresh)
79f14641 2202{
2023b359 2203 /*
cdd6c482 2204 * not supported on inherited events
2023b359 2205 */
2e939d1d 2206 if (event->attr.inherit || !is_sampling_event(event))
2023b359
PZ
2207 return -EINVAL;
2208
cdd6c482
IM
2209 atomic_add(refresh, &event->event_limit);
2210 perf_event_enable(event);
2023b359
PZ
2211
2212 return 0;
79f14641 2213}
26ca5c11 2214EXPORT_SYMBOL_GPL(perf_event_refresh);
79f14641 2215
5b0311e1
FW
2216static void ctx_sched_out(struct perf_event_context *ctx,
2217 struct perf_cpu_context *cpuctx,
2218 enum event_type_t event_type)
235c7fc7 2219{
cdd6c482 2220 struct perf_event *event;
db24d33e 2221 int is_active = ctx->is_active;
235c7fc7 2222
db24d33e 2223 ctx->is_active &= ~event_type;
cdd6c482 2224 if (likely(!ctx->nr_events))
facc4307
PZ
2225 return;
2226
4af4998b 2227 update_context_time(ctx);
e5d1367f 2228 update_cgrp_time_from_cpuctx(cpuctx);
5b0311e1 2229 if (!ctx->nr_active)
facc4307 2230 return;
5b0311e1 2231
075e0b00 2232 perf_pmu_disable(ctx->pmu);
db24d33e 2233 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
889ff015
FW
2234 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2235 group_sched_out(event, cpuctx, ctx);
9ed6060d 2236 }
889ff015 2237
db24d33e 2238 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
889ff015 2239 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
8c9ed8e1 2240 group_sched_out(event, cpuctx, ctx);
9ed6060d 2241 }
1b9a644f 2242 perf_pmu_enable(ctx->pmu);
235c7fc7
IM
2243}
2244
564c2b21 2245/*
5a3126d4
PZ
2246 * Test whether two contexts are equivalent, i.e. whether they have both been
2247 * cloned from the same version of the same context.
2248 *
2249 * Equivalence is measured using a generation number in the context that is
2250 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2251 * and list_del_event().
564c2b21 2252 */
cdd6c482
IM
2253static int context_equiv(struct perf_event_context *ctx1,
2254 struct perf_event_context *ctx2)
564c2b21 2255{
5a3126d4
PZ
2256 /* Pinning disables the swap optimization */
2257 if (ctx1->pin_count || ctx2->pin_count)
2258 return 0;
2259
2260 /* If ctx1 is the parent of ctx2 */
2261 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2262 return 1;
2263
2264 /* If ctx2 is the parent of ctx1 */
2265 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2266 return 1;
2267
2268 /*
2269 * If ctx1 and ctx2 have the same parent; we flatten the parent
2270 * hierarchy, see perf_event_init_context().
2271 */
2272 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2273 ctx1->parent_gen == ctx2->parent_gen)
2274 return 1;
2275
2276 /* Unmatched */
2277 return 0;
564c2b21
PM
2278}
2279
cdd6c482
IM
2280static void __perf_event_sync_stat(struct perf_event *event,
2281 struct perf_event *next_event)
bfbd3381
PZ
2282{
2283 u64 value;
2284
cdd6c482 2285 if (!event->attr.inherit_stat)
bfbd3381
PZ
2286 return;
2287
2288 /*
cdd6c482 2289 * Update the event value, we cannot use perf_event_read()
bfbd3381
PZ
2290 * because we're in the middle of a context switch and have IRQs
2291 * disabled, which upsets smp_call_function_single(), however
cdd6c482 2292 * we know the event must be on the current CPU, therefore we
bfbd3381
PZ
2293 * don't need to use it.
2294 */
cdd6c482
IM
2295 switch (event->state) {
2296 case PERF_EVENT_STATE_ACTIVE:
3dbebf15
PZ
2297 event->pmu->read(event);
2298 /* fall-through */
bfbd3381 2299
cdd6c482
IM
2300 case PERF_EVENT_STATE_INACTIVE:
2301 update_event_times(event);
bfbd3381
PZ
2302 break;
2303
2304 default:
2305 break;
2306 }
2307
2308 /*
cdd6c482 2309 * In order to keep per-task stats reliable we need to flip the event
bfbd3381
PZ
2310 * values when we flip the contexts.
2311 */
e7850595
PZ
2312 value = local64_read(&next_event->count);
2313 value = local64_xchg(&event->count, value);
2314 local64_set(&next_event->count, value);
bfbd3381 2315
cdd6c482
IM
2316 swap(event->total_time_enabled, next_event->total_time_enabled);
2317 swap(event->total_time_running, next_event->total_time_running);
19d2e755 2318
bfbd3381 2319 /*
19d2e755 2320 * Since we swizzled the values, update the user visible data too.
bfbd3381 2321 */
cdd6c482
IM
2322 perf_event_update_userpage(event);
2323 perf_event_update_userpage(next_event);
bfbd3381
PZ
2324}
2325
cdd6c482
IM
2326static void perf_event_sync_stat(struct perf_event_context *ctx,
2327 struct perf_event_context *next_ctx)
bfbd3381 2328{
cdd6c482 2329 struct perf_event *event, *next_event;
bfbd3381
PZ
2330
2331 if (!ctx->nr_stat)
2332 return;
2333
02ffdbc8
PZ
2334 update_context_time(ctx);
2335
cdd6c482
IM
2336 event = list_first_entry(&ctx->event_list,
2337 struct perf_event, event_entry);
bfbd3381 2338
cdd6c482
IM
2339 next_event = list_first_entry(&next_ctx->event_list,
2340 struct perf_event, event_entry);
bfbd3381 2341
cdd6c482
IM
2342 while (&event->event_entry != &ctx->event_list &&
2343 &next_event->event_entry != &next_ctx->event_list) {
bfbd3381 2344
cdd6c482 2345 __perf_event_sync_stat(event, next_event);
bfbd3381 2346
cdd6c482
IM
2347 event = list_next_entry(event, event_entry);
2348 next_event = list_next_entry(next_event, event_entry);
bfbd3381
PZ
2349 }
2350}
2351
fe4b04fa
PZ
2352static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2353 struct task_struct *next)
0793a61d 2354{
8dc85d54 2355 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482 2356 struct perf_event_context *next_ctx;
5a3126d4 2357 struct perf_event_context *parent, *next_parent;
108b02cf 2358 struct perf_cpu_context *cpuctx;
c93f7669 2359 int do_switch = 1;
0793a61d 2360
108b02cf
PZ
2361 if (likely(!ctx))
2362 return;
10989fb2 2363
108b02cf
PZ
2364 cpuctx = __get_cpu_context(ctx);
2365 if (!cpuctx->task_ctx)
0793a61d
TG
2366 return;
2367
c93f7669 2368 rcu_read_lock();
8dc85d54 2369 next_ctx = next->perf_event_ctxp[ctxn];
5a3126d4
PZ
2370 if (!next_ctx)
2371 goto unlock;
2372
2373 parent = rcu_dereference(ctx->parent_ctx);
2374 next_parent = rcu_dereference(next_ctx->parent_ctx);
2375
2376 /* If neither context has a parent context, they cannot be clones. */
1f9a7268 2377 if (!parent || !next_parent)
5a3126d4
PZ
2378 goto unlock;
2379
2380 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
c93f7669
PM
2381 /*
2382 * Looks like the two contexts are clones, so we might be
2383 * able to optimize the context switch. We lock both
2384 * contexts and check that they are clones under the
2385 * lock (including re-checking that neither has been
2386 * uncloned in the meantime). It doesn't matter which
2387 * order we take the locks because no other cpu could
2388 * be trying to lock both of these tasks.
2389 */
e625cce1
TG
2390 raw_spin_lock(&ctx->lock);
2391 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f7669 2392 if (context_equiv(ctx, next_ctx)) {
665c2142
PZ
2393 /*
2394 * XXX do we need a memory barrier of sorts
cdd6c482 2395 * wrt to rcu_dereference() of perf_event_ctxp
665c2142 2396 */
8dc85d54
PZ
2397 task->perf_event_ctxp[ctxn] = next_ctx;
2398 next->perf_event_ctxp[ctxn] = ctx;
c93f7669
PM
2399 ctx->task = next;
2400 next_ctx->task = task;
2401 do_switch = 0;
bfbd3381 2402
cdd6c482 2403 perf_event_sync_stat(ctx, next_ctx);
c93f7669 2404 }
e625cce1
TG
2405 raw_spin_unlock(&next_ctx->lock);
2406 raw_spin_unlock(&ctx->lock);
564c2b21 2407 }
5a3126d4 2408unlock:
c93f7669 2409 rcu_read_unlock();
564c2b21 2410
c93f7669 2411 if (do_switch) {
facc4307 2412 raw_spin_lock(&ctx->lock);
5b0311e1 2413 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
c93f7669 2414 cpuctx->task_ctx = NULL;
facc4307 2415 raw_spin_unlock(&ctx->lock);
c93f7669 2416 }
0793a61d
TG
2417}
2418
8dc85d54
PZ
2419#define for_each_task_context_nr(ctxn) \
2420 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2421
2422/*
2423 * Called from scheduler to remove the events of the current task,
2424 * with interrupts disabled.
2425 *
2426 * We stop each event and update the event value in event->count.
2427 *
2428 * This does not protect us against NMI, but disable()
2429 * sets the disabled bit in the control field of event _before_
2430 * accessing the event control register. If a NMI hits, then it will
2431 * not restart the event.
2432 */
ab0cce56
JO
2433void __perf_event_task_sched_out(struct task_struct *task,
2434 struct task_struct *next)
8dc85d54
PZ
2435{
2436 int ctxn;
2437
8dc85d54
PZ
2438 for_each_task_context_nr(ctxn)
2439 perf_event_context_sched_out(task, ctxn, next);
e5d1367f
SE
2440
2441 /*
2442 * if cgroup events exist on this CPU, then we need
2443 * to check if we have to switch out PMU state.
2444 * cgroup events are system-wide mode only
2445 */
2446 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
a8d757ef 2447 perf_cgroup_sched_out(task, next);
8dc85d54
PZ
2448}
2449
04dc2dbb 2450static void task_ctx_sched_out(struct perf_event_context *ctx)
a08b159f 2451{
108b02cf 2452 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
a08b159f 2453
a63eaf34
PM
2454 if (!cpuctx->task_ctx)
2455 return;
012b84da
IM
2456
2457 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2458 return;
2459
04dc2dbb 2460 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
a08b159f
PM
2461 cpuctx->task_ctx = NULL;
2462}
2463
5b0311e1
FW
2464/*
2465 * Called with IRQs disabled
2466 */
2467static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2468 enum event_type_t event_type)
2469{
2470 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb9
IM
2471}
2472
235c7fc7 2473static void
5b0311e1 2474ctx_pinned_sched_in(struct perf_event_context *ctx,
6e37738a 2475 struct perf_cpu_context *cpuctx)
0793a61d 2476{
cdd6c482 2477 struct perf_event *event;
0793a61d 2478
889ff015
FW
2479 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2480 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2481 continue;
5632ab12 2482 if (!event_filter_match(event))
3b6f9e5c
PM
2483 continue;
2484
e5d1367f
SE
2485 /* may need to reset tstamp_enabled */
2486 if (is_cgroup_event(event))
2487 perf_cgroup_mark_enabled(event, ctx);
2488
8c9ed8e1 2489 if (group_can_go_on(event, cpuctx, 1))
6e37738a 2490 group_sched_in(event, cpuctx, ctx);
3b6f9e5c
PM
2491
2492 /*
2493 * If this pinned group hasn't been scheduled,
2494 * put it in error state.
2495 */
cdd6c482
IM
2496 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2497 update_group_times(event);
2498 event->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2499 }
3b6f9e5c 2500 }
5b0311e1
FW
2501}
2502
2503static void
2504ctx_flexible_sched_in(struct perf_event_context *ctx,
6e37738a 2505 struct perf_cpu_context *cpuctx)
5b0311e1
FW
2506{
2507 struct perf_event *event;
2508 int can_add_hw = 1;
3b6f9e5c 2509
889ff015
FW
2510 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2511 /* Ignore events in OFF or ERROR state */
2512 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2513 continue;
04289bb9
IM
2514 /*
2515 * Listen to the 'cpu' scheduling filter constraint
cdd6c482 2516 * of events:
04289bb9 2517 */
5632ab12 2518 if (!event_filter_match(event))
0793a61d
TG
2519 continue;
2520
e5d1367f
SE
2521 /* may need to reset tstamp_enabled */
2522 if (is_cgroup_event(event))
2523 perf_cgroup_mark_enabled(event, ctx);
2524
9ed6060d 2525 if (group_can_go_on(event, cpuctx, can_add_hw)) {
6e37738a 2526 if (group_sched_in(event, cpuctx, ctx))
dd0e6ba2 2527 can_add_hw = 0;
9ed6060d 2528 }
0793a61d 2529 }
5b0311e1
FW
2530}
2531
2532static void
2533ctx_sched_in(struct perf_event_context *ctx,
2534 struct perf_cpu_context *cpuctx,
e5d1367f
SE
2535 enum event_type_t event_type,
2536 struct task_struct *task)
5b0311e1 2537{
e5d1367f 2538 u64 now;
db24d33e 2539 int is_active = ctx->is_active;
e5d1367f 2540
db24d33e 2541 ctx->is_active |= event_type;
5b0311e1 2542 if (likely(!ctx->nr_events))
facc4307 2543 return;
5b0311e1 2544
e5d1367f
SE
2545 now = perf_clock();
2546 ctx->timestamp = now;
3f7cce3c 2547 perf_cgroup_set_timestamp(task, ctx);
5b0311e1
FW
2548 /*
2549 * First go through the list and put on any pinned groups
2550 * in order to give them the best chance of going on.
2551 */
db24d33e 2552 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
6e37738a 2553 ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1
FW
2554
2555 /* Then walk through the lower prio flexible groups */
db24d33e 2556 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
6e37738a 2557 ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7
IM
2558}
2559
329c0e01 2560static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
2561 enum event_type_t event_type,
2562 struct task_struct *task)
329c0e01
FW
2563{
2564 struct perf_event_context *ctx = &cpuctx->ctx;
2565
e5d1367f 2566 ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e01
FW
2567}
2568
e5d1367f
SE
2569static void perf_event_context_sched_in(struct perf_event_context *ctx,
2570 struct task_struct *task)
235c7fc7 2571{
108b02cf 2572 struct perf_cpu_context *cpuctx;
235c7fc7 2573
108b02cf 2574 cpuctx = __get_cpu_context(ctx);
329c0e01
FW
2575 if (cpuctx->task_ctx == ctx)
2576 return;
2577
facc4307 2578 perf_ctx_lock(cpuctx, ctx);
1b9a644f 2579 perf_pmu_disable(ctx->pmu);
329c0e01
FW
2580 /*
2581 * We want to keep the following priority order:
2582 * cpu pinned (that don't need to move), task pinned,
2583 * cpu flexible, task flexible.
2584 */
2585 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2586
1d5f003f
GN
2587 if (ctx->nr_events)
2588 cpuctx->task_ctx = ctx;
9b33fa6b 2589
86b47c25
GN
2590 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2591
facc4307
PZ
2592 perf_pmu_enable(ctx->pmu);
2593 perf_ctx_unlock(cpuctx, ctx);
2594
b5ab4cd5
PZ
2595 /*
2596 * Since these rotations are per-cpu, we need to ensure the
2597 * cpu-context we got scheduled on is actually rotating.
2598 */
108b02cf 2599 perf_pmu_rotate_start(ctx->pmu);
235c7fc7
IM
2600}
2601
d010b332
SE
2602/*
2603 * When sampling the branch stack in system-wide mode, it may be necessary
2604 * to flush the stack on context switch. This happens when the branch
2605 * stack does not tag its entries with the pid of the current task.
2606 * Otherwise it becomes impossible to associate a branch entry with a
2607 * task. This ambiguity is more likely to appear when the branch stack
2608 * supports priv level filtering and the user sets it to monitor only
2609 * at the user level (which could be a useful measurement in system-wide
2610 * mode). In that case, the risk is high of having a branch stack with
2611 * branches from multiple tasks. Flushing may mean dropping the existing
2612 * entries or stashing them somewhere in the PMU specific code layer.
2613 *
2614 * This function provides the context switch callback to the lower code
2615 * layer. It is invoked ONLY when there is at least one system-wide context
2616 * with at least one active event using taken branch sampling.
2617 */
2618static void perf_branch_stack_sched_in(struct task_struct *prev,
2619 struct task_struct *task)
2620{
2621 struct perf_cpu_context *cpuctx;
2622 struct pmu *pmu;
2623 unsigned long flags;
2624
2625 /* no need to flush branch stack if not changing task */
2626 if (prev == task)
2627 return;
2628
2629 local_irq_save(flags);
2630
2631 rcu_read_lock();
2632
2633 list_for_each_entry_rcu(pmu, &pmus, entry) {
2634 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2635
2636 /*
2637 * check if the context has at least one
2638 * event using PERF_SAMPLE_BRANCH_STACK
2639 */
2640 if (cpuctx->ctx.nr_branch_stack > 0
2641 && pmu->flush_branch_stack) {
2642
d010b332
SE
2643 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2644
2645 perf_pmu_disable(pmu);
2646
2647 pmu->flush_branch_stack();
2648
2649 perf_pmu_enable(pmu);
2650
2651 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2652 }
2653 }
2654
2655 rcu_read_unlock();
2656
2657 local_irq_restore(flags);
2658}
2659
8dc85d54
PZ
2660/*
2661 * Called from scheduler to add the events of the current task
2662 * with interrupts disabled.
2663 *
2664 * We restore the event value and then enable it.
2665 *
2666 * This does not protect us against NMI, but enable()
2667 * sets the enabled bit in the control field of event _before_
2668 * accessing the event control register. If a NMI hits, then it will
2669 * keep the event running.
2670 */
ab0cce56
JO
2671void __perf_event_task_sched_in(struct task_struct *prev,
2672 struct task_struct *task)
8dc85d54
PZ
2673{
2674 struct perf_event_context *ctx;
2675 int ctxn;
2676
2677 for_each_task_context_nr(ctxn) {
2678 ctx = task->perf_event_ctxp[ctxn];
2679 if (likely(!ctx))
2680 continue;
2681
e5d1367f 2682 perf_event_context_sched_in(ctx, task);
8dc85d54 2683 }
e5d1367f
SE
2684 /*
2685 * if cgroup events exist on this CPU, then we need
2686 * to check if we have to switch in PMU state.
2687 * cgroup events are system-wide mode only
2688 */
2689 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
a8d757ef 2690 perf_cgroup_sched_in(prev, task);
d010b332
SE
2691
2692 /* check for system-wide branch_stack events */
2693 if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
2694 perf_branch_stack_sched_in(prev, task);
235c7fc7
IM
2695}
2696
abd50713
PZ
2697static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2698{
2699 u64 frequency = event->attr.sample_freq;
2700 u64 sec = NSEC_PER_SEC;
2701 u64 divisor, dividend;
2702
2703 int count_fls, nsec_fls, frequency_fls, sec_fls;
2704
2705 count_fls = fls64(count);
2706 nsec_fls = fls64(nsec);
2707 frequency_fls = fls64(frequency);
2708 sec_fls = 30;
2709
2710 /*
2711 * We got @count in @nsec, with a target of sample_freq HZ
2712 * the target period becomes:
2713 *
2714 * @count * 10^9
2715 * period = -------------------
2716 * @nsec * sample_freq
2717 *
2718 */
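/*
 * Worked example (illustrative numbers only): with sample_freq = 1000 Hz,
 * count = 2,000,000 events observed over nsec = 10,000,000 ns (10 ms),
 *
 *	period = 2e6 * 1e9 / (1e7 * 1e3) = 200,000 events/sample,
 *
 * i.e. roughly one sample per millisecond at the observed event rate.
 */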
2719
2720 /*
2721 * Reduce accuracy by one bit such that @a and @b converge
2722 * to a similar magnitude.
2723 */
fe4b04fa 2724#define REDUCE_FLS(a, b) \
abd50713
PZ
2725do { \
2726 if (a##_fls > b##_fls) { \
2727 a >>= 1; \
2728 a##_fls--; \
2729 } else { \
2730 b >>= 1; \
2731 b##_fls--; \
2732 } \
2733} while (0)
2734
2735 /*
2736 * Reduce accuracy until either term fits in a u64, then proceed with
2737 * the other, so that finally we can do a u64/u64 division.
2738 */
2739 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2740 REDUCE_FLS(nsec, frequency);
2741 REDUCE_FLS(sec, count);
2742 }
2743
2744 if (count_fls + sec_fls > 64) {
2745 divisor = nsec * frequency;
2746
2747 while (count_fls + sec_fls > 64) {
2748 REDUCE_FLS(count, sec);
2749 divisor >>= 1;
2750 }
2751
2752 dividend = count * sec;
2753 } else {
2754 dividend = count * sec;
2755
2756 while (nsec_fls + frequency_fls > 64) {
2757 REDUCE_FLS(nsec, frequency);
2758 dividend >>= 1;
2759 }
2760
2761 divisor = nsec * frequency;
2762 }
2763
f6ab91ad
PZ
2764 if (!divisor)
2765 return dividend;
2766
abd50713
PZ
2767 return div64_u64(dividend, divisor);
2768}
2769
e050e3f0
SE
2770static DEFINE_PER_CPU(int, perf_throttled_count);
2771static DEFINE_PER_CPU(u64, perf_throttled_seq);
2772
f39d47ff 2773static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
bd2b5b12 2774{
cdd6c482 2775 struct hw_perf_event *hwc = &event->hw;
f6ab91ad 2776 s64 period, sample_period;
bd2b5b12
PZ
2777 s64 delta;
2778
abd50713 2779 period = perf_calculate_period(event, nsec, count);
bd2b5b12
PZ
2780
2781 delta = (s64)(period - hwc->sample_period);
2782 delta = (delta + 7) / 8; /* low pass filter */
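	/*
	 * Illustration (hypothetical numbers): with hwc->sample_period
	 * currently 100,000 and a newly computed period of 180,000,
	 * delta = (80,000 + 7) / 8 = 10,000, so sample_period only moves
	 * to 110,000 this tick; repeated ticks converge on the target
	 * without oscillating.
	 */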
2783
2784 sample_period = hwc->sample_period + delta;
2785
2786 if (!sample_period)
2787 sample_period = 1;
2788
bd2b5b12 2789 hwc->sample_period = sample_period;
abd50713 2790
e7850595 2791 if (local64_read(&hwc->period_left) > 8*sample_period) {
f39d47ff
SE
2792 if (disable)
2793 event->pmu->stop(event, PERF_EF_UPDATE);
2794
e7850595 2795 local64_set(&hwc->period_left, 0);
f39d47ff
SE
2796
2797 if (disable)
2798 event->pmu->start(event, PERF_EF_RELOAD);
abd50713 2799 }
bd2b5b12
PZ
2800}
2801
e050e3f0
SE
2802/*
2803 * combine freq adjustment with unthrottling to avoid two passes over the
2804 * events. At the same time, make sure, having freq events does not change
2805 * the rate of unthrottling as that would introduce bias.
2806 */
2807static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2808 int needs_unthr)
60db5e09 2809{
cdd6c482
IM
2810 struct perf_event *event;
2811 struct hw_perf_event *hwc;
e050e3f0 2812 u64 now, period = TICK_NSEC;
abd50713 2813 s64 delta;
60db5e09 2814
e050e3f0
SE
2815 /*
2816 * only need to iterate over all events iff:
2817 * - context have events in frequency mode (needs freq adjust)
2818 * - there are events to unthrottle on this cpu
2819 */
2820 if (!(ctx->nr_freq || needs_unthr))
0f5a2601
PZ
2821 return;
2822
e050e3f0 2823 raw_spin_lock(&ctx->lock);
f39d47ff 2824 perf_pmu_disable(ctx->pmu);
e050e3f0 2825
03541f8b 2826 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482 2827 if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09
PZ
2828 continue;
2829
5632ab12 2830 if (!event_filter_match(event))
5d27c23d
PZ
2831 continue;
2832
44377277
AS
2833 perf_pmu_disable(event->pmu);
2834
cdd6c482 2835 hwc = &event->hw;
6a24ed6c 2836
ae23bff1 2837 if (hwc->interrupts == MAX_INTERRUPTS) {
e050e3f0 2838 hwc->interrupts = 0;
cdd6c482 2839 perf_log_throttle(event, 1);
a4eaf7f1 2840 event->pmu->start(event, 0);
a78ac325
PZ
2841 }
2842
cdd6c482 2843 if (!event->attr.freq || !event->attr.sample_freq)
44377277 2844 goto next;
60db5e09 2845
e050e3f0
SE
2846 /*
2847 * stop the event and update event->count
2848 */
2849 event->pmu->stop(event, PERF_EF_UPDATE);
2850
e7850595 2851 now = local64_read(&event->count);
abd50713
PZ
2852 delta = now - hwc->freq_count_stamp;
2853 hwc->freq_count_stamp = now;
60db5e09 2854
e050e3f0
SE
2855 /*
2856 * restart the event
2857 * reload only if value has changed
f39d47ff
SE
2858 * we have stopped the event so tell that
2859 * to perf_adjust_period() to avoid stopping it
2860 * twice.
e050e3f0 2861 */
abd50713 2862 if (delta > 0)
f39d47ff 2863 perf_adjust_period(event, period, delta, false);
e050e3f0
SE
2864
2865 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
44377277
AS
2866 next:
2867 perf_pmu_enable(event->pmu);
60db5e09 2868 }
e050e3f0 2869
f39d47ff 2870 perf_pmu_enable(ctx->pmu);
e050e3f0 2871 raw_spin_unlock(&ctx->lock);
60db5e09
PZ
2872}
2873
235c7fc7 2874/*
cdd6c482 2875 * Round-robin a context's events:
235c7fc7 2876 */
cdd6c482 2877static void rotate_ctx(struct perf_event_context *ctx)
0793a61d 2878{
dddd3379
TG
2879 /*
2880 * Rotate the first entry last of non-pinned groups. Rotation might be
2881 * disabled by the inheritance code.
2882 */
2883 if (!ctx->rotate_disable)
2884 list_rotate_left(&ctx->flexible_groups);
235c7fc7
IM
2885}
2886
b5ab4cd5 2887/*
e9d2b064
PZ
2888 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2889 * because they're strictly cpu affine and rotate_start is called with IRQs
2890 * disabled, while rotate_context is called from IRQ context.
b5ab4cd5 2891 */
9e630205 2892static int perf_rotate_context(struct perf_cpu_context *cpuctx)
235c7fc7 2893{
8dc85d54 2894 struct perf_event_context *ctx = NULL;
e050e3f0 2895 int rotate = 0, remove = 1;
7fc23a53 2896
b5ab4cd5 2897 if (cpuctx->ctx.nr_events) {
e9d2b064 2898 remove = 0;
b5ab4cd5
PZ
2899 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2900 rotate = 1;
2901 }
235c7fc7 2902
8dc85d54 2903 ctx = cpuctx->task_ctx;
b5ab4cd5 2904 if (ctx && ctx->nr_events) {
e9d2b064 2905 remove = 0;
b5ab4cd5
PZ
2906 if (ctx->nr_events != ctx->nr_active)
2907 rotate = 1;
2908 }
9717e6cd 2909
e050e3f0 2910 if (!rotate)
0f5a2601
PZ
2911 goto done;
2912
facc4307 2913 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644f 2914 perf_pmu_disable(cpuctx->ctx.pmu);
60db5e09 2915
e050e3f0
SE
2916 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2917 if (ctx)
2918 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
0793a61d 2919
e050e3f0
SE
2920 rotate_ctx(&cpuctx->ctx);
2921 if (ctx)
2922 rotate_ctx(ctx);
235c7fc7 2923
e050e3f0 2924 perf_event_sched_in(cpuctx, ctx, current);
235c7fc7 2925
0f5a2601
PZ
2926 perf_pmu_enable(cpuctx->ctx.pmu);
2927 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
b5ab4cd5 2928done:
e9d2b064
PZ
2929 if (remove)
2930 list_del_init(&cpuctx->rotation_list);
9e630205
SE
2931
2932 return rotate;
e9d2b064
PZ
2933}
2934
026249ef
FW
2935#ifdef CONFIG_NO_HZ_FULL
2936bool perf_event_can_stop_tick(void)
2937{
948b26b6 2938 if (atomic_read(&nr_freq_events) ||
d84153d6 2939 __this_cpu_read(perf_throttled_count))
026249ef 2940 return false;
d84153d6
FW
2941 else
2942 return true;
026249ef
FW
2943}
2944#endif
2945
e9d2b064
PZ
2946void perf_event_task_tick(void)
2947{
2948 struct list_head *head = &__get_cpu_var(rotation_list);
2949 struct perf_cpu_context *cpuctx, *tmp;
e050e3f0
SE
2950 struct perf_event_context *ctx;
2951 int throttled;
b5ab4cd5 2952
e9d2b064
PZ
2953 WARN_ON(!irqs_disabled());
2954
e050e3f0
SE
2955 __this_cpu_inc(perf_throttled_seq);
2956 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2957
e9d2b064 2958 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
e050e3f0
SE
2959 ctx = &cpuctx->ctx;
2960 perf_adjust_freq_unthr_context(ctx, throttled);
2961
2962 ctx = cpuctx->task_ctx;
2963 if (ctx)
2964 perf_adjust_freq_unthr_context(ctx, throttled);
e9d2b064 2965 }
0793a61d
TG
2966}
2967
889ff015
FW
2968static int event_enable_on_exec(struct perf_event *event,
2969 struct perf_event_context *ctx)
2970{
2971 if (!event->attr.enable_on_exec)
2972 return 0;
2973
2974 event->attr.enable_on_exec = 0;
2975 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2976 return 0;
2977
1d9b482e 2978 __perf_event_mark_enabled(event);
889ff015
FW
2979
2980 return 1;
2981}
2982
57e7986e 2983/*
cdd6c482 2984 * Enable all of a task's events that have been marked enable-on-exec.
57e7986e
PM
2985 * This expects task == current.
2986 */
8dc85d54 2987static void perf_event_enable_on_exec(struct perf_event_context *ctx)
57e7986e 2988{
cdd6c482 2989 struct perf_event *event;
57e7986e
PM
2990 unsigned long flags;
2991 int enabled = 0;
889ff015 2992 int ret;
57e7986e
PM
2993
2994 local_irq_save(flags);
cdd6c482 2995 if (!ctx || !ctx->nr_events)
57e7986e
PM
2996 goto out;
2997
e566b76e
SE
2998 /*
2999 * We must ctxsw out cgroup events to avoid conflict
3000 * when invoking perf_task_event_sched_in() later on
3001 * in this function. Otherwise we end up trying to
3002 * ctxswin cgroup events which are already scheduled
3003 * in.
3004 */
a8d757ef 3005 perf_cgroup_sched_out(current, NULL);
57e7986e 3006
e625cce1 3007 raw_spin_lock(&ctx->lock);
04dc2dbb 3008 task_ctx_sched_out(ctx);
57e7986e 3009
b79387ef 3010 list_for_each_entry(event, &ctx->event_list, event_entry) {
889ff015
FW
3011 ret = event_enable_on_exec(event, ctx);
3012 if (ret)
3013 enabled = 1;
57e7986e
PM
3014 }
3015
3016 /*
cdd6c482 3017 * Unclone this context if we enabled any event.
57e7986e 3018 */
71a851b4
PZ
3019 if (enabled)
3020 unclone_ctx(ctx);
57e7986e 3021
e625cce1 3022 raw_spin_unlock(&ctx->lock);
57e7986e 3023
e566b76e
SE
3024 /*
3025 * Also calls ctxswin for cgroup events, if any:
3026 */
e5d1367f 3027 perf_event_context_sched_in(ctx, ctx->task);
9ed6060d 3028out:
57e7986e
PM
3029 local_irq_restore(flags);
3030}
3031
e041e328
PZ
3032void perf_event_exec(void)
3033{
3034 struct perf_event_context *ctx;
3035 int ctxn;
3036
3037 rcu_read_lock();
3038 for_each_task_context_nr(ctxn) {
3039 ctx = current->perf_event_ctxp[ctxn];
3040 if (!ctx)
3041 continue;
3042
3043 perf_event_enable_on_exec(ctx);
3044 }
3045 rcu_read_unlock();
3046}
3047
0793a61d 3048/*
cdd6c482 3049 * Cross CPU call to read the hardware event
0793a61d 3050 */
cdd6c482 3051static void __perf_event_read(void *info)
0793a61d 3052{
cdd6c482
IM
3053 struct perf_event *event = info;
3054 struct perf_event_context *ctx = event->ctx;
108b02cf 3055 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
621a01ea 3056
e1ac3614
PM
3057 /*
3058 * If this is a task context, we need to check whether it is
3059 * the current task context of this cpu. If not it has been
3060 * scheduled out before the smp call arrived. In that case
cdd6c482
IM
3061 * event->count would have been updated to a recent sample
3062 * when the event was scheduled out.
e1ac3614
PM
3063 */
3064 if (ctx->task && cpuctx->task_ctx != ctx)
3065 return;
3066
e625cce1 3067 raw_spin_lock(&ctx->lock);
e5d1367f 3068 if (ctx->is_active) {
542e72fc 3069 update_context_time(ctx);
e5d1367f
SE
3070 update_cgrp_time_from_event(event);
3071 }
cdd6c482 3072 update_event_times(event);
542e72fc
PZ
3073 if (event->state == PERF_EVENT_STATE_ACTIVE)
3074 event->pmu->read(event);
e625cce1 3075 raw_spin_unlock(&ctx->lock);
0793a61d
TG
3076}
3077
b5e58793
PZ
3078static inline u64 perf_event_count(struct perf_event *event)
3079{
e7850595 3080 return local64_read(&event->count) + atomic64_read(&event->child_count);
b5e58793
PZ
3081}
3082
cdd6c482 3083static u64 perf_event_read(struct perf_event *event)
0793a61d
TG
3084{
3085 /*
cdd6c482
IM
3086 * If event is enabled and currently active on a CPU, update the
3087 * value in the event structure:
0793a61d 3088 */
cdd6c482
IM
3089 if (event->state == PERF_EVENT_STATE_ACTIVE) {
3090 smp_call_function_single(event->oncpu,
3091 __perf_event_read, event, 1);
3092 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9
PZ
3093 struct perf_event_context *ctx = event->ctx;
3094 unsigned long flags;
3095
e625cce1 3096 raw_spin_lock_irqsave(&ctx->lock, flags);
c530ccd9
SE
3097 /*
3098 * may read while context is not active
3099 * (e.g., thread is blocked), in that case
3100 * we cannot update context time
3101 */
e5d1367f 3102 if (ctx->is_active) {
c530ccd9 3103 update_context_time(ctx);
e5d1367f
SE
3104 update_cgrp_time_from_event(event);
3105 }
cdd6c482 3106 update_event_times(event);
e625cce1 3107 raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d
TG
3108 }
3109
b5e58793 3110 return perf_event_count(event);
0793a61d
TG
3111}
3112
a63eaf34 3113/*
cdd6c482 3114 * Initialize the perf_event context in a task_struct:
a63eaf34 3115 */
eb184479 3116static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34 3117{
e625cce1 3118 raw_spin_lock_init(&ctx->lock);
a63eaf34 3119 mutex_init(&ctx->mutex);
889ff015
FW
3120 INIT_LIST_HEAD(&ctx->pinned_groups);
3121 INIT_LIST_HEAD(&ctx->flexible_groups);
a63eaf34
PM
3122 INIT_LIST_HEAD(&ctx->event_list);
3123 atomic_set(&ctx->refcount, 1);
fadfe7be 3124 INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work);
eb184479
PZ
3125}
3126
3127static struct perf_event_context *
3128alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3129{
3130 struct perf_event_context *ctx;
3131
3132 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3133 if (!ctx)
3134 return NULL;
3135
3136 __perf_event_init_context(ctx);
3137 if (task) {
3138 ctx->task = task;
3139 get_task_struct(task);
0793a61d 3140 }
eb184479
PZ
3141 ctx->pmu = pmu;
3142
3143 return ctx;
a63eaf34
PM
3144}
3145
2ebd4ffb
MH
3146static struct task_struct *
3147find_lively_task_by_vpid(pid_t vpid)
3148{
3149 struct task_struct *task;
3150 int err;
0793a61d
TG
3151
3152 rcu_read_lock();
2ebd4ffb 3153 if (!vpid)
0793a61d
TG
3154 task = current;
3155 else
2ebd4ffb 3156 task = find_task_by_vpid(vpid);
0793a61d
TG
3157 if (task)
3158 get_task_struct(task);
3159 rcu_read_unlock();
3160
3161 if (!task)
3162 return ERR_PTR(-ESRCH);
3163
0793a61d 3164 /* Reuse ptrace permission checks for now. */
c93f7669
PM
3165 err = -EACCES;
3166 if (!ptrace_may_access(task, PTRACE_MODE_READ))
3167 goto errout;
3168
2ebd4ffb
MH
3169 return task;
3170errout:
3171 put_task_struct(task);
3172 return ERR_PTR(err);
3173
3174}
3175
fe4b04fa
PZ
3176/*
3177 * Returns a matching context with refcount and pincount.
3178 */
108b02cf 3179static struct perf_event_context *
38a81da2 3180find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
0793a61d 3181{
cdd6c482 3182 struct perf_event_context *ctx;
22a4f650 3183 struct perf_cpu_context *cpuctx;
25346b93 3184 unsigned long flags;
8dc85d54 3185 int ctxn, err;
0793a61d 3186
22a4ec72 3187 if (!task) {
cdd6c482 3188 /* Must be root to operate on a CPU event: */
0764771d 3189 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
0793a61d
TG
3190 return ERR_PTR(-EACCES);
3191
0793a61d 3192 /*
cdd6c482 3193 * We could be clever and allow attaching an event to an
0793a61d
TG
3194 * offline CPU and activate it when the CPU comes up, but
3195 * that's for later.
3196 */
f6325e30 3197 if (!cpu_online(cpu))
0793a61d
TG
3198 return ERR_PTR(-ENODEV);
3199
108b02cf 3200 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d 3201 ctx = &cpuctx->ctx;
c93f7669 3202 get_ctx(ctx);
fe4b04fa 3203 ++ctx->pin_count;
0793a61d 3204
0793a61d
TG
3205 return ctx;
3206 }
3207
8dc85d54
PZ
3208 err = -EINVAL;
3209 ctxn = pmu->task_ctx_nr;
3210 if (ctxn < 0)
3211 goto errout;
3212
9ed6060d 3213retry:
8dc85d54 3214 ctx = perf_lock_task_context(task, ctxn, &flags);
c93f7669 3215 if (ctx) {
71a851b4 3216 unclone_ctx(ctx);
fe4b04fa 3217 ++ctx->pin_count;
e625cce1 3218 raw_spin_unlock_irqrestore(&ctx->lock, flags);
9137fb28 3219 } else {
eb184479 3220 ctx = alloc_perf_context(pmu, task);
c93f7669
PM
3221 err = -ENOMEM;
3222 if (!ctx)
3223 goto errout;
eb184479 3224
dbe08d82
ON
3225 err = 0;
3226 mutex_lock(&task->perf_event_mutex);
3227 /*
3228 * If it has already passed perf_event_exit_task().
3229 * we must see PF_EXITING, it takes this mutex too.
3230 */
3231 if (task->flags & PF_EXITING)
3232 err = -ESRCH;
3233 else if (task->perf_event_ctxp[ctxn])
3234 err = -EAGAIN;
fe4b04fa 3235 else {
9137fb28 3236 get_ctx(ctx);
fe4b04fa 3237 ++ctx->pin_count;
dbe08d82 3238 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa 3239 }
dbe08d82
ON
3240 mutex_unlock(&task->perf_event_mutex);
3241
3242 if (unlikely(err)) {
9137fb28 3243 put_ctx(ctx);
dbe08d82
ON
3244
3245 if (err == -EAGAIN)
3246 goto retry;
3247 goto errout;
a63eaf34
PM
3248 }
3249 }
3250
0793a61d 3251 return ctx;
c93f7669 3252
9ed6060d 3253errout:
c93f7669 3254 return ERR_PTR(err);
0793a61d
TG
3255}
3256
6fb2915d
LZ
3257static void perf_event_free_filter(struct perf_event *event);
3258
cdd6c482 3259static void free_event_rcu(struct rcu_head *head)
592903cd 3260{
cdd6c482 3261 struct perf_event *event;
592903cd 3262
cdd6c482
IM
3263 event = container_of(head, struct perf_event, rcu_head);
3264 if (event->ns)
3265 put_pid_ns(event->ns);
6fb2915d 3266 perf_event_free_filter(event);
cdd6c482 3267 kfree(event);
592903cd
PZ
3268}
3269
76369139 3270static void ring_buffer_put(struct ring_buffer *rb);
b69cf536
PZ
3271static void ring_buffer_attach(struct perf_event *event,
3272 struct ring_buffer *rb);
925d519a 3273
4beb31f3 3274static void unaccount_event_cpu(struct perf_event *event, int cpu)
f1600952 3275{
4beb31f3
FW
3276 if (event->parent)
3277 return;
3278
3279 if (has_branch_stack(event)) {
3280 if (!(event->attach_state & PERF_ATTACH_TASK))
3281 atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
3282 }
3283 if (is_cgroup_event(event))
3284 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3285}
925d519a 3286
4beb31f3
FW
3287static void unaccount_event(struct perf_event *event)
3288{
3289 if (event->parent)
3290 return;
3291
3292 if (event->attach_state & PERF_ATTACH_TASK)
3293 static_key_slow_dec_deferred(&perf_sched_events);
3294 if (event->attr.mmap || event->attr.mmap_data)
3295 atomic_dec(&nr_mmap_events);
3296 if (event->attr.comm)
3297 atomic_dec(&nr_comm_events);
3298 if (event->attr.task)
3299 atomic_dec(&nr_task_events);
948b26b6
FW
3300 if (event->attr.freq)
3301 atomic_dec(&nr_freq_events);
4beb31f3
FW
3302 if (is_cgroup_event(event))
3303 static_key_slow_dec_deferred(&perf_sched_events);
3304 if (has_branch_stack(event))
3305 static_key_slow_dec_deferred(&perf_sched_events);
3306
3307 unaccount_event_cpu(event, event->cpu);
3308}
925d519a 3309
766d6c07
FW
3310static void __free_event(struct perf_event *event)
3311{
cdd6c482 3312 if (!event->parent) {
927c7a9e
FW
3313 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3314 put_callchain_buffers();
f344011c 3315 }
9ee318a7 3316
766d6c07
FW
3317 if (event->destroy)
3318 event->destroy(event);
3319
3320 if (event->ctx)
3321 put_ctx(event->ctx);
3322
c464c76e
YZ
3323 if (event->pmu)
3324 module_put(event->pmu->module);
3325
766d6c07
FW
3326 call_rcu(&event->rcu_head, free_event_rcu);
3327}
683ede43
PZ
3328
3329static void _free_event(struct perf_event *event)
f1600952 3330{
e360adbe 3331 irq_work_sync(&event->pending);
925d519a 3332
4beb31f3 3333 unaccount_event(event);
9ee318a7 3334
76369139 3335 if (event->rb) {
9bb5d40c
PZ
3336 /*
3337 * Can happen when we close an event with re-directed output.
3338 *
3339 * Since we have a 0 refcount, perf_mmap_close() will skip
3340 * over us; possibly making our ring_buffer_put() the last.
3341 */
3342 mutex_lock(&event->mmap_mutex);
b69cf536 3343 ring_buffer_attach(event, NULL);
9bb5d40c 3344 mutex_unlock(&event->mmap_mutex);
a4be7c27
PZ
3345 }
3346
e5d1367f
SE
3347 if (is_cgroup_event(event))
3348 perf_detach_cgroup(event);
3349
766d6c07 3350 __free_event(event);
f1600952
PZ
3351}
3352
683ede43
PZ
3353/*
3354 * Used to free events which have a known refcount of 1, such as in error paths
3355 * where the event isn't exposed yet and inherited events.
3356 */
3357static void free_event(struct perf_event *event)
0793a61d 3358{
683ede43
PZ
3359 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
3360 "unexpected event refcount: %ld; ptr=%p\n",
3361 atomic_long_read(&event->refcount), event)) {
3362 /* leak to avoid use-after-free */
3363 return;
3364 }
0793a61d 3365
683ede43 3366 _free_event(event);
0793a61d
TG
3367}
3368
a66a3052 3369/*
f8697762 3370 * Remove user event from the owner task.
a66a3052 3371 */
f8697762 3372static void perf_remove_from_owner(struct perf_event *event)
fb0459d7 3373{
8882135b 3374 struct task_struct *owner;
fb0459d7 3375
8882135b
PZ
3376 rcu_read_lock();
3377 owner = ACCESS_ONCE(event->owner);
3378 /*
3379 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
3380 * !owner it means the list deletion is complete and we can indeed
3381 * free this event, otherwise we need to serialize on
3382 * owner->perf_event_mutex.
3383 */
3384 smp_read_barrier_depends();
3385 if (owner) {
3386 /*
3387 * Since delayed_put_task_struct() also drops the last
3388 * task reference we can safely take a new reference
3389 * while holding the rcu_read_lock().
3390 */
3391 get_task_struct(owner);
3392 }
3393 rcu_read_unlock();
3394
3395 if (owner) {
3396 mutex_lock(&owner->perf_event_mutex);
3397 /*
3398 * We have to re-check the event->owner field, if it is cleared
3399 * we raced with perf_event_exit_task(), acquiring the mutex
3400 * ensured they're done, and we can proceed with freeing the
3401 * event.
3402 */
3403 if (event->owner)
3404 list_del_init(&event->owner_entry);
3405 mutex_unlock(&owner->perf_event_mutex);
3406 put_task_struct(owner);
3407 }
f8697762
JO
3408}
3409
3410/*
3411 * Called when the last reference to the file is gone.
3412 */
3413static void put_event(struct perf_event *event)
3414{
3415 struct perf_event_context *ctx = event->ctx;
3416
3417 if (!atomic_long_dec_and_test(&event->refcount))
3418 return;
3419
3420 if (!is_kernel_event(event))
3421 perf_remove_from_owner(event);
8882135b 3422
683ede43
PZ
3423 WARN_ON_ONCE(ctx->parent_ctx);
3424 /*
3425 * There are two ways this annotation is useful:
3426 *
3427 * 1) there is a lock recursion from perf_event_exit_task
3428 * see the comment there.
3429 *
3430 * 2) there is a lock-inversion with mmap_sem through
3431 * perf_event_read_group(), which takes faults while
3432 * holding ctx->mutex, however this is called after
3433 * the last filedesc died, so there is no possibility
3434 * to trigger the AB-BA case.
3435 */
3436 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
3437 perf_remove_from_context(event, true);
3438 mutex_unlock(&ctx->mutex);
3439
3440 _free_event(event);
a6fa941d
AV
3441}
3442
683ede43
PZ
3443int perf_event_release_kernel(struct perf_event *event)
3444{
3445 put_event(event);
3446 return 0;
3447}
3448EXPORT_SYMBOL_GPL(perf_event_release_kernel);
3449
a6fa941d
AV
3450static int perf_release(struct inode *inode, struct file *file)
3451{
3452 put_event(file->private_data);
3453 return 0;
fb0459d7 3454}
fb0459d7 3455
fadfe7be
JO
3456/*
3457 * Remove all orphaned events from the context.
3458 */
3459static void orphans_remove_work(struct work_struct *work)
3460{
3461 struct perf_event_context *ctx;
3462 struct perf_event *event, *tmp;
3463
3464 ctx = container_of(work, struct perf_event_context,
3465 orphans_remove.work);
3466
3467 mutex_lock(&ctx->mutex);
3468 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
3469 struct perf_event *parent_event = event->parent;
3470
3471 if (!is_orphaned_child(event))
3472 continue;
3473
3474 perf_remove_from_context(event, true);
3475
3476 mutex_lock(&parent_event->child_mutex);
3477 list_del_init(&event->child_list);
3478 mutex_unlock(&parent_event->child_mutex);
3479
3480 free_event(event);
3481 put_event(parent_event);
3482 }
3483
3484 raw_spin_lock_irq(&ctx->lock);
3485 ctx->orphans_remove_sched = false;
3486 raw_spin_unlock_irq(&ctx->lock);
3487 mutex_unlock(&ctx->mutex);
3488
3489 put_ctx(ctx);
3490}
3491
59ed446f 3492u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c0994 3493{
cdd6c482 3494 struct perf_event *child;
e53c0994
PZ
3495 u64 total = 0;
3496
59ed446f
PZ
3497 *enabled = 0;
3498 *running = 0;
3499
6f10581a 3500 mutex_lock(&event->child_mutex);
cdd6c482 3501 total += perf_event_read(event);
59ed446f
PZ
3502 *enabled += event->total_time_enabled +
3503 atomic64_read(&event->child_total_time_enabled);
3504 *running += event->total_time_running +
3505 atomic64_read(&event->child_total_time_running);
3506
3507 list_for_each_entry(child, &event->child_list, child_list) {
cdd6c482 3508 total += perf_event_read(child);
59ed446f
PZ
3509 *enabled += child->total_time_enabled;
3510 *running += child->total_time_running;
3511 }
6f10581a 3512 mutex_unlock(&event->child_mutex);
e53c0994
PZ
3513
3514 return total;
3515}
fb0459d7 3516EXPORT_SYMBOL_GPL(perf_event_read_value);
e53c0994 3517
cdd6c482 3518static int perf_event_read_group(struct perf_event *event,
3dab77fb
PZ
3519 u64 read_format, char __user *buf)
3520{
cdd6c482 3521 struct perf_event *leader = event->group_leader, *sub;
6f10581a
PZ
3522 int n = 0, size = 0, ret = -EFAULT;
3523 struct perf_event_context *ctx = leader->ctx;
abf4868b 3524 u64 values[5];
59ed446f 3525 u64 count, enabled, running;
abf4868b 3526
6f10581a 3527 mutex_lock(&ctx->mutex);
59ed446f 3528 count = perf_event_read_value(leader, &enabled, &running);
3dab77fb
PZ
3529
3530 values[n++] = 1 + leader->nr_siblings;
59ed446f
PZ
3531 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3532 values[n++] = enabled;
3533 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3534 values[n++] = running;
abf4868b
PZ
3535 values[n++] = count;
3536 if (read_format & PERF_FORMAT_ID)
3537 values[n++] = primary_event_id(leader);
3dab77fb
PZ
3538
3539 size = n * sizeof(u64);
3540
3541 if (copy_to_user(buf, values, size))
6f10581a 3542 goto unlock;
3dab77fb 3543
6f10581a 3544 ret = size;
3dab77fb 3545
65abc865 3546 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
abf4868b 3547 n = 0;
3dab77fb 3548
59ed446f 3549 values[n++] = perf_event_read_value(sub, &enabled, &running);
abf4868b
PZ
3550 if (read_format & PERF_FORMAT_ID)
3551 values[n++] = primary_event_id(sub);
3552
3553 size = n * sizeof(u64);
3554
184d3da8 3555 if (copy_to_user(buf + ret, values, size)) {
6f10581a
PZ
3556 ret = -EFAULT;
3557 goto unlock;
3558 }
abf4868b
PZ
3559
3560 ret += size;
3dab77fb 3561 }
6f10581a
PZ
3562unlock:
3563 mutex_unlock(&ctx->mutex);
3dab77fb 3564
abf4868b 3565 return ret;
3dab77fb
PZ
3566}
3567
cdd6c482 3568static int perf_event_read_one(struct perf_event *event,
3dab77fb
PZ
3569 u64 read_format, char __user *buf)
3570{
59ed446f 3571 u64 enabled, running;
3dab77fb
PZ
3572 u64 values[4];
3573 int n = 0;
3574
59ed446f
PZ
3575 values[n++] = perf_event_read_value(event, &enabled, &running);
3576 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3577 values[n++] = enabled;
3578 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3579 values[n++] = running;
3dab77fb 3580 if (read_format & PERF_FORMAT_ID)
cdd6c482 3581 values[n++] = primary_event_id(event);
3dab77fb
PZ
3582
3583 if (copy_to_user(buf, values, n * sizeof(u64)))
3584 return -EFAULT;
3585
3586 return n * sizeof(u64);
3587}
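/*
 * A minimal userspace sketch of the matching non-group read(), assuming a
 * hypothetical 'perf_fd' returned by perf_event_open() with
 * PERF_FORMAT_TOTAL_TIME_ENABLED, PERF_FORMAT_TOTAL_TIME_RUNNING and
 * PERF_FORMAT_ID all set; the u64s arrive in the order written by
 * perf_event_read_one() above:
 *
 *	__u64 buf[4];	// value, time_enabled, time_running, id
 *
 *	if (read(perf_fd, buf, sizeof(buf)) < 0)
 *		err(1, "read");
 */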
3588
0793a61d 3589/*
cdd6c482 3590 * Read the performance event - simple non-blocking version for now
0793a61d
TG
3591 */
3592static ssize_t
cdd6c482 3593perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
0793a61d 3594{
cdd6c482 3595 u64 read_format = event->attr.read_format;
3dab77fb 3596 int ret;
0793a61d 3597
3b6f9e5c 3598 /*
cdd6c482 3599 * Return end-of-file for a read on an event that is in
3b6f9e5c
PM
3600 * error state (i.e. because it was pinned but it couldn't be
3601 * scheduled on to the CPU at some point).
3602 */
cdd6c482 3603 if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5c
PM
3604 return 0;
3605
c320c7b7 3606 if (count < event->read_size)
3dab77fb
PZ
3607 return -ENOSPC;
3608
cdd6c482 3609 WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb 3610 if (read_format & PERF_FORMAT_GROUP)
cdd6c482 3611 ret = perf_event_read_group(event, read_format, buf);
3dab77fb 3612 else
cdd6c482 3613 ret = perf_event_read_one(event, read_format, buf);
0793a61d 3614
3dab77fb 3615 return ret;
0793a61d
TG
3616}
3617
0793a61d
TG
3618static ssize_t
3619perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3620{
cdd6c482 3621 struct perf_event *event = file->private_data;
0793a61d 3622
cdd6c482 3623 return perf_read_hw(event, buf, count);
0793a61d
TG
3624}
3625
3626static unsigned int perf_poll(struct file *file, poll_table *wait)
3627{
cdd6c482 3628 struct perf_event *event = file->private_data;
76369139 3629 struct ring_buffer *rb;
c33a0bc4 3630 unsigned int events = POLL_HUP;
c7138f37 3631
10c6db11 3632 /*
9bb5d40c
PZ
3633 * Pin the event->rb by taking event->mmap_mutex; otherwise
3634 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
10c6db11
PZ
3635 */
3636 mutex_lock(&event->mmap_mutex);
9bb5d40c
PZ
3637 rb = event->rb;
3638 if (rb)
76369139 3639 events = atomic_xchg(&rb->poll, 0);
10c6db11
PZ
3640 mutex_unlock(&event->mmap_mutex);
3641
cdd6c482 3642 poll_wait(file, &event->waitq, wait);
0793a61d 3643
0793a61d
TG
3644 return events;
3645}
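/*
 * A hedged userspace sketch of waiting for ring-buffer data, which ends up
 * in perf_poll() above; 'perf_fd' is a hypothetical perf_event_open() fd
 * whose buffer has already been mmap()ed:
 *
 *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) < 0)
 *		err(1, "poll");
 *	if (pfd.revents & POLLIN)
 *		;	// consume records from the mmap()ed buffer
 */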
3646
cdd6c482 3647static void perf_event_reset(struct perf_event *event)
6de6a7b9 3648{
cdd6c482 3649 (void)perf_event_read(event);
e7850595 3650 local64_set(&event->count, 0);
cdd6c482 3651 perf_event_update_userpage(event);
3df5edad
PZ
3652}
3653
c93f7669 3654/*
cdd6c482
IM
3655 * Holding the top-level event's child_mutex means that any
3656 * descendant process that has inherited this event will block
3657 * in sync_child_event if it goes to exit, thus satisfying the
3658 * task existence requirements of perf_event_enable/disable.
c93f7669 3659 */
cdd6c482
IM
3660static void perf_event_for_each_child(struct perf_event *event,
3661 void (*func)(struct perf_event *))
3df5edad 3662{
cdd6c482 3663 struct perf_event *child;
3df5edad 3664
cdd6c482
IM
3665 WARN_ON_ONCE(event->ctx->parent_ctx);
3666 mutex_lock(&event->child_mutex);
3667 func(event);
3668 list_for_each_entry(child, &event->child_list, child_list)
3df5edad 3669 func(child);
cdd6c482 3670 mutex_unlock(&event->child_mutex);
3df5edad
PZ
3671}
3672
cdd6c482
IM
3673static void perf_event_for_each(struct perf_event *event,
3674 void (*func)(struct perf_event *))
3df5edad 3675{
cdd6c482
IM
3676 struct perf_event_context *ctx = event->ctx;
3677 struct perf_event *sibling;
3df5edad 3678
75f937f2
PZ
3679 WARN_ON_ONCE(ctx->parent_ctx);
3680 mutex_lock(&ctx->mutex);
cdd6c482 3681 event = event->group_leader;
75f937f2 3682
cdd6c482 3683 perf_event_for_each_child(event, func);
cdd6c482 3684 list_for_each_entry(sibling, &event->sibling_list, group_entry)
724b6daa 3685 perf_event_for_each_child(sibling, func);
75f937f2 3686 mutex_unlock(&ctx->mutex);
6de6a7b9
PZ
3687}
3688
cdd6c482 3689static int perf_event_period(struct perf_event *event, u64 __user *arg)
08247e31 3690{
cdd6c482 3691 struct perf_event_context *ctx = event->ctx;
bad7192b 3692 int ret = 0, active;
08247e31
PZ
3693 u64 value;
3694
6c7e550f 3695 if (!is_sampling_event(event))
08247e31
PZ
3696 return -EINVAL;
3697
ad0cf347 3698 if (copy_from_user(&value, arg, sizeof(value)))
08247e31
PZ
3699 return -EFAULT;
3700
3701 if (!value)
3702 return -EINVAL;
3703
e625cce1 3704 raw_spin_lock_irq(&ctx->lock);
cdd6c482
IM
3705 if (event->attr.freq) {
3706 if (value > sysctl_perf_event_sample_rate) {
08247e31
PZ
3707 ret = -EINVAL;
3708 goto unlock;
3709 }
3710
cdd6c482 3711 event->attr.sample_freq = value;
08247e31 3712 } else {
cdd6c482
IM
3713 event->attr.sample_period = value;
3714 event->hw.sample_period = value;
08247e31 3715 }
bad7192b
PZ
3716
3717 active = (event->state == PERF_EVENT_STATE_ACTIVE);
3718 if (active) {
3719 perf_pmu_disable(ctx->pmu);
3720 event->pmu->stop(event, PERF_EF_UPDATE);
3721 }
3722
3723 local64_set(&event->hw.period_left, 0);
3724
3725 if (active) {
3726 event->pmu->start(event, PERF_EF_RELOAD);
3727 perf_pmu_enable(ctx->pmu);
3728 }
3729
08247e31 3730unlock:
e625cce1 3731 raw_spin_unlock_irq(&ctx->lock);
08247e31
PZ
3732
3733 return ret;
3734}
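/*
 * A small userspace sketch (assumed, with a hypothetical 'perf_fd') of
 * updating the sample period at runtime through the ioctl handled by
 * perf_event_period() above; the event must have been opened with
 * attr.freq == 0 for the value to be taken as a period:
 *
 *	__u64 new_period = 100000;
 *
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &new_period) < 0)
 *		err(1, "PERF_EVENT_IOC_PERIOD");
 */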
3735
ac9721f3
PZ
3736static const struct file_operations perf_fops;
3737
2903ff01 3738static inline int perf_fget_light(int fd, struct fd *p)
ac9721f3 3739{
2903ff01
AV
3740 struct fd f = fdget(fd);
3741 if (!f.file)
3742 return -EBADF;
ac9721f3 3743
2903ff01
AV
3744 if (f.file->f_op != &perf_fops) {
3745 fdput(f);
3746 return -EBADF;
ac9721f3 3747 }
2903ff01
AV
3748 *p = f;
3749 return 0;
ac9721f3
PZ
3750}
3751
3752static int perf_event_set_output(struct perf_event *event,
3753 struct perf_event *output_event);
6fb2915d 3754static int perf_event_set_filter(struct perf_event *event, void __user *arg);
a4be7c27 3755
d859e29f
PM
3756static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3757{
cdd6c482
IM
3758 struct perf_event *event = file->private_data;
3759 void (*func)(struct perf_event *);
3df5edad 3760 u32 flags = arg;
d859e29f
PM
3761
3762 switch (cmd) {
cdd6c482
IM
3763 case PERF_EVENT_IOC_ENABLE:
3764 func = perf_event_enable;
d859e29f 3765 break;
cdd6c482
IM
3766 case PERF_EVENT_IOC_DISABLE:
3767 func = perf_event_disable;
79f14641 3768 break;
cdd6c482
IM
3769 case PERF_EVENT_IOC_RESET:
3770 func = perf_event_reset;
6de6a7b9 3771 break;
3df5edad 3772
cdd6c482
IM
3773 case PERF_EVENT_IOC_REFRESH:
3774 return perf_event_refresh(event, arg);
08247e31 3775
cdd6c482
IM
3776 case PERF_EVENT_IOC_PERIOD:
3777 return perf_event_period(event, (u64 __user *)arg);
08247e31 3778
cf4957f1
JO
3779 case PERF_EVENT_IOC_ID:
3780 {
3781 u64 id = primary_event_id(event);
3782
3783 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
3784 return -EFAULT;
3785 return 0;
3786 }
3787
cdd6c482 3788 case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3 3789 {
ac9721f3 3790 int ret;
ac9721f3 3791 if (arg != -1) {
2903ff01
AV
3792 struct perf_event *output_event;
3793 struct fd output;
3794 ret = perf_fget_light(arg, &output);
3795 if (ret)
3796 return ret;
3797 output_event = output.file->private_data;
3798 ret = perf_event_set_output(event, output_event);
3799 fdput(output);
3800 } else {
3801 ret = perf_event_set_output(event, NULL);
ac9721f3 3802 }
ac9721f3
PZ
3803 return ret;
3804 }
a4be7c27 3805
6fb2915d
LZ
3806 case PERF_EVENT_IOC_SET_FILTER:
3807 return perf_event_set_filter(event, (void __user *)arg);
3808
d859e29f 3809 default:
3df5edad 3810 return -ENOTTY;
d859e29f 3811 }
3df5edad
PZ
3812
3813 if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482 3814 perf_event_for_each(event, func);
3df5edad 3815 else
cdd6c482 3816 perf_event_for_each_child(event, func);
3df5edad
PZ
3817
3818 return 0;
d859e29f
PM
3819}
3820
cdd6c482 3821int perf_event_task_enable(void)
771d7cde 3822{
cdd6c482 3823 struct perf_event *event;
771d7cde 3824
cdd6c482
IM
3825 mutex_lock(&current->perf_event_mutex);
3826 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3827 perf_event_for_each_child(event, perf_event_enable);
3828 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
3829
3830 return 0;
3831}
3832
cdd6c482 3833int perf_event_task_disable(void)
771d7cde 3834{
cdd6c482 3835 struct perf_event *event;
771d7cde 3836
cdd6c482
IM
3837 mutex_lock(&current->perf_event_mutex);
3838 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3839 perf_event_for_each_child(event, perf_event_disable);
3840 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
3841
3842 return 0;
3843}
3844
cdd6c482 3845static int perf_event_index(struct perf_event *event)
194002b2 3846{
a4eaf7f1
PZ
3847 if (event->hw.state & PERF_HES_STOPPED)
3848 return 0;
3849
cdd6c482 3850 if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b2
PZ
3851 return 0;
3852
35edc2a5 3853 return event->pmu->event_idx(event);
194002b2
PZ
3854}
3855
c4794295 3856static void calc_timer_values(struct perf_event *event,
e3f3541c 3857 u64 *now,
7f310a5d
EM
3858 u64 *enabled,
3859 u64 *running)
c4794295 3860{
e3f3541c 3861 u64 ctx_time;
c4794295 3862
e3f3541c
PZ
3863 *now = perf_clock();
3864 ctx_time = event->shadow_ctx_time + *now;
c4794295
EM
3865 *enabled = ctx_time - event->tstamp_enabled;
3866 *running = ctx_time - event->tstamp_running;
3867}
3868
fa731587
PZ
3869static void perf_event_init_userpage(struct perf_event *event)
3870{
3871 struct perf_event_mmap_page *userpg;
3872 struct ring_buffer *rb;
3873
3874 rcu_read_lock();
3875 rb = rcu_dereference(event->rb);
3876 if (!rb)
3877 goto unlock;
3878
3879 userpg = rb->user_page;
3880
3881 /* Allow new userspace to detect that bit 0 is deprecated */
3882 userpg->cap_bit0_is_deprecated = 1;
3883 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
3884
3885unlock:
3886 rcu_read_unlock();
3887}
3888
c7206205 3889void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
e3f3541c
PZ
3890{
3891}
3892
38ff667b
PZ
3893/*
3894 * Callers need to ensure there can be no nesting of this function, otherwise
3895 * the seqlock logic goes bad. We can not serialize this because the arch
3896 * code calls this from NMI context.
3897 */
cdd6c482 3898void perf_event_update_userpage(struct perf_event *event)
37d81828 3899{
cdd6c482 3900 struct perf_event_mmap_page *userpg;
76369139 3901 struct ring_buffer *rb;
e3f3541c 3902 u64 enabled, running, now;
38ff667b
PZ
3903
3904 rcu_read_lock();
5ec4c599
PZ
3905 rb = rcu_dereference(event->rb);
3906 if (!rb)
3907 goto unlock;
3908
0d641208
EM
3909 /*
3910 * compute total_time_enabled, total_time_running
3911 * based on snapshot values taken when the event
3912 * was last scheduled in.
3913 *
3914 * we cannot simply call update_context_time()
3915 * because of locking issues, as we can be called in
3916 * NMI context
3917 */
e3f3541c 3918 calc_timer_values(event, &now, &enabled, &running);
38ff667b 3919
76369139 3920 userpg = rb->user_page;
7b732a75
PZ
3921 /*
3922 * Disable preemption so as to not let the corresponding user-space
3923 * spin too long if we get preempted.
3924 */
3925 preempt_disable();
37d81828 3926 ++userpg->lock;
92f22a38 3927 barrier();
cdd6c482 3928 userpg->index = perf_event_index(event);
b5e58793 3929 userpg->offset = perf_event_count(event);
365a4038 3930 if (userpg->index)
e7850595 3931 userpg->offset -= local64_read(&event->hw.prev_count);
7b732a75 3932
0d641208 3933 userpg->time_enabled = enabled +
cdd6c482 3934 atomic64_read(&event->child_total_time_enabled);
7f8b4e4e 3935
0d641208 3936 userpg->time_running = running +
cdd6c482 3937 atomic64_read(&event->child_total_time_running);
7f8b4e4e 3938
c7206205 3939 arch_perf_update_userpage(userpg, now);
e3f3541c 3940
92f22a38 3941 barrier();
37d81828 3942 ++userpg->lock;
7b732a75 3943 preempt_enable();
38ff667b 3944unlock:
7b732a75 3945 rcu_read_unlock();
37d81828
PM
3946}
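/*
 * The seqlock-style retry loop userspace pairs with the ->lock increments
 * above -- a sketch, assuming 'mapped_base' is the mmap()ed control page:
 *
 *	struct perf_event_mmap_page *pc = mapped_base;
 *	__u32 seq;
 *	__u64 offset, enabled, running;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		offset  = pc->offset;
 *		enabled = pc->time_enabled;
 *		running = pc->time_running;
 *		barrier();
 *	} while (pc->lock != seq);
 */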
3947
906010b2
PZ
3948static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3949{
3950 struct perf_event *event = vma->vm_file->private_data;
76369139 3951 struct ring_buffer *rb;
906010b2
PZ
3952 int ret = VM_FAULT_SIGBUS;
3953
3954 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3955 if (vmf->pgoff == 0)
3956 ret = 0;
3957 return ret;
3958 }
3959
3960 rcu_read_lock();
76369139
FW
3961 rb = rcu_dereference(event->rb);
3962 if (!rb)
906010b2
PZ
3963 goto unlock;
3964
3965 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3966 goto unlock;
3967
76369139 3968 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b2
PZ
3969 if (!vmf->page)
3970 goto unlock;
3971
3972 get_page(vmf->page);
3973 vmf->page->mapping = vma->vm_file->f_mapping;
3974 vmf->page->index = vmf->pgoff;
3975
3976 ret = 0;
3977unlock:
3978 rcu_read_unlock();
3979
3980 return ret;
3981}
3982
10c6db11
PZ
3983static void ring_buffer_attach(struct perf_event *event,
3984 struct ring_buffer *rb)
3985{
b69cf536 3986 struct ring_buffer *old_rb = NULL;
10c6db11
PZ
3987 unsigned long flags;
3988
b69cf536
PZ
3989 if (event->rb) {
3990 /*
3991 * Should be impossible, we set this when removing
3992 * event->rb_entry and wait/clear when adding event->rb_entry.
3993 */
3994 WARN_ON_ONCE(event->rcu_pending);
10c6db11 3995
b69cf536
PZ
3996 old_rb = event->rb;
3997 event->rcu_batches = get_state_synchronize_rcu();
3998 event->rcu_pending = 1;
10c6db11 3999
b69cf536
PZ
4000 spin_lock_irqsave(&old_rb->event_lock, flags);
4001 list_del_rcu(&event->rb_entry);
4002 spin_unlock_irqrestore(&old_rb->event_lock, flags);
4003 }
10c6db11 4004
b69cf536
PZ
4005 if (event->rcu_pending && rb) {
4006 cond_synchronize_rcu(event->rcu_batches);
4007 event->rcu_pending = 0;
4008 }
10c6db11 4009
b69cf536
PZ
4010 if (rb) {
4011 spin_lock_irqsave(&rb->event_lock, flags);
4012 list_add_rcu(&event->rb_entry, &rb->event_list);
4013 spin_unlock_irqrestore(&rb->event_lock, flags);
4014 }
4015
4016 rcu_assign_pointer(event->rb, rb);
4017
4018 if (old_rb) {
4019 ring_buffer_put(old_rb);
4020 /*
4021 * Since we detached the old rb before attaching the new one,
4022 * we could have missed a wakeup. Provide it now.
4023 * Provide it now.
4024 */
4025 wake_up_all(&event->waitq);
4026 }
10c6db11
PZ
4027}
4028
4029static void ring_buffer_wakeup(struct perf_event *event)
4030{
4031 struct ring_buffer *rb;
4032
4033 rcu_read_lock();
4034 rb = rcu_dereference(event->rb);
9bb5d40c
PZ
4035 if (rb) {
4036 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
4037 wake_up_all(&event->waitq);
4038 }
10c6db11
PZ
4039 rcu_read_unlock();
4040}
4041
76369139 4042static void rb_free_rcu(struct rcu_head *rcu_head)
906010b2 4043{
76369139 4044 struct ring_buffer *rb;
906010b2 4045
76369139
FW
4046 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
4047 rb_free(rb);
7b732a75
PZ
4048}
4049
76369139 4050static struct ring_buffer *ring_buffer_get(struct perf_event *event)
7b732a75 4051{
76369139 4052 struct ring_buffer *rb;
7b732a75 4053
ac9721f3 4054 rcu_read_lock();
76369139
FW
4055 rb = rcu_dereference(event->rb);
4056 if (rb) {
4057 if (!atomic_inc_not_zero(&rb->refcount))
4058 rb = NULL;
ac9721f3
PZ
4059 }
4060 rcu_read_unlock();
4061
76369139 4062 return rb;
ac9721f3
PZ
4063}
4064
76369139 4065static void ring_buffer_put(struct ring_buffer *rb)
ac9721f3 4066{
76369139 4067 if (!atomic_dec_and_test(&rb->refcount))
ac9721f3 4068 return;
7b732a75 4069
9bb5d40c 4070 WARN_ON_ONCE(!list_empty(&rb->event_list));
10c6db11 4071
76369139 4072 call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a75
PZ
4073}
4074
4075static void perf_mmap_open(struct vm_area_struct *vma)
4076{
cdd6c482 4077 struct perf_event *event = vma->vm_file->private_data;
7b732a75 4078
cdd6c482 4079 atomic_inc(&event->mmap_count);
9bb5d40c 4080 atomic_inc(&event->rb->mmap_count);
7b732a75
PZ
4081}
4082
9bb5d40c
PZ
4083/*
4084 * A buffer can be mmap()ed multiple times; either directly through the same
4085 * event, or through other events by use of perf_event_set_output().
4086 *
4087 * In order to undo the VM accounting done by perf_mmap() we need to destroy
4088 * the buffer here, where we still have a VM context. This means we need
4089 * to detach all events redirecting to us.
4090 */
7b732a75
PZ
4091static void perf_mmap_close(struct vm_area_struct *vma)
4092{
cdd6c482 4093 struct perf_event *event = vma->vm_file->private_data;
7b732a75 4094
b69cf536 4095 struct ring_buffer *rb = ring_buffer_get(event);
9bb5d40c
PZ
4096 struct user_struct *mmap_user = rb->mmap_user;
4097 int mmap_locked = rb->mmap_locked;
4098 unsigned long size = perf_data_size(rb);
789f90fc 4099
9bb5d40c
PZ
4100 atomic_dec(&rb->mmap_count);
4101
4102 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
b69cf536 4103 goto out_put;
9bb5d40c 4104
b69cf536 4105 ring_buffer_attach(event, NULL);
9bb5d40c
PZ
4106 mutex_unlock(&event->mmap_mutex);
4107
4108 /* If there's still other mmap()s of this buffer, we're done. */
b69cf536
PZ
4109 if (atomic_read(&rb->mmap_count))
4110 goto out_put;
ac9721f3 4111
9bb5d40c
PZ
4112 /*
4113 * No other mmap()s, detach from all other events that might redirect
4114 * into the now unreachable buffer. Somewhat complicated by the
4115 * fact that rb::event_lock otherwise nests inside mmap_mutex.
4116 */
4117again:
4118 rcu_read_lock();
4119 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
4120 if (!atomic_long_inc_not_zero(&event->refcount)) {
4121 /*
4122 * This event is en-route to free_event() which will
4123 * detach it and remove it from the list.
4124 */
4125 continue;
4126 }
4127 rcu_read_unlock();
789f90fc 4128
9bb5d40c
PZ
4129 mutex_lock(&event->mmap_mutex);
4130 /*
4131 * Check we didn't race with perf_event_set_output() which can
4132 * swizzle the rb from under us while we were waiting to
4133 * acquire mmap_mutex.
4134 *
4135 * If we find a different rb, ignore this event; the next
4136 * iteration will no longer find it on the list. We still have
4137 * to restart the iteration to make sure we're not now
4138 * iterating the wrong list.
4139 */
b69cf536
PZ
4140 if (event->rb == rb)
4141 ring_buffer_attach(event, NULL);
4142
cdd6c482 4143 mutex_unlock(&event->mmap_mutex);
9bb5d40c 4144 put_event(event);
ac9721f3 4145
9bb5d40c
PZ
4146 /*
4147 * Restart the iteration; either we're on the wrong list or
4148 * we destroyed its integrity by doing a deletion.
4149 */
4150 goto again;
7b732a75 4151 }
9bb5d40c
PZ
4152 rcu_read_unlock();
4153
4154 /*
4155 * It could be there's still a few 0-ref events on the list; they'll
4156 * get cleaned up by free_event() -- they'll also still have their
4157 * ref on the rb and will free it whenever they are done with it.
4158 *
4159 * Aside from that, this buffer is 'fully' detached and unmapped,
4160 * undo the VM accounting.
4161 */
4162
4163 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
4164 vma->vm_mm->pinned_vm -= mmap_locked;
4165 free_uid(mmap_user);
4166
b69cf536 4167out_put:
9bb5d40c 4168 ring_buffer_put(rb); /* could be last */
37d81828
PM
4169}
4170
f0f37e2f 4171static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea8
PZ
4172 .open = perf_mmap_open,
4173 .close = perf_mmap_close,
4174 .fault = perf_mmap_fault,
4175 .page_mkwrite = perf_mmap_fault,
37d81828
PM
4176};
4177
4178static int perf_mmap(struct file *file, struct vm_area_struct *vma)
4179{
cdd6c482 4180 struct perf_event *event = file->private_data;
22a4f650 4181 unsigned long user_locked, user_lock_limit;
789f90fc 4182 struct user_struct *user = current_user();
22a4f650 4183 unsigned long locked, lock_limit;
76369139 4184 struct ring_buffer *rb;
7b732a75
PZ
4185 unsigned long vma_size;
4186 unsigned long nr_pages;
789f90fc 4187 long user_extra, extra;
d57e34fd 4188 int ret = 0, flags = 0;
37d81828 4189
c7920614
PZ
4190 /*
4191 * Don't allow mmap() of inherited per-task counters. This would
4192 * create a performance issue due to all children writing to the
76369139 4193 * same rb.
c7920614
PZ
4194 */
4195 if (event->cpu == -1 && event->attr.inherit)
4196 return -EINVAL;
4197
43a21ea8 4198 if (!(vma->vm_flags & VM_SHARED))
37d81828 4199 return -EINVAL;
7b732a75
PZ
4200
4201 vma_size = vma->vm_end - vma->vm_start;
4202 nr_pages = (vma_size / PAGE_SIZE) - 1;
4203
7730d865 4204 /*
76369139 4205 * If we have rb pages ensure they're a power-of-two number, so we
7730d865
PZ
4206 * can do bitmasks instead of modulo.
4207 */
4208 if (nr_pages != 0 && !is_power_of_2(nr_pages))
37d81828
PM
4209 return -EINVAL;
4210
7b732a75 4211 if (vma_size != PAGE_SIZE * (1 + nr_pages))
37d81828
PM
4212 return -EINVAL;
4213
7b732a75
PZ
4214 if (vma->vm_pgoff != 0)
4215 return -EINVAL;
37d81828 4216
cdd6c482 4217 WARN_ON_ONCE(event->ctx->parent_ctx);
9bb5d40c 4218again:
cdd6c482 4219 mutex_lock(&event->mmap_mutex);
76369139 4220 if (event->rb) {
9bb5d40c 4221 if (event->rb->nr_pages != nr_pages) {
ebb3c4c4 4222 ret = -EINVAL;
9bb5d40c
PZ
4223 goto unlock;
4224 }
4225
4226 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
4227 /*
4228 * Raced against perf_mmap_close() through
4229 * perf_event_set_output(). Try again, hope for better
4230 * luck.
4231 */
4232 mutex_unlock(&event->mmap_mutex);
4233 goto again;
4234 }
4235
ebb3c4c4
PZ
4236 goto unlock;
4237 }
4238
789f90fc 4239 user_extra = nr_pages + 1;
cdd6c482 4240 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
a3862d3f
IM
4241
4242 /*
4243 * Increase the limit linearly with more CPUs:
4244 */
4245 user_lock_limit *= num_online_cpus();
4246
789f90fc 4247 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
c5078f78 4248
789f90fc
PZ
4249 extra = 0;
4250 if (user_locked > user_lock_limit)
4251 extra = user_locked - user_lock_limit;
7b732a75 4252
78d7d407 4253 lock_limit = rlimit(RLIMIT_MEMLOCK);
7b732a75 4254 lock_limit >>= PAGE_SHIFT;
bc3e53f6 4255 locked = vma->vm_mm->pinned_vm + extra;
7b732a75 4256
459ec28a
IM
4257 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
4258 !capable(CAP_IPC_LOCK)) {
ebb3c4c4
PZ
4259 ret = -EPERM;
4260 goto unlock;
4261 }
7b732a75 4262
76369139 4263 WARN_ON(event->rb);
906010b2 4264
d57e34fd 4265 if (vma->vm_flags & VM_WRITE)
76369139 4266 flags |= RING_BUFFER_WRITABLE;
d57e34fd 4267
4ec8363d
VW
4268 rb = rb_alloc(nr_pages,
4269 event->attr.watermark ? event->attr.wakeup_watermark : 0,
4270 event->cpu, flags);
4271
76369139 4272 if (!rb) {
ac9721f3 4273 ret = -ENOMEM;
ebb3c4c4 4274 goto unlock;
ac9721f3 4275 }
26cb63ad 4276
9bb5d40c 4277 atomic_set(&rb->mmap_count, 1);
26cb63ad
PZ
4278 rb->mmap_locked = extra;
4279 rb->mmap_user = get_current_user();
43a21ea8 4280
ac9721f3 4281 atomic_long_add(user_extra, &user->locked_vm);
26cb63ad
PZ
4282 vma->vm_mm->pinned_vm += extra;
4283
9bb5d40c 4284 ring_buffer_attach(event, rb);
ac9721f3 4285
fa731587 4286 perf_event_init_userpage(event);
9a0f05cb
PZ
4287 perf_event_update_userpage(event);
4288
ebb3c4c4 4289unlock:
ac9721f3
PZ
4290 if (!ret)
4291 atomic_inc(&event->mmap_count);
cdd6c482 4292 mutex_unlock(&event->mmap_mutex);
37d81828 4293
9bb5d40c
PZ
4294 /*
4295 * Since pinned accounting is per vm we cannot allow fork() to copy our
4296 * vma.
4297 */
26cb63ad 4298 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
37d81828 4299 vma->vm_ops = &perf_mmap_vmops;
7b732a75
PZ
4300
4301 return ret;
37d81828
PM
4302}
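/*
 * A hedged userspace sketch of a mapping perf_mmap() above accepts: one
 * control page plus a power-of-two number of data pages, MAP_SHARED at
 * offset 0 ('perf_fd' is a hypothetical perf_event_open() fd):
 *
 *	int data_pages = 8;	// must be a power of two
 *	size_t len = (1 + data_pages) * sysconf(_SC_PAGESIZE);
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, perf_fd, 0);
 *
 *	if (base == MAP_FAILED)
 *		err(1, "mmap");
 */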
4303
3c446b3d
PZ
4304static int perf_fasync(int fd, struct file *filp, int on)
4305{
496ad9aa 4306 struct inode *inode = file_inode(filp);
cdd6c482 4307 struct perf_event *event = filp->private_data;
3c446b3d
PZ
4308 int retval;
4309
4310 mutex_lock(&inode->i_mutex);
cdd6c482 4311 retval = fasync_helper(fd, filp, on, &event->fasync);
3c446b3d
PZ
4312 mutex_unlock(&inode->i_mutex);
4313
4314 if (retval < 0)
4315 return retval;
4316
4317 return 0;
4318}
4319
0793a61d 4320static const struct file_operations perf_fops = {
3326c1ce 4321 .llseek = no_llseek,
0793a61d
TG
4322 .release = perf_release,
4323 .read = perf_read,
4324 .poll = perf_poll,
d859e29f
PM
4325 .unlocked_ioctl = perf_ioctl,
4326 .compat_ioctl = perf_ioctl,
37d81828 4327 .mmap = perf_mmap,
3c446b3d 4328 .fasync = perf_fasync,
0793a61d
TG
4329};
4330
925d519a 4331/*
cdd6c482 4332 * Perf event wakeup
925d519a
PZ
4333 *
4334 * If there's data, ensure we set the poll() state and publish everything
4335 * to user-space before waking everybody up.
4336 */
4337
cdd6c482 4338void perf_event_wakeup(struct perf_event *event)
925d519a 4339{
10c6db11 4340 ring_buffer_wakeup(event);
4c9e2542 4341
cdd6c482
IM
4342 if (event->pending_kill) {
4343 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
4344 event->pending_kill = 0;
4c9e2542 4345 }
925d519a
PZ
4346}
4347
e360adbe 4348static void perf_pending_event(struct irq_work *entry)
79f14641 4349{
cdd6c482
IM
4350 struct perf_event *event = container_of(entry,
4351 struct perf_event, pending);
79f14641 4352
cdd6c482
IM
4353 if (event->pending_disable) {
4354 event->pending_disable = 0;
4355 __perf_event_disable(event);
79f14641
PZ
4356 }
4357
cdd6c482
IM
4358 if (event->pending_wakeup) {
4359 event->pending_wakeup = 0;
4360 perf_event_wakeup(event);
79f14641
PZ
4361 }
4362}
4363
39447b38
ZY
4364/*
4365 * We assume there is only KVM supporting the callbacks.
4366 * Later on, we might change it to a list if there is
4367 * another virtualization implementation supporting the callbacks.
4368 */
4369struct perf_guest_info_callbacks *perf_guest_cbs;
4370
4371int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4372{
4373 perf_guest_cbs = cbs;
4374 return 0;
4375}
4376EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
4377
4378int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4379{
4380 perf_guest_cbs = NULL;
4381 return 0;
4382}
4383EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
4384
4018994f
JO
4385static void
4386perf_output_sample_regs(struct perf_output_handle *handle,
4387 struct pt_regs *regs, u64 mask)
4388{
4389 int bit;
4390
4391 for_each_set_bit(bit, (const unsigned long *) &mask,
4392 sizeof(mask) * BITS_PER_BYTE) {
4393 u64 val;
4394
4395 val = perf_reg_value(regs, bit);
4396 perf_output_put(handle, val);
4397 }
4398}
4399
4400static void perf_sample_regs_user(struct perf_regs_user *regs_user,
4401 struct pt_regs *regs)
4402{
4403 if (!user_mode(regs)) {
4404 if (current->mm)
4405 regs = task_pt_regs(current);
4406 else
4407 regs = NULL;
4408 }
4409
4410 if (regs) {
4411 regs_user->regs = regs;
4412 regs_user->abi = perf_reg_abi(current);
4413 }
4414}
4415
c5ebcedb
JO
4416/*
4417 * Get remaining task size from user stack pointer.
4418 *
4419 * It'd be better to take the stack vma map and limit this more
4420 * precisely, but there's no way to get it safely under interrupt,
4421 * so we use TASK_SIZE as the limit.
4422 */
4423static u64 perf_ustack_task_size(struct pt_regs *regs)
4424{
4425 unsigned long addr = perf_user_stack_pointer(regs);
4426
4427 if (!addr || addr >= TASK_SIZE)
4428 return 0;
4429
4430 return TASK_SIZE - addr;
4431}
4432
4433static u16
4434perf_sample_ustack_size(u16 stack_size, u16 header_size,
4435 struct pt_regs *regs)
4436{
4437 u64 task_size;
4438
4439 /* No regs, no stack pointer, no dump. */
4440 if (!regs)
4441 return 0;
4442
4443 /*
4444 * Check if we fit in with the requested stack size into the:
4445 * - TASK_SIZE
4446 * If we don't, we limit the size to the TASK_SIZE.
4447 *
4448 * - remaining sample size
4449 * If we don't, we customize the stack size to
4450 * fit in to the remaining sample size.
4451 */
4452
4453 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
4454 stack_size = min(stack_size, (u16) task_size);
4455
4456 /* Current header size plus static size and dynamic size. */
4457 header_size += 2 * sizeof(u64);
4458
4459 /* Do we fit in with the current stack dump size? */
4460 if ((u16) (header_size + stack_size) < header_size) {
4461 /*
4462 * If we overflow the maximum size for the sample,
4463 * we customize the stack dump size to fit in.
4464 */
4465 stack_size = USHRT_MAX - header_size - sizeof(u64);
4466 stack_size = round_up(stack_size, sizeof(u64));
4467 }
4468
4469 return stack_size;
4470}
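/*
 * A worked example of the clamping above, with assumed numbers: a request
 * of stack_size = 0xfff0 when header_size is 128 on entry. After reserving
 * the two extra u64s, header_size is 144; the u16 sum (144 + 0xfff0) wraps
 * around to 128, which is below 144, so the dump is shrunk to
 * USHRT_MAX - 144 - 8 = 65383 and rounded up to 65384 bytes, keeping the
 * total sample size within the u16 header limit.
 */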
4471
4472static void
4473perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
4474 struct pt_regs *regs)
4475{
4476 /* Case of a kernel thread, nothing to dump */
4477 if (!regs) {
4478 u64 size = 0;
4479 perf_output_put(handle, size);
4480 } else {
4481 unsigned long sp;
4482 unsigned int rem;
4483 u64 dyn_size;
4484
4485 /*
4486 * We dump:
4487 * static size
4488 * - the size requested by user or the best one we can fit
4489 * into the sample max size
4490 * data
4491 * - user stack dump data
4492 * dynamic size
4493 * - the actual dumped size
4494 */
4495
4496 /* Static size. */
4497 perf_output_put(handle, dump_size);
4498
4499 /* Data. */
4500 sp = perf_user_stack_pointer(regs);
4501 rem = __output_copy_user(handle, (void *) sp, dump_size);
4502 dyn_size = dump_size - rem;
4503
4504 perf_output_skip(handle, rem);
4505
4506 /* Dynamic size. */
4507 perf_output_put(handle, dyn_size);
4508 }
4509}
4510
c980d109
ACM
4511static void __perf_event_header__init_id(struct perf_event_header *header,
4512 struct perf_sample_data *data,
4513 struct perf_event *event)
6844c09d
ACM
4514{
4515 u64 sample_type = event->attr.sample_type;
4516
4517 data->type = sample_type;
4518 header->size += event->id_header_size;
4519
4520 if (sample_type & PERF_SAMPLE_TID) {
4521 /* namespace issues */
4522 data->tid_entry.pid = perf_event_pid(event, current);
4523 data->tid_entry.tid = perf_event_tid(event, current);
4524 }
4525
4526 if (sample_type & PERF_SAMPLE_TIME)
4527 data->time = perf_clock();
4528
ff3d527c 4529 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
6844c09d
ACM
4530 data->id = primary_event_id(event);
4531
4532 if (sample_type & PERF_SAMPLE_STREAM_ID)
4533 data->stream_id = event->id;
4534
4535 if (sample_type & PERF_SAMPLE_CPU) {
4536 data->cpu_entry.cpu = raw_smp_processor_id();
4537 data->cpu_entry.reserved = 0;
4538 }
4539}
4540
76369139
FW
4541void perf_event_header__init_id(struct perf_event_header *header,
4542 struct perf_sample_data *data,
4543 struct perf_event *event)
c980d109
ACM
4544{
4545 if (event->attr.sample_id_all)
4546 __perf_event_header__init_id(header, data, event);
4547}
4548
4549static void __perf_event__output_id_sample(struct perf_output_handle *handle,
4550 struct perf_sample_data *data)
4551{
4552 u64 sample_type = data->type;
4553
4554 if (sample_type & PERF_SAMPLE_TID)
4555 perf_output_put(handle, data->tid_entry);
4556
4557 if (sample_type & PERF_SAMPLE_TIME)
4558 perf_output_put(handle, data->time);
4559
4560 if (sample_type & PERF_SAMPLE_ID)
4561 perf_output_put(handle, data->id);
4562
4563 if (sample_type & PERF_SAMPLE_STREAM_ID)
4564 perf_output_put(handle, data->stream_id);
4565
4566 if (sample_type & PERF_SAMPLE_CPU)
4567 perf_output_put(handle, data->cpu_entry);
ff3d527c
AH
4568
4569 if (sample_type & PERF_SAMPLE_IDENTIFIER)
4570 perf_output_put(handle, data->id);
c980d109
ACM
4571}
4572
76369139
FW
4573void perf_event__output_id_sample(struct perf_event *event,
4574 struct perf_output_handle *handle,
4575 struct perf_sample_data *sample)
c980d109
ACM
4576{
4577 if (event->attr.sample_id_all)
4578 __perf_event__output_id_sample(handle, sample);
4579}
4580
3dab77fb 4581static void perf_output_read_one(struct perf_output_handle *handle,
eed01528
SE
4582 struct perf_event *event,
4583 u64 enabled, u64 running)
3dab77fb 4584{
cdd6c482 4585 u64 read_format = event->attr.read_format;
3dab77fb
PZ
4586 u64 values[4];
4587 int n = 0;
4588
b5e58793 4589 values[n++] = perf_event_count(event);
3dab77fb 4590 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
eed01528 4591 values[n++] = enabled +
cdd6c482 4592 atomic64_read(&event->child_total_time_enabled);
3dab77fb
PZ
4593 }
4594 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
eed01528 4595 values[n++] = running +
cdd6c482 4596 atomic64_read(&event->child_total_time_running);
3dab77fb
PZ
4597 }
4598 if (read_format & PERF_FORMAT_ID)
cdd6c482 4599 values[n++] = primary_event_id(event);
3dab77fb 4600
76369139 4601 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
4602}
4603
4604/*
cdd6c482 4605 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3dab77fb
PZ
4606 */
4607static void perf_output_read_group(struct perf_output_handle *handle,
eed01528
SE
4608 struct perf_event *event,
4609 u64 enabled, u64 running)
3dab77fb 4610{
cdd6c482
IM
4611 struct perf_event *leader = event->group_leader, *sub;
4612 u64 read_format = event->attr.read_format;
3dab77fb
PZ
4613 u64 values[5];
4614 int n = 0;
4615
4616 values[n++] = 1 + leader->nr_siblings;
4617
4618 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
eed01528 4619 values[n++] = enabled;
3dab77fb
PZ
4620
4621 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
eed01528 4622 values[n++] = running;
3dab77fb 4623
cdd6c482 4624 if (leader != event)
3dab77fb
PZ
4625 leader->pmu->read(leader);
4626
b5e58793 4627 values[n++] = perf_event_count(leader);
3dab77fb 4628 if (read_format & PERF_FORMAT_ID)
cdd6c482 4629 values[n++] = primary_event_id(leader);
3dab77fb 4630
76369139 4631 __output_copy(handle, values, n * sizeof(u64));
3dab77fb 4632
65abc865 4633 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3dab77fb
PZ
4634 n = 0;
4635
6f5ab001
JO
4636 if ((sub != event) &&
4637 (sub->state == PERF_EVENT_STATE_ACTIVE))
3dab77fb
PZ
4638 sub->pmu->read(sub);
4639
b5e58793 4640 values[n++] = perf_event_count(sub);
3dab77fb 4641 if (read_format & PERF_FORMAT_ID)
cdd6c482 4642 values[n++] = primary_event_id(sub);
3dab77fb 4643
76369139 4644 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
4645 }
4646}
4647
eed01528
SE
4648#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
4649 PERF_FORMAT_TOTAL_TIME_RUNNING)
4650
3dab77fb 4651static void perf_output_read(struct perf_output_handle *handle,
cdd6c482 4652 struct perf_event *event)
3dab77fb 4653{
e3f3541c 4654 u64 enabled = 0, running = 0, now;
eed01528
SE
4655 u64 read_format = event->attr.read_format;
4656
4657 /*
4658 * compute total_time_enabled, total_time_running
4659 * based on snapshot values taken when the event
4660 * was last scheduled in.
4661 *
4662 * we cannot simply call update_context_time()
4663 * because of locking issues, as we are called in
4664 * NMI context
4665 */
c4794295 4666 if (read_format & PERF_FORMAT_TOTAL_TIMES)
e3f3541c 4667 calc_timer_values(event, &now, &enabled, &running);
eed01528 4668
cdd6c482 4669 if (event->attr.read_format & PERF_FORMAT_GROUP)
eed01528 4670 perf_output_read_group(handle, event, enabled, running);
3dab77fb 4671 else
eed01528 4672 perf_output_read_one(handle, event, enabled, running);
3dab77fb
PZ
4673}
4674
5622f295
MM
4675void perf_output_sample(struct perf_output_handle *handle,
4676 struct perf_event_header *header,
4677 struct perf_sample_data *data,
cdd6c482 4678 struct perf_event *event)
5622f295
MM
4679{
4680 u64 sample_type = data->type;
4681
4682 perf_output_put(handle, *header);
4683
ff3d527c
AH
4684 if (sample_type & PERF_SAMPLE_IDENTIFIER)
4685 perf_output_put(handle, data->id);
4686
5622f295
MM
4687 if (sample_type & PERF_SAMPLE_IP)
4688 perf_output_put(handle, data->ip);
4689
4690 if (sample_type & PERF_SAMPLE_TID)
4691 perf_output_put(handle, data->tid_entry);
4692
4693 if (sample_type & PERF_SAMPLE_TIME)
4694 perf_output_put(handle, data->time);
4695
4696 if (sample_type & PERF_SAMPLE_ADDR)
4697 perf_output_put(handle, data->addr);
4698
4699 if (sample_type & PERF_SAMPLE_ID)
4700 perf_output_put(handle, data->id);
4701
4702 if (sample_type & PERF_SAMPLE_STREAM_ID)
4703 perf_output_put(handle, data->stream_id);
4704
4705 if (sample_type & PERF_SAMPLE_CPU)
4706 perf_output_put(handle, data->cpu_entry);
4707
4708 if (sample_type & PERF_SAMPLE_PERIOD)
4709 perf_output_put(handle, data->period);
4710
4711 if (sample_type & PERF_SAMPLE_READ)
cdd6c482 4712 perf_output_read(handle, event);
5622f295
MM
4713
4714 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4715 if (data->callchain) {
4716 int size = 1;
4717
4718 if (data->callchain)
4719 size += data->callchain->nr;
4720
4721 size *= sizeof(u64);
4722
76369139 4723 __output_copy(handle, data->callchain, size);
5622f295
MM
4724 } else {
4725 u64 nr = 0;
4726 perf_output_put(handle, nr);
4727 }
4728 }
4729
4730 if (sample_type & PERF_SAMPLE_RAW) {
4731 if (data->raw) {
4732 perf_output_put(handle, data->raw->size);
76369139
FW
4733 __output_copy(handle, data->raw->data,
4734 data->raw->size);
5622f295
MM
4735 } else {
4736 struct {
4737 u32 size;
4738 u32 data;
4739 } raw = {
4740 .size = sizeof(u32),
4741 .data = 0,
4742 };
4743 perf_output_put(handle, raw);
4744 }
4745 }
a7ac67ea 4746
bce38cd5
SE
4747 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4748 if (data->br_stack) {
4749 size_t size;
4750
4751 size = data->br_stack->nr
4752 * sizeof(struct perf_branch_entry);
4753
4754 perf_output_put(handle, data->br_stack->nr);
4755 perf_output_copy(handle, data->br_stack->entries, size);
4756 } else {
4757 /*
4758 * we always store at least the value of nr
4759 */
4760 u64 nr = 0;
4761 perf_output_put(handle, nr);
4762 }
4763 }
4018994f
JO
4764
4765 if (sample_type & PERF_SAMPLE_REGS_USER) {
4766 u64 abi = data->regs_user.abi;
4767
4768 /*
4769 * If there are no regs to dump, notice it through the
4770 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
4771 */
4772 perf_output_put(handle, abi);
4773
4774 if (abi) {
4775 u64 mask = event->attr.sample_regs_user;
4776 perf_output_sample_regs(handle,
4777 data->regs_user.regs,
4778 mask);
4779 }
4780 }
c5ebcedb 4781
a5cdd40c 4782 if (sample_type & PERF_SAMPLE_STACK_USER) {
c5ebcedb
JO
4783 perf_output_sample_ustack(handle,
4784 data->stack_user_size,
4785 data->regs_user.regs);
a5cdd40c 4786 }
c3feedf2
AK
4787
4788 if (sample_type & PERF_SAMPLE_WEIGHT)
4789 perf_output_put(handle, data->weight);
d6be9ad6
SE
4790
4791 if (sample_type & PERF_SAMPLE_DATA_SRC)
4792 perf_output_put(handle, data->data_src.val);
a5cdd40c 4793
fdfbbd07
AK
4794 if (sample_type & PERF_SAMPLE_TRANSACTION)
4795 perf_output_put(handle, data->txn);
4796
a5cdd40c
PZ
4797 if (!event->attr.watermark) {
4798 int wakeup_events = event->attr.wakeup_events;
4799
4800 if (wakeup_events) {
4801 struct ring_buffer *rb = handle->rb;
4802 int events = local_inc_return(&rb->events);
4803
4804 if (events >= wakeup_events) {
4805 local_sub(wakeup_events, &rb->events);
4806 local_inc(&rb->wakeup);
4807 }
4808 }
4809 }
5622f295
MM
4810}
4811
4812void perf_prepare_sample(struct perf_event_header *header,
4813 struct perf_sample_data *data,
cdd6c482 4814 struct perf_event *event,
5622f295 4815 struct pt_regs *regs)
7b732a75 4816{
cdd6c482 4817 u64 sample_type = event->attr.sample_type;
7b732a75 4818
cdd6c482 4819 header->type = PERF_RECORD_SAMPLE;
c320c7b7 4820 header->size = sizeof(*header) + event->header_size;
5622f295
MM
4821
4822 header->misc = 0;
4823 header->misc |= perf_misc_flags(regs);
6fab0192 4824
c980d109 4825 __perf_event_header__init_id(header, data, event);
6844c09d 4826
c320c7b7 4827 if (sample_type & PERF_SAMPLE_IP)
5622f295
MM
4828 data->ip = perf_instruction_pointer(regs);
4829
b23f3325 4830 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5622f295 4831 int size = 1;
394ee076 4832
e6dab5ff 4833 data->callchain = perf_callchain(event, regs);
5622f295
MM
4834
4835 if (data->callchain)
4836 size += data->callchain->nr;
4837
4838 header->size += size * sizeof(u64);
394ee076
PZ
4839 }
4840
3a43ce68 4841 if (sample_type & PERF_SAMPLE_RAW) {
a044560c
PZ
4842 int size = sizeof(u32);
4843
4844 if (data->raw)
4845 size += data->raw->size;
4846 else
4847 size += sizeof(u32);
4848
4849 WARN_ON_ONCE(size & (sizeof(u64)-1));
5622f295 4850 header->size += size;
7f453c24 4851 }
bce38cd5
SE
4852
4853 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4854 int size = sizeof(u64); /* nr */
4855 if (data->br_stack) {
4856 size += data->br_stack->nr
4857 * sizeof(struct perf_branch_entry);
4858 }
4859 header->size += size;
4860 }
4018994f
JO
4861
4862 if (sample_type & PERF_SAMPLE_REGS_USER) {
4863 /* regs dump ABI info */
4864 int size = sizeof(u64);
4865
4866 perf_sample_regs_user(&data->regs_user, regs);
4867
4868 if (data->regs_user.regs) {
4869 u64 mask = event->attr.sample_regs_user;
4870 size += hweight64(mask) * sizeof(u64);
4871 }
4872
4873 header->size += size;
4874 }
c5ebcedb
JO
4875
4876 if (sample_type & PERF_SAMPLE_STACK_USER) {
4877 /*
4878 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
4879 * processed as the last one, or an additional check must be
4880 * added whenever a new sample type is introduced, because we
4881 * could eat up the rest of the sample size.
4882 */
4883 struct perf_regs_user *uregs = &data->regs_user;
4884 u16 stack_size = event->attr.sample_stack_user;
4885 u16 size = sizeof(u64);
4886
4887 if (!uregs->abi)
4888 perf_sample_regs_user(uregs, regs);
4889
4890 stack_size = perf_sample_ustack_size(stack_size, header->size,
4891 uregs->regs);
4892
4893 /*
4894 * If there is something to dump, add space for the dump
4895 * itself and for the field that tells the dynamic size,
4896 * which is how many bytes have actually been dumped.
4897 */
4898 if (stack_size)
4899 size += sizeof(u64) + stack_size;
4900
4901 data->stack_user_size = stack_size;
4902 header->size += size;
4903 }
5622f295 4904}
7f453c24 4905
a8b0ca17 4906static void perf_event_output(struct perf_event *event,
5622f295
MM
4907 struct perf_sample_data *data,
4908 struct pt_regs *regs)
4909{
4910 struct perf_output_handle handle;
4911 struct perf_event_header header;
689802b2 4912
927c7a9e
FW
4913 /* protect the callchain buffers */
4914 rcu_read_lock();
4915
cdd6c482 4916 perf_prepare_sample(&header, data, event, regs);
5c148194 4917
a7ac67ea 4918 if (perf_output_begin(&handle, event, header.size))
927c7a9e 4919 goto exit;
0322cd6e 4920
cdd6c482 4921 perf_output_sample(&handle, &header, data, event);
f413cdb8 4922
8a057d84 4923 perf_output_end(&handle);
927c7a9e
FW
4924
4925exit:
4926 rcu_read_unlock();
0322cd6e
PZ
4927}
4928
38b200d6 4929/*
cdd6c482 4930 * read event_id
38b200d6
PZ
4931 */
4932
4933struct perf_read_event {
4934 struct perf_event_header header;
4935
4936 u32 pid;
4937 u32 tid;
38b200d6
PZ
4938};
4939
4940static void
cdd6c482 4941perf_event_read_event(struct perf_event *event,
38b200d6
PZ
4942 struct task_struct *task)
4943{
4944 struct perf_output_handle handle;
c980d109 4945 struct perf_sample_data sample;
dfc65094 4946 struct perf_read_event read_event = {
38b200d6 4947 .header = {
cdd6c482 4948 .type = PERF_RECORD_READ,
38b200d6 4949 .misc = 0,
c320c7b7 4950 .size = sizeof(read_event) + event->read_size,
38b200d6 4951 },
cdd6c482
IM
4952 .pid = perf_event_pid(event, task),
4953 .tid = perf_event_tid(event, task),
38b200d6 4954 };
3dab77fb 4955 int ret;
38b200d6 4956
c980d109 4957 perf_event_header__init_id(&read_event.header, &sample, event);
a7ac67ea 4958 ret = perf_output_begin(&handle, event, read_event.header.size);
38b200d6
PZ
4959 if (ret)
4960 return;
4961
dfc65094 4962 perf_output_put(&handle, read_event);
cdd6c482 4963 perf_output_read(&handle, event);
c980d109 4964 perf_event__output_id_sample(event, &handle, &sample);
3dab77fb 4965
38b200d6
PZ
4966 perf_output_end(&handle);
4967}
4968
52d857a8
JO
4969typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
4970
4971static void
4972perf_event_aux_ctx(struct perf_event_context *ctx,
52d857a8
JO
4973 perf_event_aux_output_cb output,
4974 void *data)
4975{
4976 struct perf_event *event;
4977
4978 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4979 if (event->state < PERF_EVENT_STATE_INACTIVE)
4980 continue;
4981 if (!event_filter_match(event))
4982 continue;
67516844 4983 output(event, data);
52d857a8
JO
4984 }
4985}
4986
4987static void
67516844 4988perf_event_aux(perf_event_aux_output_cb output, void *data,
52d857a8
JO
4989 struct perf_event_context *task_ctx)
4990{
4991 struct perf_cpu_context *cpuctx;
4992 struct perf_event_context *ctx;
4993 struct pmu *pmu;
4994 int ctxn;
4995
4996 rcu_read_lock();
4997 list_for_each_entry_rcu(pmu, &pmus, entry) {
4998 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4999 if (cpuctx->unique_pmu != pmu)
5000 goto next;
67516844 5001 perf_event_aux_ctx(&cpuctx->ctx, output, data);
52d857a8
JO
5002 if (task_ctx)
5003 goto next;
5004 ctxn = pmu->task_ctx_nr;
5005 if (ctxn < 0)
5006 goto next;
5007 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
5008 if (ctx)
67516844 5009 perf_event_aux_ctx(ctx, output, data);
52d857a8
JO
5010next:
5011 put_cpu_ptr(pmu->pmu_cpu_context);
5012 }
5013
5014 if (task_ctx) {
5015 preempt_disable();
67516844 5016 perf_event_aux_ctx(task_ctx, output, data);
52d857a8
JO
5017 preempt_enable();
5018 }
5019 rcu_read_unlock();
5020}
5021
60313ebe 5022/*
9f498cc5
PZ
5023 * task tracking -- fork/exit
5024 *
13d7a241 5025 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
60313ebe
PZ
5026 */
5027
9f498cc5 5028struct perf_task_event {
3a80b4a3 5029 struct task_struct *task;
cdd6c482 5030 struct perf_event_context *task_ctx;
60313ebe
PZ
5031
5032 struct {
5033 struct perf_event_header header;
5034
5035 u32 pid;
5036 u32 ppid;
9f498cc5
PZ
5037 u32 tid;
5038 u32 ptid;
393b2ad8 5039 u64 time;
cdd6c482 5040 } event_id;
60313ebe
PZ
5041};
5042
67516844
JO
5043static int perf_event_task_match(struct perf_event *event)
5044{
13d7a241
SE
5045 return event->attr.comm || event->attr.mmap ||
5046 event->attr.mmap2 || event->attr.mmap_data ||
5047 event->attr.task;
67516844
JO
5048}
5049
cdd6c482 5050static void perf_event_task_output(struct perf_event *event,
52d857a8 5051 void *data)
60313ebe 5052{
52d857a8 5053 struct perf_task_event *task_event = data;
60313ebe 5054 struct perf_output_handle handle;
c980d109 5055 struct perf_sample_data sample;
9f498cc5 5056 struct task_struct *task = task_event->task;
c980d109 5057 int ret, size = task_event->event_id.header.size;
8bb39f9a 5058
67516844
JO
5059 if (!perf_event_task_match(event))
5060 return;
5061
c980d109 5062 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
60313ebe 5063
c980d109 5064 ret = perf_output_begin(&handle, event,
a7ac67ea 5065 task_event->event_id.header.size);
ef60777c 5066 if (ret)
c980d109 5067 goto out;
60313ebe 5068
cdd6c482
IM
5069 task_event->event_id.pid = perf_event_pid(event, task);
5070 task_event->event_id.ppid = perf_event_pid(event, current);
60313ebe 5071
cdd6c482
IM
5072 task_event->event_id.tid = perf_event_tid(event, task);
5073 task_event->event_id.ptid = perf_event_tid(event, current);
9f498cc5 5074
cdd6c482 5075 perf_output_put(&handle, task_event->event_id);
393b2ad8 5076
c980d109
ACM
5077 perf_event__output_id_sample(event, &handle, &sample);
5078
60313ebe 5079 perf_output_end(&handle);
c980d109
ACM
5080out:
5081 task_event->event_id.header.size = size;
60313ebe
PZ
5082}
5083
cdd6c482
IM
5084static void perf_event_task(struct task_struct *task,
5085 struct perf_event_context *task_ctx,
3a80b4a3 5086 int new)
60313ebe 5087{
9f498cc5 5088 struct perf_task_event task_event;
60313ebe 5089
cdd6c482
IM
5090 if (!atomic_read(&nr_comm_events) &&
5091 !atomic_read(&nr_mmap_events) &&
5092 !atomic_read(&nr_task_events))
60313ebe
PZ
5093 return;
5094
9f498cc5 5095 task_event = (struct perf_task_event){
3a80b4a3
PZ
5096 .task = task,
5097 .task_ctx = task_ctx,
cdd6c482 5098 .event_id = {
60313ebe 5099 .header = {
cdd6c482 5100 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
573402db 5101 .misc = 0,
cdd6c482 5102 .size = sizeof(task_event.event_id),
60313ebe 5103 },
573402db
PZ
5104 /* .pid */
5105 /* .ppid */
9f498cc5
PZ
5106 /* .tid */
5107 /* .ptid */
6f93d0a7 5108 .time = perf_clock(),
60313ebe
PZ
5109 },
5110 };
5111
67516844 5112 perf_event_aux(perf_event_task_output,
52d857a8
JO
5113 &task_event,
5114 task_ctx);
9f498cc5
PZ
5115}
5116
cdd6c482 5117void perf_event_fork(struct task_struct *task)
9f498cc5 5118{
cdd6c482 5119 perf_event_task(task, NULL, 1);
60313ebe
PZ
5120}
5121
8d1b2d93
PZ
5122/*
5123 * comm tracking
5124 */
5125
5126struct perf_comm_event {
22a4f650
IM
5127 struct task_struct *task;
5128 char *comm;
8d1b2d93
PZ
5129 int comm_size;
5130
5131 struct {
5132 struct perf_event_header header;
5133
5134 u32 pid;
5135 u32 tid;
cdd6c482 5136 } event_id;
8d1b2d93
PZ
5137};
5138
67516844
JO
5139static int perf_event_comm_match(struct perf_event *event)
5140{
5141 return event->attr.comm;
5142}
5143
cdd6c482 5144static void perf_event_comm_output(struct perf_event *event,
52d857a8 5145 void *data)
8d1b2d93 5146{
52d857a8 5147 struct perf_comm_event *comm_event = data;
8d1b2d93 5148 struct perf_output_handle handle;
c980d109 5149 struct perf_sample_data sample;
cdd6c482 5150 int size = comm_event->event_id.header.size;
c980d109
ACM
5151 int ret;
5152
67516844
JO
5153 if (!perf_event_comm_match(event))
5154 return;
5155
c980d109
ACM
5156 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
5157 ret = perf_output_begin(&handle, event,
a7ac67ea 5158 comm_event->event_id.header.size);
8d1b2d93
PZ
5159
5160 if (ret)
c980d109 5161 goto out;
8d1b2d93 5162
cdd6c482
IM
5163 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
5164 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
709e50cf 5165
cdd6c482 5166 perf_output_put(&handle, comm_event->event_id);
76369139 5167 __output_copy(&handle, comm_event->comm,
8d1b2d93 5168 comm_event->comm_size);
c980d109
ACM
5169
5170 perf_event__output_id_sample(event, &handle, &sample);
5171
8d1b2d93 5172 perf_output_end(&handle);
c980d109
ACM
5173out:
5174 comm_event->event_id.header.size = size;
8d1b2d93
PZ
5175}
5176
cdd6c482 5177static void perf_event_comm_event(struct perf_comm_event *comm_event)
8d1b2d93 5178{
413ee3b4 5179 char comm[TASK_COMM_LEN];
8d1b2d93 5180 unsigned int size;
8d1b2d93 5181
413ee3b4 5182 memset(comm, 0, sizeof(comm));
96b02d78 5183 strlcpy(comm, comm_event->task->comm, sizeof(comm));
888fcee0 5184 size = ALIGN(strlen(comm)+1, sizeof(u64));
8d1b2d93
PZ
5185
5186 comm_event->comm = comm;
5187 comm_event->comm_size = size;
5188
cdd6c482 5189 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
8dc85d54 5190
67516844 5191 perf_event_aux(perf_event_comm_output,
52d857a8
JO
5192 comm_event,
5193 NULL);
8d1b2d93
PZ
5194}
5195
82b89778 5196void perf_event_comm(struct task_struct *task, bool exec)
8d1b2d93 5197{
9ee318a7
PZ
5198 struct perf_comm_event comm_event;
5199
cdd6c482 5200 if (!atomic_read(&nr_comm_events))
9ee318a7 5201 return;
a63eaf34 5202
9ee318a7 5203 comm_event = (struct perf_comm_event){
8d1b2d93 5204 .task = task,
573402db
PZ
5205 /* .comm */
5206 /* .comm_size */
cdd6c482 5207 .event_id = {
573402db 5208 .header = {
cdd6c482 5209 .type = PERF_RECORD_COMM,
82b89778 5210 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
573402db
PZ
5211 /* .size */
5212 },
5213 /* .pid */
5214 /* .tid */
8d1b2d93
PZ
5215 },
5216 };
5217
cdd6c482 5218 perf_event_comm_event(&comm_event);
8d1b2d93
PZ
5219}
5220
0a4a9391
PZ
5221/*
5222 * mmap tracking
5223 */
5224
5225struct perf_mmap_event {
089dd79d
PZ
5226 struct vm_area_struct *vma;
5227
5228 const char *file_name;
5229 int file_size;
13d7a241
SE
5230 int maj, min;
5231 u64 ino;
5232 u64 ino_generation;
f972eb63 5233 u32 prot, flags;
0a4a9391
PZ
5234
5235 struct {
5236 struct perf_event_header header;
5237
5238 u32 pid;
5239 u32 tid;
5240 u64 start;
5241 u64 len;
5242 u64 pgoff;
cdd6c482 5243 } event_id;
0a4a9391
PZ
5244};
5245
67516844
JO
5246static int perf_event_mmap_match(struct perf_event *event,
5247 void *data)
5248{
5249 struct perf_mmap_event *mmap_event = data;
5250 struct vm_area_struct *vma = mmap_event->vma;
5251 int executable = vma->vm_flags & VM_EXEC;
5252
5253 return (!executable && event->attr.mmap_data) ||
13d7a241 5254 (executable && (event->attr.mmap || event->attr.mmap2));
67516844
JO
5255}
5256
cdd6c482 5257static void perf_event_mmap_output(struct perf_event *event,
52d857a8 5258 void *data)
0a4a9391 5259{
52d857a8 5260 struct perf_mmap_event *mmap_event = data;
0a4a9391 5261 struct perf_output_handle handle;
c980d109 5262 struct perf_sample_data sample;
cdd6c482 5263 int size = mmap_event->event_id.header.size;
c980d109 5264 int ret;
0a4a9391 5265
67516844
JO
5266 if (!perf_event_mmap_match(event, data))
5267 return;
5268
13d7a241
SE
5269 if (event->attr.mmap2) {
5270 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
5271 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
5272 mmap_event->event_id.header.size += sizeof(mmap_event->min);
5273 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
d008d525 5274 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
f972eb63
PZ
5275 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
5276 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
13d7a241
SE
5277 }
5278
c980d109
ACM
5279 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
5280 ret = perf_output_begin(&handle, event,
a7ac67ea 5281 mmap_event->event_id.header.size);
0a4a9391 5282 if (ret)
c980d109 5283 goto out;
0a4a9391 5284
cdd6c482
IM
5285 mmap_event->event_id.pid = perf_event_pid(event, current);
5286 mmap_event->event_id.tid = perf_event_tid(event, current);
709e50cf 5287
cdd6c482 5288 perf_output_put(&handle, mmap_event->event_id);
13d7a241
SE
5289
5290 if (event->attr.mmap2) {
5291 perf_output_put(&handle, mmap_event->maj);
5292 perf_output_put(&handle, mmap_event->min);
5293 perf_output_put(&handle, mmap_event->ino);
5294 perf_output_put(&handle, mmap_event->ino_generation);
f972eb63
PZ
5295 perf_output_put(&handle, mmap_event->prot);
5296 perf_output_put(&handle, mmap_event->flags);
13d7a241
SE
5297 }
5298
76369139 5299 __output_copy(&handle, mmap_event->file_name,
0a4a9391 5300 mmap_event->file_size);
c980d109
ACM
5301
5302 perf_event__output_id_sample(event, &handle, &sample);
5303
78d613eb 5304 perf_output_end(&handle);
c980d109
ACM
5305out:
5306 mmap_event->event_id.header.size = size;
0a4a9391
PZ
5307}
5308
cdd6c482 5309static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
0a4a9391 5310{
089dd79d
PZ
5311 struct vm_area_struct *vma = mmap_event->vma;
5312 struct file *file = vma->vm_file;
13d7a241
SE
5313 int maj = 0, min = 0;
5314 u64 ino = 0, gen = 0;
f972eb63 5315 u32 prot = 0, flags = 0;
0a4a9391
PZ
5316 unsigned int size;
5317 char tmp[16];
5318 char *buf = NULL;
2c42cfbf 5319 char *name;
413ee3b4 5320
0a4a9391 5321 if (file) {
13d7a241
SE
5322 struct inode *inode;
5323 dev_t dev;
3ea2f2b9 5324
2c42cfbf 5325 buf = kmalloc(PATH_MAX, GFP_KERNEL);
0a4a9391 5326 if (!buf) {
c7e548b4
ON
5327 name = "//enomem";
5328 goto cpy_name;
0a4a9391 5329 }
413ee3b4 5330 /*
3ea2f2b9 5331 * d_path() works from the end of the rb backwards, so we
413ee3b4
AB
5332 * need to add enough zero bytes after the string to handle
5333 * the 64bit alignment we do later.
5334 */
3ea2f2b9 5335 name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
0a4a9391 5336 if (IS_ERR(name)) {
c7e548b4
ON
5337 name = "//toolong";
5338 goto cpy_name;
0a4a9391 5339 }
13d7a241
SE
5340 inode = file_inode(vma->vm_file);
5341 dev = inode->i_sb->s_dev;
5342 ino = inode->i_ino;
5343 gen = inode->i_generation;
5344 maj = MAJOR(dev);
5345 min = MINOR(dev);
f972eb63
PZ
5346
5347 if (vma->vm_flags & VM_READ)
5348 prot |= PROT_READ;
5349 if (vma->vm_flags & VM_WRITE)
5350 prot |= PROT_WRITE;
5351 if (vma->vm_flags & VM_EXEC)
5352 prot |= PROT_EXEC;
5353
5354 if (vma->vm_flags & VM_MAYSHARE)
5355 flags = MAP_SHARED;
5356 else
5357 flags = MAP_PRIVATE;
5358
5359 if (vma->vm_flags & VM_DENYWRITE)
5360 flags |= MAP_DENYWRITE;
5361 if (vma->vm_flags & VM_MAYEXEC)
5362 flags |= MAP_EXECUTABLE;
5363 if (vma->vm_flags & VM_LOCKED)
5364 flags |= MAP_LOCKED;
5365 if (vma->vm_flags & VM_HUGETLB)
5366 flags |= MAP_HUGETLB;
5367
c7e548b4 5368 goto got_name;
0a4a9391 5369 } else {
fbe26abe
JO
5370 if (vma->vm_ops && vma->vm_ops->name) {
5371 name = (char *) vma->vm_ops->name(vma);
5372 if (name)
5373 goto cpy_name;
5374 }
5375
2c42cfbf 5376 name = (char *)arch_vma_name(vma);
c7e548b4
ON
5377 if (name)
5378 goto cpy_name;
089dd79d 5379
32c5fb7e 5380 if (vma->vm_start <= vma->vm_mm->start_brk &&
3af9e859 5381 vma->vm_end >= vma->vm_mm->brk) {
c7e548b4
ON
5382 name = "[heap]";
5383 goto cpy_name;
32c5fb7e
ON
5384 }
5385 if (vma->vm_start <= vma->vm_mm->start_stack &&
3af9e859 5386 vma->vm_end >= vma->vm_mm->start_stack) {
c7e548b4
ON
5387 name = "[stack]";
5388 goto cpy_name;
089dd79d
PZ
5389 }
5390
c7e548b4
ON
5391 name = "//anon";
5392 goto cpy_name;
0a4a9391
PZ
5393 }
5394
c7e548b4
ON
5395cpy_name:
5396 strlcpy(tmp, name, sizeof(tmp));
5397 name = tmp;
0a4a9391 5398got_name:
2c42cfbf
PZ
5399 /*
5400 * Since our buffer works in 8 byte units we need to align our string
5401 * size to a multiple of 8. However, we must guarantee the tail end is
5402 * zero'd out to avoid leaking random bits to userspace.
5403 */
5404 size = strlen(name)+1;
5405 while (!IS_ALIGNED(size, sizeof(u64)))
5406 name[size++] = '\0';
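	/*
	 * For illustration (hypothetical name): "libfoo.so" gives
	 * strlen()+1 == 10, and the loop above then appends zero bytes until
	 * IS_ALIGNED(size, 8) holds, so size becomes 16 and bytes 10..15 are
	 * explicit '\0' rather than whatever the buffer happened to contain.
	 */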
0a4a9391
PZ
5407
5408 mmap_event->file_name = name;
5409 mmap_event->file_size = size;
13d7a241
SE
5410 mmap_event->maj = maj;
5411 mmap_event->min = min;
5412 mmap_event->ino = ino;
5413 mmap_event->ino_generation = gen;
f972eb63
PZ
5414 mmap_event->prot = prot;
5415 mmap_event->flags = flags;
0a4a9391 5416
2fe85427
SE
5417 if (!(vma->vm_flags & VM_EXEC))
5418 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
5419
cdd6c482 5420 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
0a4a9391 5421
67516844 5422 perf_event_aux(perf_event_mmap_output,
52d857a8
JO
5423 mmap_event,
5424 NULL);
665c2142 5425
0a4a9391
PZ
5426 kfree(buf);
5427}
5428
3af9e859 5429void perf_event_mmap(struct vm_area_struct *vma)
0a4a9391 5430{
9ee318a7
PZ
5431 struct perf_mmap_event mmap_event;
5432
cdd6c482 5433 if (!atomic_read(&nr_mmap_events))
9ee318a7
PZ
5434 return;
5435
5436 mmap_event = (struct perf_mmap_event){
089dd79d 5437 .vma = vma,
573402db
PZ
5438 /* .file_name */
5439 /* .file_size */
cdd6c482 5440 .event_id = {
573402db 5441 .header = {
cdd6c482 5442 .type = PERF_RECORD_MMAP,
39447b38 5443 .misc = PERF_RECORD_MISC_USER,
573402db
PZ
5444 /* .size */
5445 },
5446 /* .pid */
5447 /* .tid */
089dd79d
PZ
5448 .start = vma->vm_start,
5449 .len = vma->vm_end - vma->vm_start,
3a0304e9 5450 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
0a4a9391 5451 },
13d7a241
SE
5452 /* .maj (attr_mmap2 only) */
5453 /* .min (attr_mmap2 only) */
5454 /* .ino (attr_mmap2 only) */
5455 /* .ino_generation (attr_mmap2 only) */
f972eb63
PZ
5456 /* .prot (attr_mmap2 only) */
5457 /* .flags (attr_mmap2 only) */
0a4a9391
PZ
5458 };
5459
cdd6c482 5460 perf_event_mmap_event(&mmap_event);
0a4a9391
PZ
5461}
5462
a78ac325
PZ
5463/*
5464 * IRQ throttle logging
5465 */
5466
cdd6c482 5467static void perf_log_throttle(struct perf_event *event, int enable)
a78ac325
PZ
5468{
5469 struct perf_output_handle handle;
c980d109 5470 struct perf_sample_data sample;
a78ac325
PZ
5471 int ret;
5472
5473 struct {
5474 struct perf_event_header header;
5475 u64 time;
cca3f454 5476 u64 id;
7f453c24 5477 u64 stream_id;
a78ac325
PZ
5478 } throttle_event = {
5479 .header = {
cdd6c482 5480 .type = PERF_RECORD_THROTTLE,
a78ac325
PZ
5481 .misc = 0,
5482 .size = sizeof(throttle_event),
5483 },
def0a9b2 5484 .time = perf_clock(),
cdd6c482
IM
5485 .id = primary_event_id(event),
5486 .stream_id = event->id,
a78ac325
PZ
5487 };
5488
966ee4d6 5489 if (enable)
cdd6c482 5490 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
966ee4d6 5491
c980d109
ACM
5492 perf_event_header__init_id(&throttle_event.header, &sample, event);
5493
5494 ret = perf_output_begin(&handle, event,
a7ac67ea 5495 throttle_event.header.size);
a78ac325
PZ
5496 if (ret)
5497 return;
5498
5499 perf_output_put(&handle, throttle_event);
c980d109 5500 perf_event__output_id_sample(event, &handle, &sample);
a78ac325
PZ
5501 perf_output_end(&handle);
5502}
5503
f6c7d5fe 5504/*
cdd6c482 5505 * Generic event overflow handling, sampling.
f6c7d5fe
PZ
5506 */
5507
a8b0ca17 5508static int __perf_event_overflow(struct perf_event *event,
5622f295
MM
5509 int throttle, struct perf_sample_data *data,
5510 struct pt_regs *regs)
f6c7d5fe 5511{
cdd6c482
IM
5512 int events = atomic_read(&event->event_limit);
5513 struct hw_perf_event *hwc = &event->hw;
e050e3f0 5514 u64 seq;
79f14641
PZ
5515 int ret = 0;
5516
96398826
PZ
5517 /*
5518 * Non-sampling counters might still use the PMI to fold short
 5519 * hardware counters; ignore those.
5520 */
5521 if (unlikely(!is_sampling_event(event)))
5522 return 0;
5523
e050e3f0
SE
5524 seq = __this_cpu_read(perf_throttled_seq);
5525 if (seq != hwc->interrupts_seq) {
5526 hwc->interrupts_seq = seq;
5527 hwc->interrupts = 1;
5528 } else {
5529 hwc->interrupts++;
5530 if (unlikely(throttle
5531 && hwc->interrupts >= max_samples_per_tick)) {
5532 __this_cpu_inc(perf_throttled_count);
163ec435
PZ
5533 hwc->interrupts = MAX_INTERRUPTS;
5534 perf_log_throttle(event, 0);
d84153d6 5535 tick_nohz_full_kick();
a78ac325
PZ
5536 ret = 1;
5537 }
e050e3f0 5538 }
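	/*
	 * For scale (illustrative numbers): max_samples_per_tick is derived
	 * from the kernel.perf_event_max_sample_rate sysctl divided by HZ,
	 * so with the default of 100000 samples/sec and HZ == 1000 an event
	 * is throttled after roughly 100 interrupts within one tick, and is
	 * unthrottled again from the next perf_event_task_tick().
	 */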
60db5e09 5539
cdd6c482 5540 if (event->attr.freq) {
def0a9b2 5541 u64 now = perf_clock();
abd50713 5542 s64 delta = now - hwc->freq_time_stamp;
bd2b5b12 5543
abd50713 5544 hwc->freq_time_stamp = now;
bd2b5b12 5545
abd50713 5546 if (delta > 0 && delta < 2*TICK_NSEC)
f39d47ff 5547 perf_adjust_period(event, delta, hwc->last_period, true);
bd2b5b12
PZ
5548 }
5549
2023b359
PZ
5550 /*
5551 * XXX event_limit might not quite work as expected on inherited
cdd6c482 5552 * events
2023b359
PZ
5553 */
5554
cdd6c482
IM
5555 event->pending_kill = POLL_IN;
5556 if (events && atomic_dec_and_test(&event->event_limit)) {
79f14641 5557 ret = 1;
cdd6c482 5558 event->pending_kill = POLL_HUP;
a8b0ca17
PZ
5559 event->pending_disable = 1;
5560 irq_work_queue(&event->pending);
79f14641
PZ
5561 }
5562
453f19ee 5563 if (event->overflow_handler)
a8b0ca17 5564 event->overflow_handler(event, data, regs);
453f19ee 5565 else
a8b0ca17 5566 perf_event_output(event, data, regs);
453f19ee 5567
f506b3dc 5568 if (event->fasync && event->pending_kill) {
a8b0ca17
PZ
5569 event->pending_wakeup = 1;
5570 irq_work_queue(&event->pending);
f506b3dc
PZ
5571 }
5572
79f14641 5573 return ret;
f6c7d5fe
PZ
5574}
5575
a8b0ca17 5576int perf_event_overflow(struct perf_event *event,
5622f295
MM
5577 struct perf_sample_data *data,
5578 struct pt_regs *regs)
850bc73f 5579{
a8b0ca17 5580 return __perf_event_overflow(event, 1, data, regs);
850bc73f
PZ
5581}
5582
15dbf27c 5583/*
cdd6c482 5584 * Generic software event infrastructure
15dbf27c
PZ
5585 */
5586
b28ab83c
PZ
5587struct swevent_htable {
5588 struct swevent_hlist *swevent_hlist;
5589 struct mutex hlist_mutex;
5590 int hlist_refcount;
5591
 5592 /* Recursion avoidance in each context */
5593 int recursion[PERF_NR_CONTEXTS];
39af6b16
JO
5594
5595 /* Keeps track of cpu being initialized/exited */
5596 bool online;
b28ab83c
PZ
5597};
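/*
 * Roughly speaking, the recursion[] slots map to the contexts a software
 * event can fire from (task, softirq, hardirq, NMI), so an event raised
 * from an interrupt that lands while a task-context swevent is still being
 * processed gets its own slot instead of being dropped as recursion.
 */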
5598
5599static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
5600
7b4b6658 5601/*
cdd6c482
IM
5602 * We directly increment event->count and keep a second value in
5603 * event->hw.period_left to count intervals. This period event
7b4b6658
PZ
5604 * is kept in the range [-sample_period, 0] so that we can use the
5605 * sign as trigger.
5606 */
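/*
 * For illustration, with sample_period == 4 and period_left having just
 * gone positive to +1 (the add in perf_swevent_event() crossed zero),
 * perf_swevent_set_period() below computes:
 *	nr     = (4 + 1) / 4 = 1	-> one overflow to report
 *	offset = 1 * 4       = 4
 *	val    = 1 - 4       = -3	-> back in [-sample_period, 0]
 * so the one event of overshoot carries over and the next overflow fires
 * after only 3 more events.
 */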
5607
ab573844 5608u64 perf_swevent_set_period(struct perf_event *event)
15dbf27c 5609{
cdd6c482 5610 struct hw_perf_event *hwc = &event->hw;
7b4b6658
PZ
5611 u64 period = hwc->last_period;
5612 u64 nr, offset;
5613 s64 old, val;
5614
5615 hwc->last_period = hwc->sample_period;
15dbf27c
PZ
5616
5617again:
e7850595 5618 old = val = local64_read(&hwc->period_left);
7b4b6658
PZ
5619 if (val < 0)
5620 return 0;
15dbf27c 5621
7b4b6658
PZ
5622 nr = div64_u64(period + val, period);
5623 offset = nr * period;
5624 val -= offset;
e7850595 5625 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7b4b6658 5626 goto again;
15dbf27c 5627
7b4b6658 5628 return nr;
15dbf27c
PZ
5629}
5630
0cff784a 5631static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
a8b0ca17 5632 struct perf_sample_data *data,
5622f295 5633 struct pt_regs *regs)
15dbf27c 5634{
cdd6c482 5635 struct hw_perf_event *hwc = &event->hw;
850bc73f 5636 int throttle = 0;
15dbf27c 5637
0cff784a
PZ
5638 if (!overflow)
5639 overflow = perf_swevent_set_period(event);
15dbf27c 5640
7b4b6658
PZ
5641 if (hwc->interrupts == MAX_INTERRUPTS)
5642 return;
15dbf27c 5643
7b4b6658 5644 for (; overflow; overflow--) {
a8b0ca17 5645 if (__perf_event_overflow(event, throttle,
5622f295 5646 data, regs)) {
7b4b6658
PZ
5647 /*
5648 * We inhibit the overflow from happening when
5649 * hwc->interrupts == MAX_INTERRUPTS.
5650 */
5651 break;
5652 }
cf450a73 5653 throttle = 1;
7b4b6658 5654 }
15dbf27c
PZ
5655}
5656
a4eaf7f1 5657static void perf_swevent_event(struct perf_event *event, u64 nr,
a8b0ca17 5658 struct perf_sample_data *data,
5622f295 5659 struct pt_regs *regs)
7b4b6658 5660{
cdd6c482 5661 struct hw_perf_event *hwc = &event->hw;
d6d020e9 5662
e7850595 5663 local64_add(nr, &event->count);
d6d020e9 5664
0cff784a
PZ
5665 if (!regs)
5666 return;
5667
6c7e550f 5668 if (!is_sampling_event(event))
7b4b6658 5669 return;
d6d020e9 5670
5d81e5cf
AV
5671 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
5672 data->period = nr;
5673 return perf_swevent_overflow(event, 1, data, regs);
5674 } else
5675 data->period = event->hw.last_period;
5676
0cff784a 5677 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
a8b0ca17 5678 return perf_swevent_overflow(event, 1, data, regs);
0cff784a 5679
e7850595 5680 if (local64_add_negative(nr, &hwc->period_left))
7b4b6658 5681 return;
df1a132b 5682
a8b0ca17 5683 perf_swevent_overflow(event, 0, data, regs);
d6d020e9
PZ
5684}
5685
f5ffe02e
FW
5686static int perf_exclude_event(struct perf_event *event,
5687 struct pt_regs *regs)
5688{
a4eaf7f1 5689 if (event->hw.state & PERF_HES_STOPPED)
91b2f482 5690 return 1;
a4eaf7f1 5691
f5ffe02e
FW
5692 if (regs) {
5693 if (event->attr.exclude_user && user_mode(regs))
5694 return 1;
5695
5696 if (event->attr.exclude_kernel && !user_mode(regs))
5697 return 1;
5698 }
5699
5700 return 0;
5701}
5702
cdd6c482 5703static int perf_swevent_match(struct perf_event *event,
1c432d89 5704 enum perf_type_id type,
6fb2915d
LZ
5705 u32 event_id,
5706 struct perf_sample_data *data,
5707 struct pt_regs *regs)
15dbf27c 5708{
cdd6c482 5709 if (event->attr.type != type)
a21ca2ca 5710 return 0;
f5ffe02e 5711
cdd6c482 5712 if (event->attr.config != event_id)
15dbf27c
PZ
5713 return 0;
5714
f5ffe02e
FW
5715 if (perf_exclude_event(event, regs))
5716 return 0;
15dbf27c
PZ
5717
5718 return 1;
5719}
5720
76e1d904
FW
5721static inline u64 swevent_hash(u64 type, u32 event_id)
5722{
5723 u64 val = event_id | (type << 32);
5724
5725 return hash_64(val, SWEVENT_HLIST_BITS);
5726}
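/*
 * For example, a software page-fault event hashes as
 *	val = PERF_COUNT_SW_PAGE_FAULTS | ((u64)PERF_TYPE_SOFTWARE << 32)
 *	    = 0x0000000100000002,
 * which hash_64() folds down to SWEVENT_HLIST_BITS bits to pick the
 * per-cpu hlist bucket used below.
 */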
5727
49f135ed
FW
5728static inline struct hlist_head *
5729__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
76e1d904 5730{
49f135ed
FW
5731 u64 hash = swevent_hash(type, event_id);
5732
5733 return &hlist->heads[hash];
5734}
76e1d904 5735
49f135ed
FW
5736/* For the read side: events when they trigger */
5737static inline struct hlist_head *
b28ab83c 5738find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
49f135ed
FW
5739{
5740 struct swevent_hlist *hlist;
76e1d904 5741
b28ab83c 5742 hlist = rcu_dereference(swhash->swevent_hlist);
76e1d904
FW
5743 if (!hlist)
5744 return NULL;
5745
49f135ed
FW
5746 return __find_swevent_head(hlist, type, event_id);
5747}
5748
5749/* For the event head insertion and removal in the hlist */
5750static inline struct hlist_head *
b28ab83c 5751find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
49f135ed
FW
5752{
5753 struct swevent_hlist *hlist;
5754 u32 event_id = event->attr.config;
5755 u64 type = event->attr.type;
5756
5757 /*
5758 * Event scheduling is always serialized against hlist allocation
 5759 * and release, which makes the protected version suitable here.
5760 * The context lock guarantees that.
5761 */
b28ab83c 5762 hlist = rcu_dereference_protected(swhash->swevent_hlist,
49f135ed
FW
5763 lockdep_is_held(&event->ctx->lock));
5764 if (!hlist)
5765 return NULL;
5766
5767 return __find_swevent_head(hlist, type, event_id);
76e1d904
FW
5768}
5769
5770static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
a8b0ca17 5771 u64 nr,
76e1d904
FW
5772 struct perf_sample_data *data,
5773 struct pt_regs *regs)
15dbf27c 5774{
b28ab83c 5775 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
cdd6c482 5776 struct perf_event *event;
76e1d904 5777 struct hlist_head *head;
15dbf27c 5778
76e1d904 5779 rcu_read_lock();
b28ab83c 5780 head = find_swevent_head_rcu(swhash, type, event_id);
76e1d904
FW
5781 if (!head)
5782 goto end;
5783
b67bfe0d 5784 hlist_for_each_entry_rcu(event, head, hlist_entry) {
6fb2915d 5785 if (perf_swevent_match(event, type, event_id, data, regs))
a8b0ca17 5786 perf_swevent_event(event, nr, data, regs);
15dbf27c 5787 }
76e1d904
FW
5788end:
5789 rcu_read_unlock();
15dbf27c
PZ
5790}
5791
4ed7c92d 5792int perf_swevent_get_recursion_context(void)
96f6d444 5793{
b28ab83c 5794 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
96f6d444 5795
b28ab83c 5796 return get_recursion_context(swhash->recursion);
96f6d444 5797}
645e8cc0 5798EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
96f6d444 5799
fa9f90be 5800inline void perf_swevent_put_recursion_context(int rctx)
15dbf27c 5801{
b28ab83c 5802 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
927c7a9e 5803
b28ab83c 5804 put_recursion_context(swhash->recursion, rctx);
ce71b9df 5805}
15dbf27c 5806
a8b0ca17 5807void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
b8e83514 5808{
a4234bfc 5809 struct perf_sample_data data;
4ed7c92d
PZ
5810 int rctx;
5811
1c024eca 5812 preempt_disable_notrace();
4ed7c92d
PZ
5813 rctx = perf_swevent_get_recursion_context();
5814 if (rctx < 0)
5815 return;
a4234bfc 5816
fd0d000b 5817 perf_sample_data_init(&data, addr, 0);
92bf309a 5818
a8b0ca17 5819 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
4ed7c92d
PZ
5820
5821 perf_swevent_put_recursion_context(rctx);
1c024eca 5822 preempt_enable_notrace();
b8e83514
PZ
5823}
5824
cdd6c482 5825static void perf_swevent_read(struct perf_event *event)
15dbf27c 5826{
15dbf27c
PZ
5827}
5828
a4eaf7f1 5829static int perf_swevent_add(struct perf_event *event, int flags)
15dbf27c 5830{
b28ab83c 5831 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
cdd6c482 5832 struct hw_perf_event *hwc = &event->hw;
76e1d904
FW
5833 struct hlist_head *head;
5834
6c7e550f 5835 if (is_sampling_event(event)) {
7b4b6658 5836 hwc->last_period = hwc->sample_period;
cdd6c482 5837 perf_swevent_set_period(event);
7b4b6658 5838 }
76e1d904 5839
a4eaf7f1
PZ
5840 hwc->state = !(flags & PERF_EF_START);
5841
b28ab83c 5842 head = find_swevent_head(swhash, event);
39af6b16
JO
5843 if (!head) {
5844 /*
5845 * We can race with cpu hotplug code. Do not
5846 * WARN if the cpu just got unplugged.
5847 */
5848 WARN_ON_ONCE(swhash->online);
76e1d904 5849 return -EINVAL;
39af6b16 5850 }
76e1d904
FW
5851
5852 hlist_add_head_rcu(&event->hlist_entry, head);
5853
15dbf27c
PZ
5854 return 0;
5855}
5856
a4eaf7f1 5857static void perf_swevent_del(struct perf_event *event, int flags)
15dbf27c 5858{
76e1d904 5859 hlist_del_rcu(&event->hlist_entry);
15dbf27c
PZ
5860}
5861
a4eaf7f1 5862static void perf_swevent_start(struct perf_event *event, int flags)
5c92d124 5863{
a4eaf7f1 5864 event->hw.state = 0;
d6d020e9 5865}
aa9c4c0f 5866
a4eaf7f1 5867static void perf_swevent_stop(struct perf_event *event, int flags)
d6d020e9 5868{
a4eaf7f1 5869 event->hw.state = PERF_HES_STOPPED;
bae43c99
IM
5870}
5871
49f135ed
FW
5872/* Deref the hlist from the update side */
5873static inline struct swevent_hlist *
b28ab83c 5874swevent_hlist_deref(struct swevent_htable *swhash)
49f135ed 5875{
b28ab83c
PZ
5876 return rcu_dereference_protected(swhash->swevent_hlist,
5877 lockdep_is_held(&swhash->hlist_mutex));
49f135ed
FW
5878}
5879
b28ab83c 5880static void swevent_hlist_release(struct swevent_htable *swhash)
76e1d904 5881{
b28ab83c 5882 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
76e1d904 5883
49f135ed 5884 if (!hlist)
76e1d904
FW
5885 return;
5886
b28ab83c 5887 rcu_assign_pointer(swhash->swevent_hlist, NULL);
fa4bbc4c 5888 kfree_rcu(hlist, rcu_head);
76e1d904
FW
5889}
5890
5891static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5892{
b28ab83c 5893 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904 5894
b28ab83c 5895 mutex_lock(&swhash->hlist_mutex);
76e1d904 5896
b28ab83c
PZ
5897 if (!--swhash->hlist_refcount)
5898 swevent_hlist_release(swhash);
76e1d904 5899
b28ab83c 5900 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
5901}
5902
5903static void swevent_hlist_put(struct perf_event *event)
5904{
5905 int cpu;
5906
76e1d904
FW
5907 for_each_possible_cpu(cpu)
5908 swevent_hlist_put_cpu(event, cpu);
5909}
5910
5911static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5912{
b28ab83c 5913 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904
FW
5914 int err = 0;
5915
b28ab83c 5916 mutex_lock(&swhash->hlist_mutex);
76e1d904 5917
b28ab83c 5918 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
76e1d904
FW
5919 struct swevent_hlist *hlist;
5920
5921 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5922 if (!hlist) {
5923 err = -ENOMEM;
5924 goto exit;
5925 }
b28ab83c 5926 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 5927 }
b28ab83c 5928 swhash->hlist_refcount++;
9ed6060d 5929exit:
b28ab83c 5930 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
5931
5932 return err;
5933}
5934
5935static int swevent_hlist_get(struct perf_event *event)
5936{
5937 int err;
5938 int cpu, failed_cpu;
5939
76e1d904
FW
5940 get_online_cpus();
5941 for_each_possible_cpu(cpu) {
5942 err = swevent_hlist_get_cpu(event, cpu);
5943 if (err) {
5944 failed_cpu = cpu;
5945 goto fail;
5946 }
5947 }
5948 put_online_cpus();
5949
5950 return 0;
9ed6060d 5951fail:
76e1d904
FW
5952 for_each_possible_cpu(cpu) {
5953 if (cpu == failed_cpu)
5954 break;
5955 swevent_hlist_put_cpu(event, cpu);
5956 }
5957
5958 put_online_cpus();
5959 return err;
5960}
5961
c5905afb 5962struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
95476b64 5963
b0a873eb
PZ
5964static void sw_perf_event_destroy(struct perf_event *event)
5965{
5966 u64 event_id = event->attr.config;
95476b64 5967
b0a873eb
PZ
5968 WARN_ON(event->parent);
5969
c5905afb 5970 static_key_slow_dec(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
5971 swevent_hlist_put(event);
5972}
5973
5974static int perf_swevent_init(struct perf_event *event)
5975{
8176cced 5976 u64 event_id = event->attr.config;
b0a873eb
PZ
5977
5978 if (event->attr.type != PERF_TYPE_SOFTWARE)
5979 return -ENOENT;
5980
2481c5fa
SE
5981 /*
5982 * no branch sampling for software events
5983 */
5984 if (has_branch_stack(event))
5985 return -EOPNOTSUPP;
5986
b0a873eb
PZ
5987 switch (event_id) {
5988 case PERF_COUNT_SW_CPU_CLOCK:
5989 case PERF_COUNT_SW_TASK_CLOCK:
5990 return -ENOENT;
5991
5992 default:
5993 break;
5994 }
5995
ce677831 5996 if (event_id >= PERF_COUNT_SW_MAX)
b0a873eb
PZ
5997 return -ENOENT;
5998
5999 if (!event->parent) {
6000 int err;
6001
6002 err = swevent_hlist_get(event);
6003 if (err)
6004 return err;
6005
c5905afb 6006 static_key_slow_inc(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
6007 event->destroy = sw_perf_event_destroy;
6008 }
6009
6010 return 0;
6011}
6012
35edc2a5
PZ
6013static int perf_swevent_event_idx(struct perf_event *event)
6014{
6015 return 0;
6016}
6017
b0a873eb 6018static struct pmu perf_swevent = {
89a1e187 6019 .task_ctx_nr = perf_sw_context,
95476b64 6020
b0a873eb 6021 .event_init = perf_swevent_init,
a4eaf7f1
PZ
6022 .add = perf_swevent_add,
6023 .del = perf_swevent_del,
6024 .start = perf_swevent_start,
6025 .stop = perf_swevent_stop,
1c024eca 6026 .read = perf_swevent_read,
35edc2a5
PZ
6027
6028 .event_idx = perf_swevent_event_idx,
1c024eca
PZ
6029};
6030
b0a873eb
PZ
6031#ifdef CONFIG_EVENT_TRACING
6032
1c024eca
PZ
6033static int perf_tp_filter_match(struct perf_event *event,
6034 struct perf_sample_data *data)
6035{
6036 void *record = data->raw->data;
6037
6038 if (likely(!event->filter) || filter_match_preds(event->filter, record))
6039 return 1;
6040 return 0;
6041}
6042
6043static int perf_tp_event_match(struct perf_event *event,
6044 struct perf_sample_data *data,
6045 struct pt_regs *regs)
6046{
a0f7d0f7
FW
6047 if (event->hw.state & PERF_HES_STOPPED)
6048 return 0;
580d607c
PZ
6049 /*
6050 * All tracepoints are from kernel-space.
6051 */
6052 if (event->attr.exclude_kernel)
1c024eca
PZ
6053 return 0;
6054
6055 if (!perf_tp_filter_match(event, data))
6056 return 0;
6057
6058 return 1;
6059}
6060
6061void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
e6dab5ff
AV
6062 struct pt_regs *regs, struct hlist_head *head, int rctx,
6063 struct task_struct *task)
95476b64
FW
6064{
6065 struct perf_sample_data data;
1c024eca 6066 struct perf_event *event;
1c024eca 6067
95476b64
FW
6068 struct perf_raw_record raw = {
6069 .size = entry_size,
6070 .data = record,
6071 };
6072
fd0d000b 6073 perf_sample_data_init(&data, addr, 0);
95476b64
FW
6074 data.raw = &raw;
6075
b67bfe0d 6076 hlist_for_each_entry_rcu(event, head, hlist_entry) {
1c024eca 6077 if (perf_tp_event_match(event, &data, regs))
a8b0ca17 6078 perf_swevent_event(event, count, &data, regs);
4f41c013 6079 }
ecc55f84 6080
e6dab5ff
AV
6081 /*
6082 * If we got specified a target task, also iterate its context and
6083 * deliver this event there too.
6084 */
6085 if (task && task != current) {
6086 struct perf_event_context *ctx;
6087 struct trace_entry *entry = record;
6088
6089 rcu_read_lock();
6090 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
6091 if (!ctx)
6092 goto unlock;
6093
6094 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
6095 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6096 continue;
6097 if (event->attr.config != entry->type)
6098 continue;
6099 if (perf_tp_event_match(event, &data, regs))
6100 perf_swevent_event(event, count, &data, regs);
6101 }
6102unlock:
6103 rcu_read_unlock();
6104 }
6105
ecc55f84 6106 perf_swevent_put_recursion_context(rctx);
95476b64
FW
6107}
6108EXPORT_SYMBOL_GPL(perf_tp_event);
6109
cdd6c482 6110static void tp_perf_event_destroy(struct perf_event *event)
e077df4f 6111{
1c024eca 6112 perf_trace_destroy(event);
e077df4f
PZ
6113}
6114
b0a873eb 6115static int perf_tp_event_init(struct perf_event *event)
e077df4f 6116{
76e1d904
FW
6117 int err;
6118
b0a873eb
PZ
6119 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6120 return -ENOENT;
6121
2481c5fa
SE
6122 /*
6123 * no branch sampling for tracepoint events
6124 */
6125 if (has_branch_stack(event))
6126 return -EOPNOTSUPP;
6127
1c024eca
PZ
6128 err = perf_trace_init(event);
6129 if (err)
b0a873eb 6130 return err;
e077df4f 6131
cdd6c482 6132 event->destroy = tp_perf_event_destroy;
e077df4f 6133
b0a873eb
PZ
6134 return 0;
6135}
6136
6137static struct pmu perf_tracepoint = {
89a1e187
PZ
6138 .task_ctx_nr = perf_sw_context,
6139
b0a873eb 6140 .event_init = perf_tp_event_init,
a4eaf7f1
PZ
6141 .add = perf_trace_add,
6142 .del = perf_trace_del,
6143 .start = perf_swevent_start,
6144 .stop = perf_swevent_stop,
b0a873eb 6145 .read = perf_swevent_read,
35edc2a5
PZ
6146
6147 .event_idx = perf_swevent_event_idx,
b0a873eb
PZ
6148};
6149
6150static inline void perf_tp_register(void)
6151{
2e80a82a 6152 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
e077df4f 6153}
6fb2915d
LZ
6154
6155static int perf_event_set_filter(struct perf_event *event, void __user *arg)
6156{
6157 char *filter_str;
6158 int ret;
6159
6160 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6161 return -EINVAL;
6162
6163 filter_str = strndup_user(arg, PAGE_SIZE);
6164 if (IS_ERR(filter_str))
6165 return PTR_ERR(filter_str);
6166
6167 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
6168
6169 kfree(filter_str);
6170 return ret;
6171}
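/*
 * Reached via the PERF_EVENT_IOC_SET_FILTER ioctl on a tracepoint event
 * fd; arg points at a filter string (illustrative example) such as
 * "bytes_req > 256" for the kmem:kmalloc tracepoint, which
 * ftrace_profile_set_filter() parses against that tracepoint's fields.
 */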
6172
6173static void perf_event_free_filter(struct perf_event *event)
6174{
6175 ftrace_profile_free_filter(event);
6176}
6177
e077df4f 6178#else
6fb2915d 6179
b0a873eb 6180static inline void perf_tp_register(void)
e077df4f 6181{
e077df4f 6182}
6fb2915d
LZ
6183
6184static int perf_event_set_filter(struct perf_event *event, void __user *arg)
6185{
6186 return -ENOENT;
6187}
6188
6189static void perf_event_free_filter(struct perf_event *event)
6190{
6191}
6192
07b139c8 6193#endif /* CONFIG_EVENT_TRACING */
e077df4f 6194
24f1e32c 6195#ifdef CONFIG_HAVE_HW_BREAKPOINT
f5ffe02e 6196void perf_bp_event(struct perf_event *bp, void *data)
24f1e32c 6197{
f5ffe02e
FW
6198 struct perf_sample_data sample;
6199 struct pt_regs *regs = data;
6200
fd0d000b 6201 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
f5ffe02e 6202
a4eaf7f1 6203 if (!bp->hw.state && !perf_exclude_event(bp, regs))
a8b0ca17 6204 perf_swevent_event(bp, 1, &sample, regs);
24f1e32c
FW
6205}
6206#endif
6207
b0a873eb
PZ
6208/*
6209 * hrtimer based swevent callback
6210 */
f29ac756 6211
b0a873eb 6212static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
f29ac756 6213{
b0a873eb
PZ
6214 enum hrtimer_restart ret = HRTIMER_RESTART;
6215 struct perf_sample_data data;
6216 struct pt_regs *regs;
6217 struct perf_event *event;
6218 u64 period;
f29ac756 6219
b0a873eb 6220 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
ba3dd36c
PZ
6221
6222 if (event->state != PERF_EVENT_STATE_ACTIVE)
6223 return HRTIMER_NORESTART;
6224
b0a873eb 6225 event->pmu->read(event);
f344011c 6226
fd0d000b 6227 perf_sample_data_init(&data, 0, event->hw.last_period);
b0a873eb
PZ
6228 regs = get_irq_regs();
6229
6230 if (regs && !perf_exclude_event(event, regs)) {
77aeeebd 6231 if (!(event->attr.exclude_idle && is_idle_task(current)))
33b07b8b 6232 if (__perf_event_overflow(event, 1, &data, regs))
b0a873eb
PZ
6233 ret = HRTIMER_NORESTART;
6234 }
24f1e32c 6235
b0a873eb
PZ
6236 period = max_t(u64, 10000, event->hw.sample_period);
6237 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
24f1e32c 6238
b0a873eb 6239 return ret;
f29ac756
PZ
6240}
6241
b0a873eb 6242static void perf_swevent_start_hrtimer(struct perf_event *event)
5c92d124 6243{
b0a873eb 6244 struct hw_perf_event *hwc = &event->hw;
5d508e82
FBH
6245 s64 period;
6246
6247 if (!is_sampling_event(event))
6248 return;
f5ffe02e 6249
5d508e82
FBH
6250 period = local64_read(&hwc->period_left);
6251 if (period) {
6252 if (period < 0)
6253 period = 10000;
fa407f35 6254
5d508e82
FBH
6255 local64_set(&hwc->period_left, 0);
6256 } else {
6257 period = max_t(u64, 10000, hwc->sample_period);
6258 }
6259 __hrtimer_start_range_ns(&hwc->hrtimer,
b0a873eb 6260 ns_to_ktime(period), 0,
b5ab4cd5 6261 HRTIMER_MODE_REL_PINNED, 0);
24f1e32c 6262}
b0a873eb
PZ
6263
6264static void perf_swevent_cancel_hrtimer(struct perf_event *event)
24f1e32c 6265{
b0a873eb
PZ
6266 struct hw_perf_event *hwc = &event->hw;
6267
6c7e550f 6268 if (is_sampling_event(event)) {
b0a873eb 6269 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
fa407f35 6270 local64_set(&hwc->period_left, ktime_to_ns(remaining));
b0a873eb
PZ
6271
6272 hrtimer_cancel(&hwc->hrtimer);
6273 }
24f1e32c
FW
6274}
6275
ba3dd36c
PZ
6276static void perf_swevent_init_hrtimer(struct perf_event *event)
6277{
6278 struct hw_perf_event *hwc = &event->hw;
6279
6280 if (!is_sampling_event(event))
6281 return;
6282
6283 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6284 hwc->hrtimer.function = perf_swevent_hrtimer;
6285
6286 /*
6287 * Since hrtimers have a fixed rate, we can do a static freq->period
6288 * mapping and avoid the whole period adjust feedback stuff.
6289 */
6290 if (event->attr.freq) {
6291 long freq = event->attr.sample_freq;
6292
6293 event->attr.sample_period = NSEC_PER_SEC / freq;
6294 hwc->sample_period = event->attr.sample_period;
6295 local64_set(&hwc->period_left, hwc->sample_period);
778141e3 6296 hwc->last_period = hwc->sample_period;
ba3dd36c
PZ
6297 event->attr.freq = 0;
6298 }
6299}
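/*
 * For illustration: a caller asking for attr.freq == 1 with
 * attr.sample_freq == 4000 ends up with a fixed
 * sample_period = NSEC_PER_SEC / 4000 = 250000ns hrtimer, instead of the
 * per-tick period adjustment used for real hardware counters.
 */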
6300
b0a873eb
PZ
6301/*
6302 * Software event: cpu wall time clock
6303 */
6304
6305static void cpu_clock_event_update(struct perf_event *event)
24f1e32c 6306{
b0a873eb
PZ
6307 s64 prev;
6308 u64 now;
6309
a4eaf7f1 6310 now = local_clock();
b0a873eb
PZ
6311 prev = local64_xchg(&event->hw.prev_count, now);
6312 local64_add(now - prev, &event->count);
24f1e32c 6313}
24f1e32c 6314
a4eaf7f1 6315static void cpu_clock_event_start(struct perf_event *event, int flags)
b0a873eb 6316{
a4eaf7f1 6317 local64_set(&event->hw.prev_count, local_clock());
b0a873eb 6318 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
6319}
6320
a4eaf7f1 6321static void cpu_clock_event_stop(struct perf_event *event, int flags)
f29ac756 6322{
b0a873eb
PZ
6323 perf_swevent_cancel_hrtimer(event);
6324 cpu_clock_event_update(event);
6325}
f29ac756 6326
a4eaf7f1
PZ
6327static int cpu_clock_event_add(struct perf_event *event, int flags)
6328{
6329 if (flags & PERF_EF_START)
6330 cpu_clock_event_start(event, flags);
6331
6332 return 0;
6333}
6334
6335static void cpu_clock_event_del(struct perf_event *event, int flags)
6336{
6337 cpu_clock_event_stop(event, flags);
6338}
6339
b0a873eb
PZ
6340static void cpu_clock_event_read(struct perf_event *event)
6341{
6342 cpu_clock_event_update(event);
6343}
f344011c 6344
b0a873eb
PZ
6345static int cpu_clock_event_init(struct perf_event *event)
6346{
6347 if (event->attr.type != PERF_TYPE_SOFTWARE)
6348 return -ENOENT;
6349
6350 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
6351 return -ENOENT;
6352
2481c5fa
SE
6353 /*
6354 * no branch sampling for software events
6355 */
6356 if (has_branch_stack(event))
6357 return -EOPNOTSUPP;
6358
ba3dd36c
PZ
6359 perf_swevent_init_hrtimer(event);
6360
b0a873eb 6361 return 0;
f29ac756
PZ
6362}
6363
b0a873eb 6364static struct pmu perf_cpu_clock = {
89a1e187
PZ
6365 .task_ctx_nr = perf_sw_context,
6366
b0a873eb 6367 .event_init = cpu_clock_event_init,
a4eaf7f1
PZ
6368 .add = cpu_clock_event_add,
6369 .del = cpu_clock_event_del,
6370 .start = cpu_clock_event_start,
6371 .stop = cpu_clock_event_stop,
b0a873eb 6372 .read = cpu_clock_event_read,
35edc2a5
PZ
6373
6374 .event_idx = perf_swevent_event_idx,
b0a873eb
PZ
6375};
6376
6377/*
6378 * Software event: task time clock
6379 */
6380
6381static void task_clock_event_update(struct perf_event *event, u64 now)
5c92d124 6382{
b0a873eb
PZ
6383 u64 prev;
6384 s64 delta;
5c92d124 6385
b0a873eb
PZ
6386 prev = local64_xchg(&event->hw.prev_count, now);
6387 delta = now - prev;
6388 local64_add(delta, &event->count);
6389}
5c92d124 6390
a4eaf7f1 6391static void task_clock_event_start(struct perf_event *event, int flags)
b0a873eb 6392{
a4eaf7f1 6393 local64_set(&event->hw.prev_count, event->ctx->time);
b0a873eb 6394 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
6395}
6396
a4eaf7f1 6397static void task_clock_event_stop(struct perf_event *event, int flags)
b0a873eb
PZ
6398{
6399 perf_swevent_cancel_hrtimer(event);
6400 task_clock_event_update(event, event->ctx->time);
a4eaf7f1
PZ
6401}
6402
6403static int task_clock_event_add(struct perf_event *event, int flags)
6404{
6405 if (flags & PERF_EF_START)
6406 task_clock_event_start(event, flags);
b0a873eb 6407
a4eaf7f1
PZ
6408 return 0;
6409}
6410
6411static void task_clock_event_del(struct perf_event *event, int flags)
6412{
6413 task_clock_event_stop(event, PERF_EF_UPDATE);
b0a873eb
PZ
6414}
6415
6416static void task_clock_event_read(struct perf_event *event)
6417{
768a06e2
PZ
6418 u64 now = perf_clock();
6419 u64 delta = now - event->ctx->timestamp;
6420 u64 time = event->ctx->time + delta;
b0a873eb
PZ
6421
6422 task_clock_event_update(event, time);
6423}
6424
6425static int task_clock_event_init(struct perf_event *event)
6fb2915d 6426{
b0a873eb
PZ
6427 if (event->attr.type != PERF_TYPE_SOFTWARE)
6428 return -ENOENT;
6429
6430 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
6431 return -ENOENT;
6432
2481c5fa
SE
6433 /*
6434 * no branch sampling for software events
6435 */
6436 if (has_branch_stack(event))
6437 return -EOPNOTSUPP;
6438
ba3dd36c
PZ
6439 perf_swevent_init_hrtimer(event);
6440
b0a873eb 6441 return 0;
6fb2915d
LZ
6442}
6443
b0a873eb 6444static struct pmu perf_task_clock = {
89a1e187
PZ
6445 .task_ctx_nr = perf_sw_context,
6446
b0a873eb 6447 .event_init = task_clock_event_init,
a4eaf7f1
PZ
6448 .add = task_clock_event_add,
6449 .del = task_clock_event_del,
6450 .start = task_clock_event_start,
6451 .stop = task_clock_event_stop,
b0a873eb 6452 .read = task_clock_event_read,
35edc2a5
PZ
6453
6454 .event_idx = perf_swevent_event_idx,
b0a873eb 6455};
6fb2915d 6456
ad5133b7 6457static void perf_pmu_nop_void(struct pmu *pmu)
e077df4f 6458{
e077df4f 6459}
6fb2915d 6460
ad5133b7 6461static int perf_pmu_nop_int(struct pmu *pmu)
6fb2915d 6462{
ad5133b7 6463 return 0;
6fb2915d
LZ
6464}
6465
ad5133b7 6466static void perf_pmu_start_txn(struct pmu *pmu)
6fb2915d 6467{
ad5133b7 6468 perf_pmu_disable(pmu);
6fb2915d
LZ
6469}
6470
ad5133b7
PZ
6471static int perf_pmu_commit_txn(struct pmu *pmu)
6472{
6473 perf_pmu_enable(pmu);
6474 return 0;
6475}
e077df4f 6476
ad5133b7 6477static void perf_pmu_cancel_txn(struct pmu *pmu)
24f1e32c 6478{
ad5133b7 6479 perf_pmu_enable(pmu);
24f1e32c
FW
6480}
6481
35edc2a5
PZ
6482static int perf_event_idx_default(struct perf_event *event)
6483{
6484 return event->hw.idx + 1;
6485}
6486
8dc85d54
PZ
6487/*
6488 * Ensures all contexts with the same task_ctx_nr have the same
6489 * pmu_cpu_context too.
6490 */
9e317041 6491static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
24f1e32c 6492{
8dc85d54 6493 struct pmu *pmu;
b326e956 6494
8dc85d54
PZ
6495 if (ctxn < 0)
6496 return NULL;
24f1e32c 6497
8dc85d54
PZ
6498 list_for_each_entry(pmu, &pmus, entry) {
6499 if (pmu->task_ctx_nr == ctxn)
6500 return pmu->pmu_cpu_context;
6501 }
24f1e32c 6502
8dc85d54 6503 return NULL;
24f1e32c
FW
6504}
6505
51676957 6506static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
24f1e32c 6507{
51676957
PZ
6508 int cpu;
6509
6510 for_each_possible_cpu(cpu) {
6511 struct perf_cpu_context *cpuctx;
6512
6513 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6514
3f1f3320
PZ
6515 if (cpuctx->unique_pmu == old_pmu)
6516 cpuctx->unique_pmu = pmu;
51676957
PZ
6517 }
6518}
6519
6520static void free_pmu_context(struct pmu *pmu)
6521{
6522 struct pmu *i;
f5ffe02e 6523
8dc85d54 6524 mutex_lock(&pmus_lock);
0475f9ea 6525 /*
8dc85d54 6526 * Like a real lame refcount.
0475f9ea 6527 */
51676957
PZ
6528 list_for_each_entry(i, &pmus, entry) {
6529 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
6530 update_pmu_context(i, pmu);
8dc85d54 6531 goto out;
51676957 6532 }
8dc85d54 6533 }
d6d020e9 6534
51676957 6535 free_percpu(pmu->pmu_cpu_context);
8dc85d54
PZ
6536out:
6537 mutex_unlock(&pmus_lock);
24f1e32c 6538}
2e80a82a 6539static struct idr pmu_idr;
d6d020e9 6540
abe43400
PZ
6541static ssize_t
6542type_show(struct device *dev, struct device_attribute *attr, char *page)
6543{
6544 struct pmu *pmu = dev_get_drvdata(dev);
6545
6546 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
6547}
90826ca7 6548static DEVICE_ATTR_RO(type);
abe43400 6549
62b85639
SE
6550static ssize_t
6551perf_event_mux_interval_ms_show(struct device *dev,
6552 struct device_attribute *attr,
6553 char *page)
6554{
6555 struct pmu *pmu = dev_get_drvdata(dev);
6556
6557 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
6558}
6559
6560static ssize_t
6561perf_event_mux_interval_ms_store(struct device *dev,
6562 struct device_attribute *attr,
6563 const char *buf, size_t count)
6564{
6565 struct pmu *pmu = dev_get_drvdata(dev);
6566 int timer, cpu, ret;
6567
6568 ret = kstrtoint(buf, 0, &timer);
6569 if (ret)
6570 return ret;
6571
6572 if (timer < 1)
6573 return -EINVAL;
6574
 6575 /* same value, nothing to do */
6576 if (timer == pmu->hrtimer_interval_ms)
6577 return count;
6578
6579 pmu->hrtimer_interval_ms = timer;
6580
6581 /* update all cpuctx for this PMU */
6582 for_each_possible_cpu(cpu) {
6583 struct perf_cpu_context *cpuctx;
6584 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6585 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
6586
6587 if (hrtimer_active(&cpuctx->hrtimer))
6588 hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
6589 }
6590
6591 return count;
6592}
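/*
 * Illustrative usage from userspace (the PMU directory name varies), e.g.
 *
 *	echo 2 > /sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms
 *
 * updates hrtimer_interval on every CPU's context for that PMU, so event
 * multiplexing rotates every 2ms instead of the default interval.
 */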
90826ca7 6593static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
62b85639 6594
90826ca7
GKH
6595static struct attribute *pmu_dev_attrs[] = {
6596 &dev_attr_type.attr,
6597 &dev_attr_perf_event_mux_interval_ms.attr,
6598 NULL,
abe43400 6599};
90826ca7 6600ATTRIBUTE_GROUPS(pmu_dev);
abe43400
PZ
6601
6602static int pmu_bus_running;
6603static struct bus_type pmu_bus = {
6604 .name = "event_source",
90826ca7 6605 .dev_groups = pmu_dev_groups,
abe43400
PZ
6606};
6607
6608static void pmu_dev_release(struct device *dev)
6609{
6610 kfree(dev);
6611}
6612
6613static int pmu_dev_alloc(struct pmu *pmu)
6614{
6615 int ret = -ENOMEM;
6616
6617 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
6618 if (!pmu->dev)
6619 goto out;
6620
0c9d42ed 6621 pmu->dev->groups = pmu->attr_groups;
abe43400
PZ
6622 device_initialize(pmu->dev);
6623 ret = dev_set_name(pmu->dev, "%s", pmu->name);
6624 if (ret)
6625 goto free_dev;
6626
6627 dev_set_drvdata(pmu->dev, pmu);
6628 pmu->dev->bus = &pmu_bus;
6629 pmu->dev->release = pmu_dev_release;
6630 ret = device_add(pmu->dev);
6631 if (ret)
6632 goto free_dev;
6633
6634out:
6635 return ret;
6636
6637free_dev:
6638 put_device(pmu->dev);
6639 goto out;
6640}
6641
547e9fd7 6642static struct lock_class_key cpuctx_mutex;
facc4307 6643static struct lock_class_key cpuctx_lock;
547e9fd7 6644
03d8e80b 6645int perf_pmu_register(struct pmu *pmu, const char *name, int type)
24f1e32c 6646{
108b02cf 6647 int cpu, ret;
24f1e32c 6648
b0a873eb 6649 mutex_lock(&pmus_lock);
33696fc0
PZ
6650 ret = -ENOMEM;
6651 pmu->pmu_disable_count = alloc_percpu(int);
6652 if (!pmu->pmu_disable_count)
6653 goto unlock;
f29ac756 6654
2e80a82a
PZ
6655 pmu->type = -1;
6656 if (!name)
6657 goto skip_type;
6658 pmu->name = name;
6659
6660 if (type < 0) {
0e9c3be2
TH
6661 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
6662 if (type < 0) {
6663 ret = type;
2e80a82a
PZ
6664 goto free_pdc;
6665 }
6666 }
6667 pmu->type = type;
6668
abe43400
PZ
6669 if (pmu_bus_running) {
6670 ret = pmu_dev_alloc(pmu);
6671 if (ret)
6672 goto free_idr;
6673 }
6674
2e80a82a 6675skip_type:
8dc85d54
PZ
6676 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
6677 if (pmu->pmu_cpu_context)
6678 goto got_cpu_context;
f29ac756 6679
c4814202 6680 ret = -ENOMEM;
108b02cf
PZ
6681 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
6682 if (!pmu->pmu_cpu_context)
abe43400 6683 goto free_dev;
f344011c 6684
108b02cf
PZ
6685 for_each_possible_cpu(cpu) {
6686 struct perf_cpu_context *cpuctx;
6687
6688 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
eb184479 6689 __perf_event_init_context(&cpuctx->ctx);
547e9fd7 6690 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
facc4307 6691 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
b04243ef 6692 cpuctx->ctx.type = cpu_context;
108b02cf 6693 cpuctx->ctx.pmu = pmu;
9e630205
SE
6694
6695 __perf_cpu_hrtimer_init(cpuctx, cpu);
6696
e9d2b064 6697 INIT_LIST_HEAD(&cpuctx->rotation_list);
3f1f3320 6698 cpuctx->unique_pmu = pmu;
108b02cf 6699 }
76e1d904 6700
8dc85d54 6701got_cpu_context:
ad5133b7
PZ
6702 if (!pmu->start_txn) {
6703 if (pmu->pmu_enable) {
6704 /*
6705 * If we have pmu_enable/pmu_disable calls, install
 6706 * transaction stubs that use them to try to batch
6707 * hardware accesses.
6708 */
6709 pmu->start_txn = perf_pmu_start_txn;
6710 pmu->commit_txn = perf_pmu_commit_txn;
6711 pmu->cancel_txn = perf_pmu_cancel_txn;
6712 } else {
6713 pmu->start_txn = perf_pmu_nop_void;
6714 pmu->commit_txn = perf_pmu_nop_int;
6715 pmu->cancel_txn = perf_pmu_nop_void;
f344011c 6716 }
5c92d124 6717 }
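	/*
	 * Sketch (simplified from the group scheduling path) of how these
	 * hooks are used by callers:
	 *
	 *	pmu->start_txn(pmu);
	 *	for each event in the group:
	 *		if (event->pmu->add(event, PERF_EF_START))
	 *			goto undo;
	 *	if (!pmu->commit_txn(pmu))
	 *		return 0;	-> whole group scheduled
	 * undo:
	 *	remove what was added, then pmu->cancel_txn(pmu);
	 *
	 * so a PMU without real transaction support still gets its hardware
	 * accesses batched between pmu_disable() and pmu_enable() above.
	 */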
15dbf27c 6718
ad5133b7
PZ
6719 if (!pmu->pmu_enable) {
6720 pmu->pmu_enable = perf_pmu_nop_void;
6721 pmu->pmu_disable = perf_pmu_nop_void;
6722 }
6723
35edc2a5
PZ
6724 if (!pmu->event_idx)
6725 pmu->event_idx = perf_event_idx_default;
6726
b0a873eb 6727 list_add_rcu(&pmu->entry, &pmus);
33696fc0
PZ
6728 ret = 0;
6729unlock:
b0a873eb
PZ
6730 mutex_unlock(&pmus_lock);
6731
33696fc0 6732 return ret;
108b02cf 6733
abe43400
PZ
6734free_dev:
6735 device_del(pmu->dev);
6736 put_device(pmu->dev);
6737
2e80a82a
PZ
6738free_idr:
6739 if (pmu->type >= PERF_TYPE_MAX)
6740 idr_remove(&pmu_idr, pmu->type);
6741
108b02cf
PZ
6742free_pdc:
6743 free_percpu(pmu->pmu_disable_count);
6744 goto unlock;
f29ac756 6745}
c464c76e 6746EXPORT_SYMBOL_GPL(perf_pmu_register);
f29ac756 6747
b0a873eb 6748void perf_pmu_unregister(struct pmu *pmu)
5c92d124 6749{
b0a873eb
PZ
6750 mutex_lock(&pmus_lock);
6751 list_del_rcu(&pmu->entry);
6752 mutex_unlock(&pmus_lock);
5c92d124 6753
0475f9ea 6754 /*
cde8e884
PZ
6755 * We dereference the pmu list under both SRCU and regular RCU, so
6756 * synchronize against both of those.
0475f9ea 6757 */
b0a873eb 6758 synchronize_srcu(&pmus_srcu);
cde8e884 6759 synchronize_rcu();
d6d020e9 6760
33696fc0 6761 free_percpu(pmu->pmu_disable_count);
2e80a82a
PZ
6762 if (pmu->type >= PERF_TYPE_MAX)
6763 idr_remove(&pmu_idr, pmu->type);
abe43400
PZ
6764 device_del(pmu->dev);
6765 put_device(pmu->dev);
51676957 6766 free_pmu_context(pmu);
b0a873eb 6767}
c464c76e 6768EXPORT_SYMBOL_GPL(perf_pmu_unregister);
d6d020e9 6769
b0a873eb
PZ
6770struct pmu *perf_init_event(struct perf_event *event)
6771{
6772 struct pmu *pmu = NULL;
6773 int idx;
940c5b29 6774 int ret;
b0a873eb
PZ
6775
6776 idx = srcu_read_lock(&pmus_srcu);
2e80a82a
PZ
6777
6778 rcu_read_lock();
6779 pmu = idr_find(&pmu_idr, event->attr.type);
6780 rcu_read_unlock();
940c5b29 6781 if (pmu) {
c464c76e
YZ
6782 if (!try_module_get(pmu->module)) {
6783 pmu = ERR_PTR(-ENODEV);
6784 goto unlock;
6785 }
7e5b2a01 6786 event->pmu = pmu;
940c5b29
LM
6787 ret = pmu->event_init(event);
6788 if (ret)
6789 pmu = ERR_PTR(ret);
2e80a82a 6790 goto unlock;
940c5b29 6791 }
2e80a82a 6792
b0a873eb 6793 list_for_each_entry_rcu(pmu, &pmus, entry) {
c464c76e
YZ
6794 if (!try_module_get(pmu->module)) {
6795 pmu = ERR_PTR(-ENODEV);
6796 goto unlock;
6797 }
7e5b2a01 6798 event->pmu = pmu;
940c5b29 6799 ret = pmu->event_init(event);
b0a873eb 6800 if (!ret)
e5f4d339 6801 goto unlock;
76e1d904 6802
b0a873eb
PZ
6803 if (ret != -ENOENT) {
6804 pmu = ERR_PTR(ret);
e5f4d339 6805 goto unlock;
f344011c 6806 }
5c92d124 6807 }
e5f4d339
PZ
6808 pmu = ERR_PTR(-ENOENT);
6809unlock:
b0a873eb 6810 srcu_read_unlock(&pmus_srcu, idx);
15dbf27c 6811
4aeb0b42 6812 return pmu;
5c92d124
IM
6813}
6814
4beb31f3
FW
6815static void account_event_cpu(struct perf_event *event, int cpu)
6816{
6817 if (event->parent)
6818 return;
6819
6820 if (has_branch_stack(event)) {
6821 if (!(event->attach_state & PERF_ATTACH_TASK))
6822 atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
6823 }
6824 if (is_cgroup_event(event))
6825 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
6826}
6827
766d6c07
FW
6828static void account_event(struct perf_event *event)
6829{
4beb31f3
FW
6830 if (event->parent)
6831 return;
6832
766d6c07
FW
6833 if (event->attach_state & PERF_ATTACH_TASK)
6834 static_key_slow_inc(&perf_sched_events.key);
6835 if (event->attr.mmap || event->attr.mmap_data)
6836 atomic_inc(&nr_mmap_events);
6837 if (event->attr.comm)
6838 atomic_inc(&nr_comm_events);
6839 if (event->attr.task)
6840 atomic_inc(&nr_task_events);
948b26b6
FW
6841 if (event->attr.freq) {
6842 if (atomic_inc_return(&nr_freq_events) == 1)
6843 tick_nohz_full_kick_all();
6844 }
4beb31f3 6845 if (has_branch_stack(event))
766d6c07 6846 static_key_slow_inc(&perf_sched_events.key);
4beb31f3 6847 if (is_cgroup_event(event))
766d6c07 6848 static_key_slow_inc(&perf_sched_events.key);
4beb31f3
FW
6849
6850 account_event_cpu(event, event->cpu);
766d6c07
FW
6851}
6852
0793a61d 6853/*
cdd6c482 6854 * Allocate and initialize an event structure
0793a61d 6855 */
cdd6c482 6856static struct perf_event *
c3f00c70 6857perf_event_alloc(struct perf_event_attr *attr, int cpu,
d580ff86
PZ
6858 struct task_struct *task,
6859 struct perf_event *group_leader,
6860 struct perf_event *parent_event,
4dc0da86
AK
6861 perf_overflow_handler_t overflow_handler,
6862 void *context)
0793a61d 6863{
51b0fe39 6864 struct pmu *pmu;
cdd6c482
IM
6865 struct perf_event *event;
6866 struct hw_perf_event *hwc;
90983b16 6867 long err = -EINVAL;
0793a61d 6868
66832eb4
ON
6869 if ((unsigned)cpu >= nr_cpu_ids) {
6870 if (!task || cpu != -1)
6871 return ERR_PTR(-EINVAL);
6872 }
6873
c3f00c70 6874 event = kzalloc(sizeof(*event), GFP_KERNEL);
cdd6c482 6875 if (!event)
d5d2bc0d 6876 return ERR_PTR(-ENOMEM);
0793a61d 6877
04289bb9 6878 /*
cdd6c482 6879 * Single events are their own group leaders, with an
04289bb9
IM
6880 * empty sibling list:
6881 */
6882 if (!group_leader)
cdd6c482 6883 group_leader = event;
04289bb9 6884
cdd6c482
IM
6885 mutex_init(&event->child_mutex);
6886 INIT_LIST_HEAD(&event->child_list);
fccc714b 6887
cdd6c482
IM
6888 INIT_LIST_HEAD(&event->group_entry);
6889 INIT_LIST_HEAD(&event->event_entry);
6890 INIT_LIST_HEAD(&event->sibling_list);
10c6db11 6891 INIT_LIST_HEAD(&event->rb_entry);
71ad88ef 6892 INIT_LIST_HEAD(&event->active_entry);
f3ae75de
SE
6893 INIT_HLIST_NODE(&event->hlist_entry);
6894
10c6db11 6895
cdd6c482 6896 init_waitqueue_head(&event->waitq);
e360adbe 6897 init_irq_work(&event->pending, perf_pending_event);
0793a61d 6898
cdd6c482 6899 mutex_init(&event->mmap_mutex);
7b732a75 6900
a6fa941d 6901 atomic_long_set(&event->refcount, 1);
cdd6c482
IM
6902 event->cpu = cpu;
6903 event->attr = *attr;
6904 event->group_leader = group_leader;
6905 event->pmu = NULL;
cdd6c482 6906 event->oncpu = -1;
a96bbc16 6907
cdd6c482 6908 event->parent = parent_event;
b84fbc9f 6909
17cf22c3 6910 event->ns = get_pid_ns(task_active_pid_ns(current));
cdd6c482 6911 event->id = atomic64_inc_return(&perf_event_id);
a96bbc16 6912
cdd6c482 6913 event->state = PERF_EVENT_STATE_INACTIVE;
329d876d 6914
d580ff86
PZ
6915 if (task) {
6916 event->attach_state = PERF_ATTACH_TASK;
f22c1bb6
ON
6917
6918 if (attr->type == PERF_TYPE_TRACEPOINT)
6919 event->hw.tp_target = task;
d580ff86
PZ
6920#ifdef CONFIG_HAVE_HW_BREAKPOINT
6921 /*
6922 * hw_breakpoint is a bit difficult here..
6923 */
f22c1bb6 6924 else if (attr->type == PERF_TYPE_BREAKPOINT)
d580ff86
PZ
6925 event->hw.bp_target = task;
6926#endif
6927 }
6928
4dc0da86 6929 if (!overflow_handler && parent_event) {
b326e956 6930 overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
6931 context = parent_event->overflow_handler_context;
6932 }
66832eb4 6933
b326e956 6934 event->overflow_handler = overflow_handler;
4dc0da86 6935 event->overflow_handler_context = context;
97eaf530 6936
0231bb53 6937 perf_event__state_init(event);
a86ed508 6938
4aeb0b42 6939 pmu = NULL;
b8e83514 6940
cdd6c482 6941 hwc = &event->hw;
bd2b5b12 6942 hwc->sample_period = attr->sample_period;
0d48696f 6943 if (attr->freq && attr->sample_freq)
bd2b5b12 6944 hwc->sample_period = 1;
eced1dfc 6945 hwc->last_period = hwc->sample_period;
bd2b5b12 6946
e7850595 6947 local64_set(&hwc->period_left, hwc->sample_period);
60db5e09 6948
2023b359 6949 /*
cdd6c482 6950 * we currently do not support PERF_FORMAT_GROUP on inherited events
2023b359 6951 */
3dab77fb 6952 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
90983b16 6953 goto err_ns;
2023b359 6954
b0a873eb 6955 pmu = perf_init_event(event);
4aeb0b42 6956 if (!pmu)
90983b16
FW
6957 goto err_ns;
6958 else if (IS_ERR(pmu)) {
4aeb0b42 6959 err = PTR_ERR(pmu);
90983b16 6960 goto err_ns;
621a01ea 6961 }
d5d2bc0d 6962
cdd6c482 6963 if (!event->parent) {
927c7a9e
FW
6964 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6965 err = get_callchain_buffers();
90983b16
FW
6966 if (err)
6967 goto err_pmu;
d010b332 6968 }
f344011c 6969 }
9ee318a7 6970
cdd6c482 6971 return event;
90983b16
FW
6972
6973err_pmu:
6974 if (event->destroy)
6975 event->destroy(event);
c464c76e 6976 module_put(pmu->module);
90983b16
FW
6977err_ns:
6978 if (event->ns)
6979 put_pid_ns(event->ns);
6980 kfree(event);
6981
6982 return ERR_PTR(err);
0793a61d
TG
6983}
6984
cdd6c482
IM
6985static int perf_copy_attr(struct perf_event_attr __user *uattr,
6986 struct perf_event_attr *attr)
974802ea 6987{
974802ea 6988 u32 size;
cdf8073d 6989 int ret;
974802ea
PZ
6990
6991 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6992 return -EFAULT;
6993
6994 /*
6995 * zero the full structure, so that a short copy will be nice.
6996 */
6997 memset(attr, 0, sizeof(*attr));
6998
6999 ret = get_user(size, &uattr->size);
7000 if (ret)
7001 return ret;
7002
7003 if (size > PAGE_SIZE) /* silly large */
7004 goto err_size;
7005
7006 if (!size) /* abi compat */
7007 size = PERF_ATTR_SIZE_VER0;
7008
7009 if (size < PERF_ATTR_SIZE_VER0)
7010 goto err_size;
7011
7012 /*
7013 * If we're handed a bigger struct than we know of,
cdf8073d
IS
7014 * ensure all the unknown bits are 0 - i.e. new
7015 * user-space does not rely on any kernel feature
 7016 * extensions we don't know about yet.
974802ea
PZ
7017 */
7018 if (size > sizeof(*attr)) {
cdf8073d
IS
7019 unsigned char __user *addr;
7020 unsigned char __user *end;
7021 unsigned char val;
974802ea 7022
cdf8073d
IS
7023 addr = (void __user *)uattr + sizeof(*attr);
7024 end = (void __user *)uattr + size;
974802ea 7025
cdf8073d 7026 for (; addr < end; addr++) {
974802ea
PZ
7027 ret = get_user(val, addr);
7028 if (ret)
7029 return ret;
7030 if (val)
7031 goto err_size;
7032 }
b3e62e35 7033 size = sizeof(*attr);
974802ea
PZ
7034 }
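	/*
	 * For illustration: old userspace passing size == PERF_ATTR_SIZE_VER0
	 * (64 bytes) is fine because the memset() above already zeroed every
	 * newer field, i.e. "feature off"; newer userspace passing a size
	 * larger than this kernel's sizeof(*attr) is accepted only if every
	 * extra byte is zero, otherwise we bail out with -E2BIG via err_size.
	 */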
7035
7036 ret = copy_from_user(attr, uattr, size);
7037 if (ret)
7038 return -EFAULT;
7039
cd757645 7040 if (attr->__reserved_1)
974802ea
PZ
7041 return -EINVAL;
7042
7043 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
7044 return -EINVAL;
7045
7046 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
7047 return -EINVAL;
7048
bce38cd5
SE
7049 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
7050 u64 mask = attr->branch_sample_type;
7051
7052 /* only using defined bits */
7053 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
7054 return -EINVAL;
7055
7056 /* at least one branch bit must be set */
7057 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
7058 return -EINVAL;
7059
bce38cd5
SE
7060 /* propagate priv level, when not set for branch */
7061 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
7062
7063 /* exclude_kernel checked on syscall entry */
7064 if (!attr->exclude_kernel)
7065 mask |= PERF_SAMPLE_BRANCH_KERNEL;
7066
7067 if (!attr->exclude_user)
7068 mask |= PERF_SAMPLE_BRANCH_USER;
7069
7070 if (!attr->exclude_hv)
7071 mask |= PERF_SAMPLE_BRANCH_HV;
7072 /*
7073 * adjust user setting (for HW filter setup)
7074 */
7075 attr->branch_sample_type = mask;
7076 }
e712209a
SE
7077 /* privileged levels capture (kernel, hv): check permissions */
7078 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
2b923c8f
SE
7079 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
7080 return -EACCES;
bce38cd5 7081 }
4018994f 7082
c5ebcedb 7083 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
4018994f 7084 ret = perf_reg_validate(attr->sample_regs_user);
c5ebcedb
JO
7085 if (ret)
7086 return ret;
7087 }
7088
7089 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
7090 if (!arch_perf_have_user_stack_dump())
7091 return -ENOSYS;
7092
7093 /*
7094 * We have __u32 type for the size, but so far
7095 * we can only use __u16 as maximum due to the
7096 * __u16 sample size limit.
7097 */
7098 if (attr->sample_stack_user >= USHRT_MAX)
7099 ret = -EINVAL;
7100 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
7101 ret = -EINVAL;
7102 }
4018994f 7103
974802ea
PZ
7104out:
7105 return ret;
7106
7107err_size:
7108 put_user(sizeof(*attr), &uattr->size);
7109 ret = -E2BIG;
7110 goto out;
7111}
7112
ac9721f3
PZ
7113static int
7114perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
a4be7c27 7115{
b69cf536 7116 struct ring_buffer *rb = NULL;
a4be7c27
PZ
7117 int ret = -EINVAL;
7118
ac9721f3 7119 if (!output_event)
a4be7c27
PZ
7120 goto set;
7121
ac9721f3
PZ
7122 /* don't allow circular references */
7123 if (event == output_event)
a4be7c27
PZ
7124 goto out;
7125
0f139300
PZ
7126 /*
7127 * Don't allow cross-cpu buffers
7128 */
7129 if (output_event->cpu != event->cpu)
7130 goto out;
7131
7132 /*
76369139 7133 * If it's not a per-cpu rb, it must be the same task.
0f139300
PZ
7134 */
7135 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
7136 goto out;
7137
a4be7c27 7138set:
cdd6c482 7139 mutex_lock(&event->mmap_mutex);
ac9721f3
PZ
7140 /* Can't redirect output if we've got an active mmap() */
7141 if (atomic_read(&event->mmap_count))
7142 goto unlock;
a4be7c27 7143
ac9721f3 7144 if (output_event) {
76369139
FW
7145 /* get the rb we want to redirect to */
7146 rb = ring_buffer_get(output_event);
7147 if (!rb)
ac9721f3 7148 goto unlock;
a4be7c27
PZ
7149 }
7150
b69cf536 7151 ring_buffer_attach(event, rb);
9bb5d40c 7152
a4be7c27 7153 ret = 0;
ac9721f3
PZ
7154unlock:
7155 mutex_unlock(&event->mmap_mutex);
7156
a4be7c27 7157out:
a4be7c27
PZ
7158 return ret;
7159}
7160
0793a61d 7161/**
cdd6c482 7162 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9f66a381 7163 *
cdd6c482 7164 * @attr_uptr: event_id type attributes for monitoring/sampling
0793a61d 7165 * @pid: target pid
9f66a381 7166 * @cpu: target cpu
cdd6c482 7167 * @group_fd: group leader event fd
0793a61d 7168 */
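/*
 * Illustrative userspace sketch (not kernel code) of calling this syscall
 * to count a task's own instructions; there is no libc wrapper, so it is
 * typically invoked through syscall(2):
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	... run the workload, then read(fd, &count, sizeof(u64)) ...
 *
 * pid == 0, cpu == -1 means "this task, on any CPU"; the returned fd is
 * what the mmap()/ioctl()/read() paths above operate on.
 */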
cdd6c482
IM
7169SYSCALL_DEFINE5(perf_event_open,
7170 struct perf_event_attr __user *, attr_uptr,
2743a5b0 7171 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
0793a61d 7172{
b04243ef
PZ
7173 struct perf_event *group_leader = NULL, *output_event = NULL;
7174 struct perf_event *event, *sibling;
cdd6c482
IM
7175 struct perf_event_attr attr;
7176 struct perf_event_context *ctx;
7177 struct file *event_file = NULL;
2903ff01 7178 struct fd group = {NULL, 0};
38a81da2 7179 struct task_struct *task = NULL;
89a1e187 7180 struct pmu *pmu;
ea635c64 7181 int event_fd;
b04243ef 7182 int move_group = 0;
dc86cabe 7183 int err;
a21b0b35 7184 int f_flags = O_RDWR;
0793a61d 7185
2743a5b0 7186 /* for future expandability... */
e5d1367f 7187 if (flags & ~PERF_FLAG_ALL)
2743a5b0
PM
7188 return -EINVAL;
7189
dc86cabe
IM
7190 err = perf_copy_attr(attr_uptr, &attr);
7191 if (err)
7192 return err;
eab656ae 7193
0764771d
PZ
7194 if (!attr.exclude_kernel) {
7195 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
7196 return -EACCES;
7197 }
7198
df58ab24 7199 if (attr.freq) {
cdd6c482 7200 if (attr.sample_freq > sysctl_perf_event_sample_rate)
df58ab24 7201 return -EINVAL;
0819b2e3
PZ
7202 } else {
7203 if (attr.sample_period & (1ULL << 63))
7204 return -EINVAL;
df58ab24
PZ
7205 }
7206
e5d1367f
SE
7207 /*
7208 * In cgroup mode, the pid argument is used to pass the fd
7209 * opened to the cgroup directory in cgroupfs. The cpu argument
7210 * designates the cpu on which to monitor threads from that
7211 * cgroup.
7212 */
7213 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
7214 return -EINVAL;
7215
a21b0b35
YD
7216 if (flags & PERF_FLAG_FD_CLOEXEC)
7217 f_flags |= O_CLOEXEC;
7218
7219 event_fd = get_unused_fd_flags(f_flags);
ea635c64
AV
7220 if (event_fd < 0)
7221 return event_fd;
7222
ac9721f3 7223 if (group_fd != -1) {
2903ff01
AV
7224 err = perf_fget_light(group_fd, &group);
7225 if (err)
d14b12d7 7226 goto err_fd;
2903ff01 7227 group_leader = group.file->private_data;
ac9721f3
PZ
7228 if (flags & PERF_FLAG_FD_OUTPUT)
7229 output_event = group_leader;
7230 if (flags & PERF_FLAG_FD_NO_GROUP)
7231 group_leader = NULL;
7232 }
7233
e5d1367f 7234 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
c6be5a5c
PZ
7235 task = find_lively_task_by_vpid(pid);
7236 if (IS_ERR(task)) {
7237 err = PTR_ERR(task);
7238 goto err_group_fd;
7239 }
7240 }
7241
1f4ee503
PZ
7242 if (task && group_leader &&
7243 group_leader->attr.inherit != attr.inherit) {
7244 err = -EINVAL;
7245 goto err_task;
7246 }
7247
fbfc623f
YZ
7248 get_online_cpus();
7249
4dc0da86
AK
7250 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
7251 NULL, NULL);
d14b12d7
SE
7252 if (IS_ERR(event)) {
7253 err = PTR_ERR(event);
1f4ee503 7254 goto err_cpus;
d14b12d7
SE
7255 }
7256
e5d1367f
SE
7257 if (flags & PERF_FLAG_PID_CGROUP) {
7258 err = perf_cgroup_connect(pid, event, &attr, group_leader);
766d6c07
FW
7259 if (err) {
7260 __free_event(event);
1f4ee503 7261 goto err_cpus;
766d6c07 7262 }
e5d1367f
SE
7263 }
7264
53b25335
VW
7265 if (is_sampling_event(event)) {
7266 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
7267 err = -ENOTSUPP;
7268 goto err_alloc;
7269 }
7270 }
7271
766d6c07
FW
7272 account_event(event);
7273
89a1e187
PZ
7274 /*
7275 * Special case software events and allow them to be part of
7276 * any hardware group.
7277 */
7278 pmu = event->pmu;
b04243ef
PZ
7279
7280 if (group_leader &&
7281 (is_software_event(event) != is_software_event(group_leader))) {
7282 if (is_software_event(event)) {
7283 /*
7284 * If event and group_leader are not both a software
7285 * event, and event is, then group leader is not.
7286 *
7287 * Allow the addition of software events to !software
7288 * groups, this is safe because software events never
7289 * fail to schedule.
7290 */
7291 pmu = group_leader->pmu;
7292 } else if (is_software_event(group_leader) &&
7293 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
7294 /*
7295 * In case the group is a pure software group, and we
7296 * try to add a hardware event, move the whole group to
7297 * the hardware context.
7298 */
7299 move_group = 1;
7300 }
7301 }
89a1e187
PZ
7302
7303 /*
7304 * Get the target context (task or percpu):
7305 */
e2d37cd2 7306 ctx = find_get_context(pmu, task, event->cpu);
89a1e187
PZ
7307 if (IS_ERR(ctx)) {
7308 err = PTR_ERR(ctx);
c6be5a5c 7309 goto err_alloc;
89a1e187
PZ
7310 }
7311
fd1edb3a
PZ
7312 if (task) {
7313 put_task_struct(task);
7314 task = NULL;
7315 }
7316
ccff286d 7317 /*
cdd6c482 7318 * Look up the group leader (we will attach this event to it):
04289bb9 7319 */
ac9721f3 7320 if (group_leader) {
dc86cabe 7321 err = -EINVAL;
04289bb9 7322
04289bb9 7323 /*
ccff286d
IM
7324 * Do not allow a recursive hierarchy (this new sibling
7325 * becoming part of another group-sibling):
7326 */
7327 if (group_leader->group_leader != group_leader)
c3f00c70 7328 goto err_context;
ccff286d
IM
7329 /*
7330 * Do not allow to attach to a group in a different
7331 * task or CPU context:
04289bb9 7332 */
b04243ef
PZ
7333 if (move_group) {
7334 if (group_leader->ctx->type != ctx->type)
7335 goto err_context;
7336 } else {
7337 if (group_leader->ctx != ctx)
7338 goto err_context;
7339 }
7340
3b6f9e5c
PM
7341 /*
7342 * Only a group leader can be exclusive or pinned
7343 */
0d48696f 7344 if (attr.exclusive || attr.pinned)
c3f00c70 7345 goto err_context;
ac9721f3
PZ
7346 }
7347
7348 if (output_event) {
7349 err = perf_event_set_output(event, output_event);
7350 if (err)
c3f00c70 7351 goto err_context;
ac9721f3 7352 }
0793a61d 7353
a21b0b35
YD
7354 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
7355 f_flags);
ea635c64
AV
7356 if (IS_ERR(event_file)) {
7357 err = PTR_ERR(event_file);
c3f00c70 7358 goto err_context;
ea635c64 7359 }
9b51f66d 7360
b04243ef
PZ
7361 if (move_group) {
7362 struct perf_event_context *gctx = group_leader->ctx;
7363
7364 mutex_lock(&gctx->mutex);
46ce0fe9 7365 perf_remove_from_context(group_leader, false);
0231bb53
JO
7366
7367 /*
 7368 * Removing from the context leaves the event disabled.
 7369 * What we want here is the event in its initial startup
 7370 * state, ready to be added into the new context.
7371 */
7372 perf_event__state_init(group_leader);
b04243ef
PZ
7373 list_for_each_entry(sibling, &group_leader->sibling_list,
7374 group_entry) {
46ce0fe9 7375 perf_remove_from_context(sibling, false);
0231bb53 7376 perf_event__state_init(sibling);
b04243ef
PZ
7377 put_ctx(gctx);
7378 }
7379 mutex_unlock(&gctx->mutex);
7380 put_ctx(gctx);
ea635c64 7381 }
9b51f66d 7382
ad3a37de 7383 WARN_ON_ONCE(ctx->parent_ctx);
d859e29f 7384 mutex_lock(&ctx->mutex);
b04243ef
PZ
7385
7386 if (move_group) {
0cda4c02 7387 synchronize_rcu();
e2d37cd2 7388 perf_install_in_context(ctx, group_leader, event->cpu);
b04243ef
PZ
7389 get_ctx(ctx);
7390 list_for_each_entry(sibling, &group_leader->sibling_list,
7391 group_entry) {
e2d37cd2 7392 perf_install_in_context(ctx, sibling, event->cpu);
b04243ef
PZ
7393 get_ctx(ctx);
7394 }
7395 }
7396
e2d37cd2 7397 perf_install_in_context(ctx, event, event->cpu);
fe4b04fa 7398 perf_unpin_context(ctx);
d859e29f 7399 mutex_unlock(&ctx->mutex);
9b51f66d 7400
fbfc623f
YZ
7401 put_online_cpus();
7402
cdd6c482 7403 event->owner = current;
8882135b 7404
cdd6c482
IM
7405 mutex_lock(&current->perf_event_mutex);
7406 list_add_tail(&event->owner_entry, &current->perf_event_list);
7407 mutex_unlock(&current->perf_event_mutex);
082ff5a2 7408
c320c7b7
ACM
7409 /*
7410 * Precalculate sample_data sizes
7411 */
7412 perf_event__header_size(event);
6844c09d 7413 perf_event__id_header_size(event);
c320c7b7 7414
8a49542c
PZ
7415 /*
7416 * Drop the reference on the group_event after placing the
7417 * new event on the sibling_list. This ensures destruction
7418 * of the group leader will find the pointer to itself in
7419 * perf_group_detach().
7420 */
2903ff01 7421 fdput(group);
ea635c64
AV
7422 fd_install(event_fd, event_file);
7423 return event_fd;
0793a61d 7424
c3f00c70 7425err_context:
fe4b04fa 7426 perf_unpin_context(ctx);
ea635c64 7427 put_ctx(ctx);
c6be5a5c 7428err_alloc:
ea635c64 7429 free_event(event);
1f4ee503 7430err_cpus:
fbfc623f 7431 put_online_cpus();
1f4ee503 7432err_task:
e7d0bc04
PZ
7433 if (task)
7434 put_task_struct(task);
89a1e187 7435err_group_fd:
2903ff01 7436 fdput(group);
ea635c64
AV
7437err_fd:
7438 put_unused_fd(event_fd);
dc86cabe 7439 return err;
0793a61d
TG
7440}
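/*
 * Hypothetical usage sketch (not part of this file): how a userspace
 * caller might exercise the syscall above, creating a per-task group
 * leader plus one sibling in the same group. Field values are made up
 * for illustration and error handling is omitted.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *	int leader, sibling;
 *
 *	// pid 0 = calling task, cpu -1 = any cpu, group_fd -1 = new leader
 *	leader = syscall(__NR_perf_event_open, &attr, 0, -1, -1,
 *			 PERF_FLAG_FD_CLOEXEC);
 *	attr.config   = PERF_COUNT_HW_CPU_CYCLES;
 *	attr.disabled = 0;
 *	sibling = syscall(__NR_perf_event_open, &attr, 0, -1, leader, 0);
 */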
7441
fb0459d7
AV
7442/**
7443 * perf_event_create_kernel_counter
7444 *
7445 * @attr: attributes of the counter to create
 7446 * @cpu: cpu to which the counter is bound
38a81da2 7447 * @task: task to profile (NULL for percpu)
fb0459d7
AV
7448 */
7449struct perf_event *
7450perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
38a81da2 7451 struct task_struct *task,
4dc0da86
AK
7452 perf_overflow_handler_t overflow_handler,
7453 void *context)
fb0459d7 7454{
fb0459d7 7455 struct perf_event_context *ctx;
c3f00c70 7456 struct perf_event *event;
fb0459d7 7457 int err;
d859e29f 7458
fb0459d7
AV
7459 /*
7460 * Get the target context (task or percpu):
7461 */
d859e29f 7462
4dc0da86
AK
7463 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
7464 overflow_handler, context);
c3f00c70
PZ
7465 if (IS_ERR(event)) {
7466 err = PTR_ERR(event);
7467 goto err;
7468 }
d859e29f 7469
f8697762
JO
 7470 /* Mark owner so we can distinguish it from user events. */
7471 event->owner = EVENT_OWNER_KERNEL;
7472
766d6c07
FW
7473 account_event(event);
7474
38a81da2 7475 ctx = find_get_context(event->pmu, task, cpu);
c6567f64
FW
7476 if (IS_ERR(ctx)) {
7477 err = PTR_ERR(ctx);
c3f00c70 7478 goto err_free;
d859e29f 7479 }
fb0459d7 7480
fb0459d7
AV
7481 WARN_ON_ONCE(ctx->parent_ctx);
7482 mutex_lock(&ctx->mutex);
7483 perf_install_in_context(ctx, event, cpu);
fe4b04fa 7484 perf_unpin_context(ctx);
fb0459d7
AV
7485 mutex_unlock(&ctx->mutex);
7486
fb0459d7
AV
7487 return event;
7488
c3f00c70
PZ
7489err_free:
7490 free_event(event);
7491err:
c6567f64 7492 return ERR_PTR(err);
9b51f66d 7493}
fb0459d7 7494EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
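/*
 * Hypothetical in-kernel usage sketch (not part of this file): the sort
 * of call an NMI-watchdog-style user could make to get a per-CPU cycle
 * counter with an overflow callback. The names and sample period below
 * are illustrative only.
 *
 *	static void my_overflow(struct perf_event *event,
 *				struct perf_sample_data *data,
 *				struct pt_regs *regs)
 *	{
 *		// runs from the PMU interrupt; keep it short
 *	}
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.pinned		= 1,
 *		.sample_period	= 1000000,
 *	};
 *
 *	// NULL task => per-CPU event bound to @cpu
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */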
9b51f66d 7495
0cda4c02
YZ
7496void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7497{
7498 struct perf_event_context *src_ctx;
7499 struct perf_event_context *dst_ctx;
7500 struct perf_event *event, *tmp;
7501 LIST_HEAD(events);
7502
7503 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
7504 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
7505
7506 mutex_lock(&src_ctx->mutex);
7507 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
7508 event_entry) {
46ce0fe9 7509 perf_remove_from_context(event, false);
9a545de0 7510 unaccount_event_cpu(event, src_cpu);
0cda4c02 7511 put_ctx(src_ctx);
9886167d 7512 list_add(&event->migrate_entry, &events);
0cda4c02
YZ
7513 }
7514 mutex_unlock(&src_ctx->mutex);
7515
7516 synchronize_rcu();
7517
7518 mutex_lock(&dst_ctx->mutex);
9886167d
PZ
7519 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
7520 list_del(&event->migrate_entry);
0cda4c02
YZ
7521 if (event->state >= PERF_EVENT_STATE_OFF)
7522 event->state = PERF_EVENT_STATE_INACTIVE;
9a545de0 7523 account_event_cpu(event, dst_cpu);
0cda4c02
YZ
7524 perf_install_in_context(dst_ctx, event, dst_cpu);
7525 get_ctx(dst_ctx);
7526 }
7527 mutex_unlock(&dst_ctx->mutex);
7528}
7529EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
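/*
 * Note on the migration above: events are first detached from the source
 * CPU context under src_ctx->mutex and parked on a private list; then,
 * after synchronize_rcu() guarantees no observer still sees them in the
 * old context, they are reinstalled into the destination CPU context
 * under dst_ctx->mutex. Typical callers are, for example, uncore-style
 * PMU drivers moving their events when the CPU servicing them goes
 * offline.
 */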
7530
cdd6c482 7531static void sync_child_event(struct perf_event *child_event,
38b200d6 7532 struct task_struct *child)
d859e29f 7533{
cdd6c482 7534 struct perf_event *parent_event = child_event->parent;
8bc20959 7535 u64 child_val;
d859e29f 7536
cdd6c482
IM
7537 if (child_event->attr.inherit_stat)
7538 perf_event_read_event(child_event, child);
38b200d6 7539
b5e58793 7540 child_val = perf_event_count(child_event);
d859e29f
PM
7541
7542 /*
7543 * Add back the child's count to the parent's count:
7544 */
a6e6dea6 7545 atomic64_add(child_val, &parent_event->child_count);
cdd6c482
IM
7546 atomic64_add(child_event->total_time_enabled,
7547 &parent_event->child_total_time_enabled);
7548 atomic64_add(child_event->total_time_running,
7549 &parent_event->child_total_time_running);
d859e29f
PM
7550
7551 /*
cdd6c482 7552 * Remove this event from the parent's list
d859e29f 7553 */
cdd6c482
IM
7554 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7555 mutex_lock(&parent_event->child_mutex);
7556 list_del_init(&child_event->child_list);
7557 mutex_unlock(&parent_event->child_mutex);
d859e29f
PM
7558
7559 /*
cdd6c482 7560 * Release the parent event, if this was the last
d859e29f
PM
7561 * reference to it.
7562 */
a6fa941d 7563 put_event(parent_event);
d859e29f
PM
7564}
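/*
 * After the fold-back above, reading the parent event reports its own
 * count plus child_count, i.e. the sum over all exited children (see
 * perf_event_count()), and likewise for the child_total_time_* fields,
 * so nothing accumulated by the child is lost when it exits.
 */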
7565
9b51f66d 7566static void
cdd6c482
IM
7567__perf_event_exit_task(struct perf_event *child_event,
7568 struct perf_event_context *child_ctx,
38b200d6 7569 struct task_struct *child)
9b51f66d 7570{
1903d50c
PZ
7571 /*
7572 * Do not destroy the 'original' grouping; because of the context
7573 * switch optimization the original events could've ended up in a
7574 * random child task.
7575 *
7576 * If we were to destroy the original group, all group related
7577 * operations would cease to function properly after this random
7578 * child dies.
7579 *
 7580 * Do destroy all inherited groups; we don't care about those,
 7581 * and being thorough is better.
7582 */
7583 perf_remove_from_context(child_event, !!child_event->parent);
0cc0c027 7584
9b51f66d 7585 /*
38b435b1 7586 * It can happen that the parent exits first, and has events
9b51f66d 7587 * that are still around due to the child reference. These
38b435b1 7588 * events need to be zapped.
9b51f66d 7589 */
38b435b1 7590 if (child_event->parent) {
cdd6c482
IM
7591 sync_child_event(child_event, child);
7592 free_event(child_event);
4bcf349a 7593 }
9b51f66d
IM
7594}
7595
8dc85d54 7596static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9b51f66d 7597{
ebf905fc 7598 struct perf_event *child_event, *next;
4a1c0f26 7599 struct perf_event_context *child_ctx, *parent_ctx;
a63eaf34 7600 unsigned long flags;
9b51f66d 7601
8dc85d54 7602 if (likely(!child->perf_event_ctxp[ctxn])) {
cdd6c482 7603 perf_event_task(child, NULL, 0);
9b51f66d 7604 return;
9f498cc5 7605 }
9b51f66d 7606
a63eaf34 7607 local_irq_save(flags);
ad3a37de
PM
7608 /*
7609 * We can't reschedule here because interrupts are disabled,
7610 * and either child is current or it is a task that can't be
7611 * scheduled, so we are now safe from rescheduling changing
7612 * our context.
7613 */
806839b2 7614 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
c93f7669
PM
7615
7616 /*
7617 * Take the context lock here so that if find_get_context is
cdd6c482 7618 * reading child->perf_event_ctxp, we wait until it has
c93f7669
PM
7619 * incremented the context's refcount before we do put_ctx below.
7620 */
e625cce1 7621 raw_spin_lock(&child_ctx->lock);
04dc2dbb 7622 task_ctx_sched_out(child_ctx);
8dc85d54 7623 child->perf_event_ctxp[ctxn] = NULL;
4a1c0f26
PZ
7624
7625 /*
7626 * In order to avoid freeing: child_ctx->parent_ctx->task
7627 * under perf_event_context::lock, grab another reference.
7628 */
7629 parent_ctx = child_ctx->parent_ctx;
7630 if (parent_ctx)
7631 get_ctx(parent_ctx);
7632
71a851b4
PZ
7633 /*
7634 * If this context is a clone; unclone it so it can't get
7635 * swapped to another process while we're removing all
cdd6c482 7636 * the events from it.
71a851b4
PZ
7637 */
7638 unclone_ctx(child_ctx);
5e942bb3 7639 update_context_time(child_ctx);
e625cce1 7640 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
9f498cc5 7641
4a1c0f26
PZ
7642 /*
7643 * Now that we no longer hold perf_event_context::lock, drop
7644 * our extra child_ctx->parent_ctx reference.
7645 */
7646 if (parent_ctx)
7647 put_ctx(parent_ctx);
7648
9f498cc5 7649 /*
cdd6c482
IM
7650 * Report the task dead after unscheduling the events so that we
7651 * won't get any samples after PERF_RECORD_EXIT. We can however still
7652 * get a few PERF_RECORD_READ events.
9f498cc5 7653 */
cdd6c482 7654 perf_event_task(child, child_ctx, 0);
a63eaf34 7655
66fff224
PZ
7656 /*
7657 * We can recurse on the same lock type through:
7658 *
cdd6c482
IM
7659 * __perf_event_exit_task()
7660 * sync_child_event()
a6fa941d
AV
7661 * put_event()
7662 * mutex_lock(&ctx->mutex)
66fff224
PZ
7663 *
 7664 * But since it's the parent context it won't be the same instance.
7665 */
a0507c84 7666 mutex_lock(&child_ctx->mutex);
a63eaf34 7667
ebf905fc 7668 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
cdd6c482 7669 __perf_event_exit_task(child_event, child_ctx, child);
8bc20959 7670
a63eaf34
PM
7671 mutex_unlock(&child_ctx->mutex);
7672
7673 put_ctx(child_ctx);
9b51f66d
IM
7674}
7675
8dc85d54
PZ
7676/*
7677 * When a child task exits, feed back event values to parent events.
7678 */
7679void perf_event_exit_task(struct task_struct *child)
7680{
8882135b 7681 struct perf_event *event, *tmp;
8dc85d54
PZ
7682 int ctxn;
7683
8882135b
PZ
7684 mutex_lock(&child->perf_event_mutex);
7685 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
7686 owner_entry) {
7687 list_del_init(&event->owner_entry);
7688
7689 /*
7690 * Ensure the list deletion is visible before we clear
 7691 * the owner; this closes a race against perf_release() where
7692 * we need to serialize on the owner->perf_event_mutex.
7693 */
7694 smp_wmb();
7695 event->owner = NULL;
7696 }
7697 mutex_unlock(&child->perf_event_mutex);
7698
8dc85d54
PZ
7699 for_each_task_context_nr(ctxn)
7700 perf_event_exit_task_context(child, ctxn);
7701}
7702
889ff015
FW
7703static void perf_free_event(struct perf_event *event,
7704 struct perf_event_context *ctx)
7705{
7706 struct perf_event *parent = event->parent;
7707
7708 if (WARN_ON_ONCE(!parent))
7709 return;
7710
7711 mutex_lock(&parent->child_mutex);
7712 list_del_init(&event->child_list);
7713 mutex_unlock(&parent->child_mutex);
7714
a6fa941d 7715 put_event(parent);
889ff015 7716
8a49542c 7717 perf_group_detach(event);
889ff015
FW
7718 list_del_event(event, ctx);
7719 free_event(event);
7720}
7721
bbbee908
PZ
7722/*
7723 * free an unexposed, unused context as created by inheritance by
8dc85d54 7724 * perf_event_init_task below, used by fork() in case of failure.
bbbee908 7725 */
cdd6c482 7726void perf_event_free_task(struct task_struct *task)
bbbee908 7727{
8dc85d54 7728 struct perf_event_context *ctx;
cdd6c482 7729 struct perf_event *event, *tmp;
8dc85d54 7730 int ctxn;
bbbee908 7731
8dc85d54
PZ
7732 for_each_task_context_nr(ctxn) {
7733 ctx = task->perf_event_ctxp[ctxn];
7734 if (!ctx)
7735 continue;
bbbee908 7736
8dc85d54 7737 mutex_lock(&ctx->mutex);
bbbee908 7738again:
8dc85d54
PZ
7739 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
7740 group_entry)
7741 perf_free_event(event, ctx);
bbbee908 7742
8dc85d54
PZ
7743 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
7744 group_entry)
7745 perf_free_event(event, ctx);
bbbee908 7746
8dc85d54
PZ
7747 if (!list_empty(&ctx->pinned_groups) ||
7748 !list_empty(&ctx->flexible_groups))
7749 goto again;
bbbee908 7750
8dc85d54 7751 mutex_unlock(&ctx->mutex);
bbbee908 7752
8dc85d54
PZ
7753 put_ctx(ctx);
7754 }
889ff015
FW
7755}
7756
4e231c79
PZ
7757void perf_event_delayed_put(struct task_struct *task)
7758{
7759 int ctxn;
7760
7761 for_each_task_context_nr(ctxn)
7762 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
7763}
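/*
 * perf_event_delayed_put() is meant to run at the final task_struct
 * free; it merely asserts that perf_event_exit_task() or
 * perf_event_free_task() already dropped every per-ctxn context.
 */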
7764
97dee4f3
PZ
7765/*
 7766 * inherit an event from parent task to child task:
7767 */
7768static struct perf_event *
7769inherit_event(struct perf_event *parent_event,
7770 struct task_struct *parent,
7771 struct perf_event_context *parent_ctx,
7772 struct task_struct *child,
7773 struct perf_event *group_leader,
7774 struct perf_event_context *child_ctx)
7775{
7776 struct perf_event *child_event;
cee010ec 7777 unsigned long flags;
97dee4f3
PZ
7778
7779 /*
7780 * Instead of creating recursive hierarchies of events,
7781 * we link inherited events back to the original parent,
 7782 * which is guaranteed to have a filp that we use as the
 7783 * reference count:
7784 */
7785 if (parent_event->parent)
7786 parent_event = parent_event->parent;
7787
7788 child_event = perf_event_alloc(&parent_event->attr,
7789 parent_event->cpu,
d580ff86 7790 child,
97dee4f3 7791 group_leader, parent_event,
4dc0da86 7792 NULL, NULL);
97dee4f3
PZ
7793 if (IS_ERR(child_event))
7794 return child_event;
a6fa941d 7795
fadfe7be
JO
7796 if (is_orphaned_event(parent_event) ||
7797 !atomic_long_inc_not_zero(&parent_event->refcount)) {
a6fa941d
AV
7798 free_event(child_event);
7799 return NULL;
7800 }
7801
97dee4f3
PZ
7802 get_ctx(child_ctx);
7803
7804 /*
7805 * Make the child state follow the state of the parent event,
7806 * not its attr.disabled bit. We hold the parent's mutex,
7807 * so we won't race with perf_event_{en, dis}able_family.
7808 */
7809 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
7810 child_event->state = PERF_EVENT_STATE_INACTIVE;
7811 else
7812 child_event->state = PERF_EVENT_STATE_OFF;
7813
7814 if (parent_event->attr.freq) {
7815 u64 sample_period = parent_event->hw.sample_period;
7816 struct hw_perf_event *hwc = &child_event->hw;
7817
7818 hwc->sample_period = sample_period;
7819 hwc->last_period = sample_period;
7820
7821 local64_set(&hwc->period_left, sample_period);
7822 }
7823
7824 child_event->ctx = child_ctx;
7825 child_event->overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
7826 child_event->overflow_handler_context
7827 = parent_event->overflow_handler_context;
97dee4f3 7828
614b6780
TG
7829 /*
7830 * Precalculate sample_data sizes
7831 */
7832 perf_event__header_size(child_event);
6844c09d 7833 perf_event__id_header_size(child_event);
614b6780 7834
97dee4f3
PZ
7835 /*
7836 * Link it up in the child's context:
7837 */
cee010ec 7838 raw_spin_lock_irqsave(&child_ctx->lock, flags);
97dee4f3 7839 add_event_to_ctx(child_event, child_ctx);
cee010ec 7840 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
97dee4f3 7841
97dee4f3
PZ
7842 /*
7843 * Link this into the parent event's child list
7844 */
7845 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7846 mutex_lock(&parent_event->child_mutex);
7847 list_add_tail(&child_event->child_list, &parent_event->child_list);
7848 mutex_unlock(&parent_event->child_mutex);
7849
7850 return child_event;
7851}
7852
7853static int inherit_group(struct perf_event *parent_event,
7854 struct task_struct *parent,
7855 struct perf_event_context *parent_ctx,
7856 struct task_struct *child,
7857 struct perf_event_context *child_ctx)
7858{
7859 struct perf_event *leader;
7860 struct perf_event *sub;
7861 struct perf_event *child_ctr;
7862
7863 leader = inherit_event(parent_event, parent, parent_ctx,
7864 child, NULL, child_ctx);
7865 if (IS_ERR(leader))
7866 return PTR_ERR(leader);
7867 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
7868 child_ctr = inherit_event(sub, parent, parent_ctx,
7869 child, leader, child_ctx);
7870 if (IS_ERR(child_ctr))
7871 return PTR_ERR(child_ctr);
7872 }
7873 return 0;
889ff015
FW
7874}
7875
7876static int
7877inherit_task_group(struct perf_event *event, struct task_struct *parent,
7878 struct perf_event_context *parent_ctx,
8dc85d54 7879 struct task_struct *child, int ctxn,
889ff015
FW
7880 int *inherited_all)
7881{
7882 int ret;
8dc85d54 7883 struct perf_event_context *child_ctx;
889ff015
FW
7884
7885 if (!event->attr.inherit) {
7886 *inherited_all = 0;
7887 return 0;
bbbee908
PZ
7888 }
7889
fe4b04fa 7890 child_ctx = child->perf_event_ctxp[ctxn];
889ff015
FW
7891 if (!child_ctx) {
7892 /*
7893 * This is executed from the parent task context, so
7894 * inherit events that have been marked for cloning.
7895 * First allocate and initialize a context for the
7896 * child.
7897 */
bbbee908 7898
734df5ab 7899 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
889ff015
FW
7900 if (!child_ctx)
7901 return -ENOMEM;
bbbee908 7902
8dc85d54 7903 child->perf_event_ctxp[ctxn] = child_ctx;
889ff015
FW
7904 }
7905
7906 ret = inherit_group(event, parent, parent_ctx,
7907 child, child_ctx);
7908
7909 if (ret)
7910 *inherited_all = 0;
7911
7912 return ret;
bbbee908
PZ
7913}
7914
9b51f66d 7915/*
cdd6c482 7916 * Initialize the perf_event context in task_struct
9b51f66d 7917 */
985c8dcb 7918static int perf_event_init_context(struct task_struct *child, int ctxn)
9b51f66d 7919{
889ff015 7920 struct perf_event_context *child_ctx, *parent_ctx;
cdd6c482
IM
7921 struct perf_event_context *cloned_ctx;
7922 struct perf_event *event;
9b51f66d 7923 struct task_struct *parent = current;
564c2b21 7924 int inherited_all = 1;
dddd3379 7925 unsigned long flags;
6ab423e0 7926 int ret = 0;
9b51f66d 7927
8dc85d54 7928 if (likely(!parent->perf_event_ctxp[ctxn]))
6ab423e0
PZ
7929 return 0;
7930
ad3a37de 7931 /*
25346b93
PM
7932 * If the parent's context is a clone, pin it so it won't get
7933 * swapped under us.
ad3a37de 7934 */
8dc85d54 7935 parent_ctx = perf_pin_task_context(parent, ctxn);
ffb4ef21
PZ
7936 if (!parent_ctx)
7937 return 0;
25346b93 7938
ad3a37de
PM
7939 /*
7940 * No need to check if parent_ctx != NULL here; since we saw
7941 * it non-NULL earlier, the only reason for it to become NULL
7942 * is if we exit, and since we're currently in the middle of
7943 * a fork we can't be exiting at the same time.
7944 */
ad3a37de 7945
9b51f66d
IM
7946 /*
7947 * Lock the parent list. No need to lock the child - not PID
7948 * hashed yet and not running, so nobody can access it.
7949 */
d859e29f 7950 mutex_lock(&parent_ctx->mutex);
9b51f66d
IM
7951
7952 /*
 7953 * We don't have to disable NMIs - we are only looking at
7954 * the list, not manipulating it:
7955 */
889ff015 7956 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
8dc85d54
PZ
7957 ret = inherit_task_group(event, parent, parent_ctx,
7958 child, ctxn, &inherited_all);
889ff015
FW
7959 if (ret)
7960 break;
7961 }
b93f7978 7962
dddd3379
TG
7963 /*
 7964 * We can't hold ctx->lock when iterating the ->flexible_groups list due
7965 * to allocations, but we need to prevent rotation because
7966 * rotate_ctx() will change the list from interrupt context.
7967 */
7968 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7969 parent_ctx->rotate_disable = 1;
7970 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7971
889ff015 7972 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
8dc85d54
PZ
7973 ret = inherit_task_group(event, parent, parent_ctx,
7974 child, ctxn, &inherited_all);
889ff015 7975 if (ret)
9b51f66d 7976 break;
564c2b21
PM
7977 }
7978
dddd3379
TG
7979 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7980 parent_ctx->rotate_disable = 0;
dddd3379 7981
8dc85d54 7982 child_ctx = child->perf_event_ctxp[ctxn];
889ff015 7983
05cbaa28 7984 if (child_ctx && inherited_all) {
564c2b21
PM
7985 /*
7986 * Mark the child context as a clone of the parent
7987 * context, or of whatever the parent is a clone of.
c5ed5145
PZ
7988 *
7989 * Note that if the parent is a clone, the holding of
7990 * parent_ctx->lock avoids it from being uncloned.
564c2b21 7991 */
c5ed5145 7992 cloned_ctx = parent_ctx->parent_ctx;
ad3a37de
PM
7993 if (cloned_ctx) {
7994 child_ctx->parent_ctx = cloned_ctx;
25346b93 7995 child_ctx->parent_gen = parent_ctx->parent_gen;
564c2b21
PM
7996 } else {
7997 child_ctx->parent_ctx = parent_ctx;
7998 child_ctx->parent_gen = parent_ctx->generation;
7999 }
8000 get_ctx(child_ctx->parent_ctx);
9b51f66d
IM
8001 }
8002
c5ed5145 8003 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
d859e29f 8004 mutex_unlock(&parent_ctx->mutex);
6ab423e0 8005
25346b93 8006 perf_unpin_context(parent_ctx);
fe4b04fa 8007 put_ctx(parent_ctx);
ad3a37de 8008
6ab423e0 8009 return ret;
9b51f66d
IM
8010}
8011
8dc85d54
PZ
8012/*
8013 * Initialize the perf_event context in task_struct
8014 */
8015int perf_event_init_task(struct task_struct *child)
8016{
8017 int ctxn, ret;
8018
8550d7cb
ON
8019 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
8020 mutex_init(&child->perf_event_mutex);
8021 INIT_LIST_HEAD(&child->perf_event_list);
8022
8dc85d54
PZ
8023 for_each_task_context_nr(ctxn) {
8024 ret = perf_event_init_context(child, ctxn);
8025 if (ret)
8026 return ret;
8027 }
8028
8029 return 0;
8030}
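/*
 * perf_event_init_task() is invoked from the fork path; if any of the
 * per-ctxn initializations above fails, the half-built contexts are torn
 * down again via perf_event_free_task(), the fork() failure path noted
 * earlier.
 */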
8031
220b140b
PM
8032static void __init perf_event_init_all_cpus(void)
8033{
b28ab83c 8034 struct swevent_htable *swhash;
220b140b 8035 int cpu;
220b140b
PM
8036
8037 for_each_possible_cpu(cpu) {
b28ab83c
PZ
8038 swhash = &per_cpu(swevent_htable, cpu);
8039 mutex_init(&swhash->hlist_mutex);
e9d2b064 8040 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
220b140b
PM
8041 }
8042}
8043
0db0628d 8044static void perf_event_init_cpu(int cpu)
0793a61d 8045{
108b02cf 8046 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
0793a61d 8047
b28ab83c 8048 mutex_lock(&swhash->hlist_mutex);
39af6b16 8049 swhash->online = true;
4536e4d1 8050 if (swhash->hlist_refcount > 0) {
76e1d904
FW
8051 struct swevent_hlist *hlist;
8052
b28ab83c
PZ
8053 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
8054 WARN_ON(!hlist);
8055 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 8056 }
b28ab83c 8057 mutex_unlock(&swhash->hlist_mutex);
0793a61d
TG
8058}
8059
c277443c 8060#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
e9d2b064 8061static void perf_pmu_rotate_stop(struct pmu *pmu)
0793a61d 8062{
e9d2b064
PZ
8063 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
8064
8065 WARN_ON(!irqs_disabled());
8066
8067 list_del_init(&cpuctx->rotation_list);
8068}
8069
108b02cf 8070static void __perf_event_exit_context(void *__info)
0793a61d 8071{
46ce0fe9 8072 struct remove_event re = { .detach_group = false };
108b02cf 8073 struct perf_event_context *ctx = __info;
0793a61d 8074
108b02cf 8075 perf_pmu_rotate_stop(ctx->pmu);
b5ab4cd5 8076
e3703f8c 8077 rcu_read_lock();
46ce0fe9
PZ
8078 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
8079 __perf_remove_from_context(&re);
e3703f8c 8080 rcu_read_unlock();
0793a61d 8081}
108b02cf
PZ
8082
8083static void perf_event_exit_cpu_context(int cpu)
8084{
8085 struct perf_event_context *ctx;
8086 struct pmu *pmu;
8087 int idx;
8088
8089 idx = srcu_read_lock(&pmus_srcu);
8090 list_for_each_entry_rcu(pmu, &pmus, entry) {
917bdd1c 8091 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
108b02cf
PZ
8092
8093 mutex_lock(&ctx->mutex);
8094 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
8095 mutex_unlock(&ctx->mutex);
8096 }
8097 srcu_read_unlock(&pmus_srcu, idx);
108b02cf
PZ
8098}
8099
cdd6c482 8100static void perf_event_exit_cpu(int cpu)
0793a61d 8101{
b28ab83c 8102 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
d859e29f 8103
e3703f8c
PZ
8104 perf_event_exit_cpu_context(cpu);
8105
b28ab83c 8106 mutex_lock(&swhash->hlist_mutex);
39af6b16 8107 swhash->online = false;
b28ab83c
PZ
8108 swevent_hlist_release(swhash);
8109 mutex_unlock(&swhash->hlist_mutex);
0793a61d
TG
8110}
8111#else
cdd6c482 8112static inline void perf_event_exit_cpu(int cpu) { }
0793a61d
TG
8113#endif
8114
c277443c
PZ
8115static int
8116perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
8117{
8118 int cpu;
8119
8120 for_each_online_cpu(cpu)
8121 perf_event_exit_cpu(cpu);
8122
8123 return NOTIFY_OK;
8124}
8125
8126/*
8127 * Run the perf reboot notifier at the very last possible moment so that
8128 * the generic watchdog code runs as long as possible.
8129 */
8130static struct notifier_block perf_reboot_notifier = {
8131 .notifier_call = perf_reboot,
8132 .priority = INT_MIN,
8133};
8134
0db0628d 8135static int
0793a61d
TG
8136perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
8137{
8138 unsigned int cpu = (long)hcpu;
8139
4536e4d1 8140 switch (action & ~CPU_TASKS_FROZEN) {
0793a61d
TG
8141
8142 case CPU_UP_PREPARE:
5e11637e 8143 case CPU_DOWN_FAILED:
cdd6c482 8144 perf_event_init_cpu(cpu);
0793a61d
TG
8145 break;
8146
5e11637e 8147 case CPU_UP_CANCELED:
0793a61d 8148 case CPU_DOWN_PREPARE:
cdd6c482 8149 perf_event_exit_cpu(cpu);
0793a61d 8150 break;
0793a61d
TG
8151 default:
8152 break;
8153 }
8154
8155 return NOTIFY_OK;
8156}
8157
cdd6c482 8158void __init perf_event_init(void)
0793a61d 8159{
3c502e7a
JW
8160 int ret;
8161
2e80a82a
PZ
8162 idr_init(&pmu_idr);
8163
220b140b 8164 perf_event_init_all_cpus();
b0a873eb 8165 init_srcu_struct(&pmus_srcu);
2e80a82a
PZ
8166 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
8167 perf_pmu_register(&perf_cpu_clock, NULL, -1);
8168 perf_pmu_register(&perf_task_clock, NULL, -1);
b0a873eb
PZ
8169 perf_tp_register();
8170 perf_cpu_notifier(perf_cpu_notify);
c277443c 8171 register_reboot_notifier(&perf_reboot_notifier);
3c502e7a
JW
8172
8173 ret = init_hw_breakpoint();
8174 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
b2029520
GN
8175
8176 /* do not patch jump label more than once per second */
8177 jump_label_rate_limit(&perf_sched_events, HZ);
b01c3a00
JO
8178
8179 /*
8180 * Build time assertion that we keep the data_head at the intended
8181 * location. IOW, validation we got the __reserved[] size right.
8182 */
8183 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
8184 != 1024);
0793a61d 8185}
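/*
 * Illustrative note on the BUILD_BUG_ON above: userspace relies on
 * data_head sitting at byte offset 1024 of the mmap control page. A
 * sketched reader does roughly:
 *
 *	struct perf_event_mmap_page *pc = mapped_control_page;
 *	u64 head = pc->data_head;
 *	rmb();			// pairs with the kernel's write barrier
 *	... consume records up to head ...
 *	pc->data_tail = head;	// if the mapping is writable
 */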
abe43400
PZ
8186
8187static int __init perf_event_sysfs_init(void)
8188{
8189 struct pmu *pmu;
8190 int ret;
8191
8192 mutex_lock(&pmus_lock);
8193
8194 ret = bus_register(&pmu_bus);
8195 if (ret)
8196 goto unlock;
8197
8198 list_for_each_entry(pmu, &pmus, entry) {
8199 if (!pmu->name || pmu->type < 0)
8200 continue;
8201
8202 ret = pmu_dev_alloc(pmu);
8203 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
8204 }
8205 pmu_bus_running = 1;
8206 ret = 0;
8207
8208unlock:
8209 mutex_unlock(&pmus_lock);
8210
8211 return ret;
8212}
8213device_initcall(perf_event_sysfs_init);
e5d1367f
SE
8214
8215#ifdef CONFIG_CGROUP_PERF
eb95419b
TH
8216static struct cgroup_subsys_state *
8217perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
e5d1367f
SE
8218{
8219 struct perf_cgroup *jc;
e5d1367f 8220
1b15d055 8221 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
e5d1367f
SE
8222 if (!jc)
8223 return ERR_PTR(-ENOMEM);
8224
e5d1367f
SE
8225 jc->info = alloc_percpu(struct perf_cgroup_info);
8226 if (!jc->info) {
8227 kfree(jc);
8228 return ERR_PTR(-ENOMEM);
8229 }
8230
e5d1367f
SE
8231 return &jc->css;
8232}
8233
eb95419b 8234static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
e5d1367f 8235{
eb95419b
TH
8236 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
8237
e5d1367f
SE
8238 free_percpu(jc->info);
8239 kfree(jc);
8240}
8241
8242static int __perf_cgroup_move(void *info)
8243{
8244 struct task_struct *task = info;
8245 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
8246 return 0;
8247}
8248
eb95419b
TH
8249static void perf_cgroup_attach(struct cgroup_subsys_state *css,
8250 struct cgroup_taskset *tset)
e5d1367f 8251{
bb9d97b6
TH
8252 struct task_struct *task;
8253
924f0d9a 8254 cgroup_taskset_for_each(task, tset)
bb9d97b6 8255 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
8256}
8257
eb95419b
TH
8258static void perf_cgroup_exit(struct cgroup_subsys_state *css,
8259 struct cgroup_subsys_state *old_css,
761b3ef5 8260 struct task_struct *task)
e5d1367f
SE
8261{
8262 /*
8263 * cgroup_exit() is called in the copy_process() failure path.
 8264 * Ignore this case since the task hasn't run yet; this avoids
 8265 * trying to poke half-freed task state from generic code.
8266 */
8267 if (!(task->flags & PF_EXITING))
8268 return;
8269
bb9d97b6 8270 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
8271}
8272
073219e9 8273struct cgroup_subsys perf_event_cgrp_subsys = {
92fb9748
TH
8274 .css_alloc = perf_cgroup_css_alloc,
8275 .css_free = perf_cgroup_css_free,
e7e7ee2e 8276 .exit = perf_cgroup_exit,
bb9d97b6 8277 .attach = perf_cgroup_attach,
e5d1367f
SE
8278};
8279#endif /* CONFIG_CGROUP_PERF */