kernel/events/core.c
/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/cgroup.h>

#include "internal.h"

#include <asm/irq_regs.h>

struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

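/*
 * Executed via smp_call_function_single(). When a target task is given,
 * bail out with -EAGAIN unless that task is still current on this CPU.
 */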
static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly	= DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly	= DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly	= DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

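/*
 * Recompute the time budget (in ns) that a perf interrupt may consume
 * per sample: a percentage of the current sample period.
 */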
void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	do_div(tmp, 100);
	ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
}

static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	update_perf_cpu_limits();

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static void perf_duration_warn(struct irq_work *w)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	local_samples_len = __get_cpu_var(running_sample_length);
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	printk_ratelimited(KERN_WARNING
			"perf interrupt took too long (%lld > %lld), lowering "
			"kernel.perf_event_max_sample_rate to %d\n",
			avg_local_sample_len, allowed_ns >> 1,
			sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	if (allowed_ns == 0)
		return;

	/* decay the counter by 1 average sample */
	local_samples_len = __get_cpu_var(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__get_cpu_var(running_sample_length) = local_samples_len;

	/*
	 * note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	if (avg_local_sample_len <= allowed_ns)
		return;

	if (max_samples_per_tick <= 1)
		return;

	max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	update_perf_cpu_limits();

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     avg_local_sample_len, allowed_ns >> 1,
			     sysctl_perf_event_sample_rate);
	}
}

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

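/*
 * Acquire the CPU context lock and, when a task context is supplied,
 * nest its lock inside; perf_ctx_unlock() releases in reverse order.
 */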
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_css(task, perf_event_cgrp_id),
			    struct perf_cgroup, css);
}

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups.  If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

static inline void perf_put_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	perf_put_cgroup(event);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->unique_pmu != pmu)
			continue; /* ensure we process each cpuctx once */

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to pass
				 * task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * the cgroup during ctxsw. Cgroup events were not scheduled
	 * out during ctxsw if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_context *cpuctx;
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	int rotations = 0;

	WARN_ON(!irqs_disabled());

	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);

	rotations = perf_rotate_context(cpuctx);

	/*
	 * arm timer if needed
	 */
	if (rotations) {
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
		ret = HRTIMER_RESTART;
	}

	return ret;
}

/* CPU is going down */
void perf_cpu_hrtimer_cancel(int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	if (WARN_ON(cpu != smp_processor_id()))
		return;

	local_irq_save(flags);

	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		if (pmu->task_ctx_nr == perf_sw_context)
			continue;

		hrtimer_cancel(&cpuctx->hrtimer);
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

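/*
 * Set up the per-CPU-context hrtimer used to multiplex (rotate) events;
 * software PMUs never need multiplexing and are skipped.
 */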
static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
	struct hrtimer *hr = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	int timer;

	/* no multiplexing needed for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	timer = pmu->hrtimer_interval_ms;
	if (timer < 1)
		timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);

	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	hr->function = perf_cpu_hrtimer_handler;
}

static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *hr = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;

	/* not for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	if (hrtimer_active(hr))
		return;

	if (!hrtimer_callback_running(hr))
		__hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
					 0, HRTIMER_MODE_REL_PINNED, 0);
}

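/*
 * perf_pmu_disable()/perf_pmu_enable() calls nest: the PMU is disabled
 * on the first call and re-enabled only when the per-CPU disable count
 * drops back to zero.
 */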
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

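/*
 * Context reference counting: a context is freed via RCU once the last
 * reference is dropped, releasing its parent context and owning task
 * along with it.
 */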
static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		kfree_rcu(ctx, rcu_head);
	}
}

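/*
 * Sever the link to a cloned parent context, dropping the parent's
 * reference; the generation counter is bumped because the clone
 * relationship has changed.
 */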
static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
	ctx->generation++;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was preemptible -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section is non-preemptible.
	 */
	preempt_disable();
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			rcu_read_unlock();
			preempt_enable();
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	preempt_enable();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;

}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	if (has_branch_stack(event))
		ctx->nr_branch_stack++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;

	ctx->generation++;
}

/*
 * Initialize event state based on the perf_event_attr::disabled.
 */
static inline void perf_event__state_init(struct perf_event *event)
{
	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
					      PERF_EVENT_STATE_INACTIVE;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		size += sizeof(data->weight);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		size += sizeof(data->data_src.val);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		size += sizeof(data->txn);

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	if (has_branch_stack(event))
		ctx->nr_branch_stack--;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	ctx->generation++;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

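/*
 * An event is eligible to run on this CPU if it is not bound to a
 * particular CPU (event->cpu == -1) or is bound to this one, and its
 * cgroup constraint (if any) matches.
 */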
static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	perf_pmu_disable(event->pmu);

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;

	perf_pmu_enable(event->pmu);
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event, us
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_disable);

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *    tstamp - ctx->timestamp
	 * is equivalent to
	 *    tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	int ret = 0;

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	perf_pmu_disable(event->pmu);

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		ret = -EAGAIN;
		goto out;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	perf_set_shadow_time(event, ctx, tstamp);

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

out:
	perf_pmu_enable(event->pmu);

	return ret;
}

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = ctx->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		perf_cpu_hrtimer_restart(cpuctx);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone thru event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	perf_cpu_hrtimer_restart(cpuctx);

	return -EAGAIN;
}

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

cdd6c482
IM
1815static void add_event_to_ctx(struct perf_event *event,
1816 struct perf_event_context *ctx)
53cfbf59 1817{
4158755d
SE
1818 u64 tstamp = perf_event_time(event);
1819
cdd6c482 1820 list_add_event(event, ctx);
8a49542c 1821 perf_group_attach(event);
4158755d
SE
1822 event->tstamp_enabled = tstamp;
1823 event->tstamp_running = tstamp;
1824 event->tstamp_stopped = tstamp;
53cfbf59
PM
1825}
1826
2c29ef0f
PZ
1827static void task_ctx_sched_out(struct perf_event_context *ctx);
1828static void
1829ctx_sched_in(struct perf_event_context *ctx,
1830 struct perf_cpu_context *cpuctx,
1831 enum event_type_t event_type,
1832 struct task_struct *task);
fe4b04fa 1833
dce5855b
PZ
1834static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1835 struct perf_event_context *ctx,
1836 struct task_struct *task)
1837{
1838 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1839 if (ctx)
1840 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1841 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1842 if (ctx)
1843 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1844}
1845
0793a61d 1846/*
cdd6c482 1847 * Cross CPU call to install and enable a performance event
682076ae
PZ
1848 *
1849 * Must be called with ctx->mutex held
0793a61d 1850 */
fe4b04fa 1851static int __perf_install_in_context(void *info)
0793a61d 1852{
cdd6c482
IM
1853 struct perf_event *event = info;
1854 struct perf_event_context *ctx = event->ctx;
108b02cf 1855 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2c29ef0f
PZ
1856 struct perf_event_context *task_ctx = cpuctx->task_ctx;
1857 struct task_struct *task = current;
1858
b58f6b0d 1859 perf_ctx_lock(cpuctx, task_ctx);
2c29ef0f 1860 perf_pmu_disable(cpuctx->ctx.pmu);
0793a61d
TG
1861
1862 /*
2c29ef0f 1863 * If there was an active task_ctx, schedule it out.
0793a61d 1864 */
b58f6b0d 1865 if (task_ctx)
2c29ef0f 1866 task_ctx_sched_out(task_ctx);
b58f6b0d
PZ
1867
1868 /*
1869 * If the context we're installing events in is not the
1870 * active task_ctx, flip them.
1871 */
1872 if (ctx->task && task_ctx != ctx) {
1873 if (task_ctx)
1874 raw_spin_unlock(&task_ctx->lock);
1875 raw_spin_lock(&ctx->lock);
1876 task_ctx = ctx;
1877 }
1878
1879 if (task_ctx) {
1880 cpuctx->task_ctx = task_ctx;
2c29ef0f
PZ
1881 task = task_ctx->task;
1882 }
b58f6b0d 1883
2c29ef0f 1884 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
0793a61d 1885
4af4998b 1886 update_context_time(ctx);
e5d1367f
SE
1887 /*
1888 * update cgrp time only if current cgrp
1889 * matches event->cgrp. Must be done before
1890 * calling add_event_to_ctx()
1891 */
1892 update_cgrp_time_from_event(event);
0793a61d 1893
cdd6c482 1894 add_event_to_ctx(event, ctx);
0793a61d 1895
d859e29f 1896 /*
2c29ef0f 1897 * Schedule everything back in
d859e29f 1898 */
dce5855b 1899 perf_event_sched_in(cpuctx, task_ctx, task);
2c29ef0f
PZ
1900
1901 perf_pmu_enable(cpuctx->ctx.pmu);
1902 perf_ctx_unlock(cpuctx, task_ctx);
fe4b04fa
PZ
1903
1904 return 0;
0793a61d
TG
1905}
1906
1907/*
cdd6c482 1908 * Attach a performance event to a context
0793a61d 1909 *
cdd6c482
IM
1910 * First we add the event to the list with the hardware enable bit
1911 * in event->hw_config cleared.
0793a61d 1912 *
cdd6c482 1913 * If the event is attached to a task which is on a CPU we use a smp
0793a61d
TG
1914 * call to enable it in the task context. The task might have been
1915 * scheduled away, but we check this in the smp call again.
1916 */
1917static void
cdd6c482
IM
1918perf_install_in_context(struct perf_event_context *ctx,
1919 struct perf_event *event,
0793a61d
TG
1920 int cpu)
1921{
1922 struct task_struct *task = ctx->task;
1923
fe4b04fa
PZ
1924 lockdep_assert_held(&ctx->mutex);
1925
c3f00c70 1926 event->ctx = ctx;
0cda4c02
YZ
1927 if (event->cpu != -1)
1928 event->cpu = cpu;
c3f00c70 1929
0793a61d
TG
1930 if (!task) {
1931 /*
cdd6c482 1932 * Per cpu events are installed via an smp call and
af901ca1 1933 * the install is always successful.
0793a61d 1934 */
fe4b04fa 1935 cpu_function_call(cpu, __perf_install_in_context, event);
0793a61d
TG
1936 return;
1937 }
1938
0793a61d 1939retry:
fe4b04fa
PZ
1940 if (!task_function_call(task, __perf_install_in_context, event))
1941 return;
0793a61d 1942
e625cce1 1943 raw_spin_lock_irq(&ctx->lock);
0793a61d 1944 /*
fe4b04fa
PZ
 1945 * If we failed to find a running task but the context is active now
 1946 * that we've acquired ctx->lock, retry.
0793a61d 1947 */
fe4b04fa 1948 if (ctx->is_active) {
e625cce1 1949 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1950 goto retry;
1951 }
1952
1953 /*
fe4b04fa
PZ
 1954 * Since the task isn't running, it's safe to add the event; holding
 1955 * ctx->lock ensures the task won't get scheduled in.
0793a61d 1956 */
fe4b04fa 1957 add_event_to_ctx(event, ctx);
e625cce1 1958 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1959}
1960
fa289bec 1961/*
cdd6c482 1962 * Put an event into the inactive state and update its time fields.
fa289bec
PM
1963 * Enabling the leader of a group effectively enables all
1964 * the group members that aren't explicitly disabled, so we
1965 * have to update their ->tstamp_enabled also.
1966 * Note: this works for group members as well as group leaders
1967 * since the non-leader members' sibling_lists will be empty.
1968 */
1d9b482e 1969static void __perf_event_mark_enabled(struct perf_event *event)
fa289bec 1970{
cdd6c482 1971 struct perf_event *sub;
4158755d 1972 u64 tstamp = perf_event_time(event);
fa289bec 1973
cdd6c482 1974 event->state = PERF_EVENT_STATE_INACTIVE;
4158755d 1975 event->tstamp_enabled = tstamp - event->total_time_enabled;
9ed6060d 1976 list_for_each_entry(sub, &event->sibling_list, group_entry) {
4158755d
SE
1977 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1978 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
9ed6060d 1979 }
fa289bec
PM
1980}
1981
d859e29f 1982/*
cdd6c482 1983 * Cross CPU call to enable a performance event
d859e29f 1984 */
fe4b04fa 1985static int __perf_event_enable(void *info)
04289bb9 1986{
cdd6c482 1987 struct perf_event *event = info;
cdd6c482
IM
1988 struct perf_event_context *ctx = event->ctx;
1989 struct perf_event *leader = event->group_leader;
108b02cf 1990 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
d859e29f 1991 int err;
04289bb9 1992
06f41796
JO
1993 /*
 1994 * There's a time window between the 'ctx->is_active' check
 1995 * in the perf_event_enable() function and this place having:
1996 * - IRQs on
1997 * - ctx->lock unlocked
1998 *
1999 * where the task could be killed and 'ctx' deactivated
2000 * by perf_event_exit_task.
2001 */
2002 if (!ctx->is_active)
fe4b04fa 2003 return -EINVAL;
3cbed429 2004
e625cce1 2005 raw_spin_lock(&ctx->lock);
4af4998b 2006 update_context_time(ctx);
d859e29f 2007
cdd6c482 2008 if (event->state >= PERF_EVENT_STATE_INACTIVE)
d859e29f 2009 goto unlock;
e5d1367f
SE
2010
2011 /*
2012 * set current task's cgroup time reference point
2013 */
3f7cce3c 2014 perf_cgroup_set_timestamp(current, ctx);
e5d1367f 2015
1d9b482e 2016 __perf_event_mark_enabled(event);
04289bb9 2017
e5d1367f
SE
2018 if (!event_filter_match(event)) {
2019 if (is_cgroup_event(event))
2020 perf_cgroup_defer_enabled(event);
f4c4176f 2021 goto unlock;
e5d1367f 2022 }
f4c4176f 2023
04289bb9 2024 /*
cdd6c482 2025 * If the event is in a group and isn't the group leader,
d859e29f 2026 * then don't put it on unless the group is on.
04289bb9 2027 */
cdd6c482 2028 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
d859e29f 2029 goto unlock;
3b6f9e5c 2030
cdd6c482 2031 if (!group_can_go_on(event, cpuctx, 1)) {
d859e29f 2032 err = -EEXIST;
e758a33d 2033 } else {
cdd6c482 2034 if (event == leader)
6e37738a 2035 err = group_sched_in(event, cpuctx, ctx);
e758a33d 2036 else
6e37738a 2037 err = event_sched_in(event, cpuctx, ctx);
e758a33d 2038 }
d859e29f
PM
2039
2040 if (err) {
2041 /*
cdd6c482 2042 * If this event can't go on and it's part of a
d859e29f
PM
2043 * group, then the whole group has to come off.
2044 */
9e630205 2045 if (leader != event) {
d859e29f 2046 group_sched_out(leader, cpuctx, ctx);
9e630205
SE
2047 perf_cpu_hrtimer_restart(cpuctx);
2048 }
0d48696f 2049 if (leader->attr.pinned) {
53cfbf59 2050 update_group_times(leader);
cdd6c482 2051 leader->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2052 }
d859e29f
PM
2053 }
2054
9ed6060d 2055unlock:
e625cce1 2056 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
2057
2058 return 0;
d859e29f
PM
2059}
2060
2061/*
cdd6c482 2062 * Enable an event.
c93f7669 2063 *
cdd6c482
IM
2064 * If event->ctx is a cloned context, callers must make sure that
2065 * every task struct that event->ctx->task could possibly point to
c93f7669 2066 * remains valid. This condition is satisfied when called through
cdd6c482
IM
2067 * perf_event_for_each_child or perf_event_for_each as described
2068 * for perf_event_disable.
d859e29f 2069 */
44234adc 2070void perf_event_enable(struct perf_event *event)
d859e29f 2071{
cdd6c482 2072 struct perf_event_context *ctx = event->ctx;
d859e29f
PM
2073 struct task_struct *task = ctx->task;
2074
2075 if (!task) {
2076 /*
cdd6c482 2077 * Enable the event on the cpu that it's on
d859e29f 2078 */
fe4b04fa 2079 cpu_function_call(event->cpu, __perf_event_enable, event);
d859e29f
PM
2080 return;
2081 }
2082
e625cce1 2083 raw_spin_lock_irq(&ctx->lock);
cdd6c482 2084 if (event->state >= PERF_EVENT_STATE_INACTIVE)
d859e29f
PM
2085 goto out;
2086
2087 /*
cdd6c482
IM
2088 * If the event is in error state, clear that first.
2089 * That way, if we see the event in error state below, we
d859e29f
PM
2090 * know that it has gone back into error state, as distinct
2091 * from the task having been scheduled away before the
2092 * cross-call arrived.
2093 */
cdd6c482
IM
2094 if (event->state == PERF_EVENT_STATE_ERROR)
2095 event->state = PERF_EVENT_STATE_OFF;
d859e29f 2096
9ed6060d 2097retry:
fe4b04fa 2098 if (!ctx->is_active) {
1d9b482e 2099 __perf_event_mark_enabled(event);
fe4b04fa
PZ
2100 goto out;
2101 }
2102
e625cce1 2103 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa
PZ
2104
2105 if (!task_function_call(task, __perf_event_enable, event))
2106 return;
d859e29f 2107
e625cce1 2108 raw_spin_lock_irq(&ctx->lock);
d859e29f
PM
2109
2110 /*
cdd6c482 2111 * If the context is active and the event is still off,
d859e29f
PM
2112 * we need to retry the cross-call.
2113 */
fe4b04fa
PZ
2114 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
2115 /*
2116 * task could have been flipped by a concurrent
2117 * perf_event_context_sched_out()
2118 */
2119 task = ctx->task;
d859e29f 2120 goto retry;
fe4b04fa 2121 }
fa289bec 2122
9ed6060d 2123out:
e625cce1 2124 raw_spin_unlock_irq(&ctx->lock);
d859e29f 2125}
dcfce4a0 2126EXPORT_SYMBOL_GPL(perf_event_enable);
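/*
 * Illustrative sketch only (not part of this file): how an in-kernel user
 * might pair the exported perf_event_enable() with a counter created via
 * perf_event_create_kernel_counter().  The overflow handler name
 * my_overflow_handler is a placeholder supplied by the caller.
 */
#if 0
static struct perf_event *start_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.disabled	= 1,		/* created off, enabled below */
	};
	struct perf_event *event;

	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
						 my_overflow_handler, NULL);
	if (IS_ERR(event))
		return event;

	perf_event_enable(event);	/* CPU event: ends up in __perf_event_enable() */
	return event;
}
#endif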
d859e29f 2127
26ca5c11 2128int perf_event_refresh(struct perf_event *event, int refresh)
79f14641 2129{
2023b359 2130 /*
cdd6c482 2131 * not supported on inherited events
2023b359 2132 */
2e939d1d 2133 if (event->attr.inherit || !is_sampling_event(event))
2023b359
PZ
2134 return -EINVAL;
2135
cdd6c482
IM
2136 atomic_add(refresh, &event->event_limit);
2137 perf_event_enable(event);
2023b359
PZ
2138
2139 return 0;
79f14641 2140}
26ca5c11 2141EXPORT_SYMBOL_GPL(perf_event_refresh);
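/*
 * Userspace sketch (assumed fd from perf_event_open(); not part of this
 * file): PERF_EVENT_IOC_REFRESH adds its argument to event_limit and
 * enables the event, so a disabled sampling event is re-armed for that
 * many overflows before the kernel disables it again.
 */
#if 0
	ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 3);	/* allow three more overflows */
#endif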
79f14641 2142
5b0311e1
FW
2143static void ctx_sched_out(struct perf_event_context *ctx,
2144 struct perf_cpu_context *cpuctx,
2145 enum event_type_t event_type)
235c7fc7 2146{
cdd6c482 2147 struct perf_event *event;
db24d33e 2148 int is_active = ctx->is_active;
235c7fc7 2149
db24d33e 2150 ctx->is_active &= ~event_type;
cdd6c482 2151 if (likely(!ctx->nr_events))
facc4307
PZ
2152 return;
2153
4af4998b 2154 update_context_time(ctx);
e5d1367f 2155 update_cgrp_time_from_cpuctx(cpuctx);
5b0311e1 2156 if (!ctx->nr_active)
facc4307 2157 return;
5b0311e1 2158
075e0b00 2159 perf_pmu_disable(ctx->pmu);
db24d33e 2160 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
889ff015
FW
2161 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2162 group_sched_out(event, cpuctx, ctx);
9ed6060d 2163 }
889ff015 2164
db24d33e 2165 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
889ff015 2166 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
8c9ed8e1 2167 group_sched_out(event, cpuctx, ctx);
9ed6060d 2168 }
1b9a644f 2169 perf_pmu_enable(ctx->pmu);
235c7fc7
IM
2170}
2171
564c2b21 2172/*
5a3126d4
PZ
2173 * Test whether two contexts are equivalent, i.e. whether they have both been
2174 * cloned from the same version of the same context.
2175 *
2176 * Equivalence is measured using a generation number in the context that is
2177 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2178 * and list_del_event().
564c2b21 2179 */
cdd6c482
IM
2180static int context_equiv(struct perf_event_context *ctx1,
2181 struct perf_event_context *ctx2)
564c2b21 2182{
5a3126d4
PZ
2183 /* Pinning disables the swap optimization */
2184 if (ctx1->pin_count || ctx2->pin_count)
2185 return 0;
2186
2187 /* If ctx1 is the parent of ctx2 */
2188 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2189 return 1;
2190
2191 /* If ctx2 is the parent of ctx1 */
2192 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2193 return 1;
2194
2195 /*
2196 * If ctx1 and ctx2 have the same parent; we flatten the parent
2197 * hierarchy, see perf_event_init_context().
2198 */
2199 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2200 ctx1->parent_gen == ctx2->parent_gen)
2201 return 1;
2202
2203 /* Unmatched */
2204 return 0;
564c2b21
PM
2205}
2206
cdd6c482
IM
2207static void __perf_event_sync_stat(struct perf_event *event,
2208 struct perf_event *next_event)
bfbd3381
PZ
2209{
2210 u64 value;
2211
cdd6c482 2212 if (!event->attr.inherit_stat)
bfbd3381
PZ
2213 return;
2214
2215 /*
cdd6c482 2216 * Update the event value, we cannot use perf_event_read()
bfbd3381
PZ
2217 * because we're in the middle of a context switch and have IRQs
 2218 * disabled, which upsets smp_call_function_single(); however,
cdd6c482 2219 * we know the event must be on the current CPU, therefore we
bfbd3381
PZ
2220 * don't need to use it.
2221 */
cdd6c482
IM
2222 switch (event->state) {
2223 case PERF_EVENT_STATE_ACTIVE:
3dbebf15
PZ
2224 event->pmu->read(event);
2225 /* fall-through */
bfbd3381 2226
cdd6c482
IM
2227 case PERF_EVENT_STATE_INACTIVE:
2228 update_event_times(event);
bfbd3381
PZ
2229 break;
2230
2231 default:
2232 break;
2233 }
2234
2235 /*
cdd6c482 2236 * In order to keep per-task stats reliable we need to flip the event
bfbd3381
PZ
2237 * values when we flip the contexts.
2238 */
e7850595
PZ
2239 value = local64_read(&next_event->count);
2240 value = local64_xchg(&event->count, value);
2241 local64_set(&next_event->count, value);
bfbd3381 2242
cdd6c482
IM
2243 swap(event->total_time_enabled, next_event->total_time_enabled);
2244 swap(event->total_time_running, next_event->total_time_running);
19d2e755 2245
bfbd3381 2246 /*
19d2e755 2247 * Since we swizzled the values, update the user visible data too.
bfbd3381 2248 */
cdd6c482
IM
2249 perf_event_update_userpage(event);
2250 perf_event_update_userpage(next_event);
bfbd3381
PZ
2251}
2252
cdd6c482
IM
2253static void perf_event_sync_stat(struct perf_event_context *ctx,
2254 struct perf_event_context *next_ctx)
bfbd3381 2255{
cdd6c482 2256 struct perf_event *event, *next_event;
bfbd3381
PZ
2257
2258 if (!ctx->nr_stat)
2259 return;
2260
02ffdbc8
PZ
2261 update_context_time(ctx);
2262
cdd6c482
IM
2263 event = list_first_entry(&ctx->event_list,
2264 struct perf_event, event_entry);
bfbd3381 2265
cdd6c482
IM
2266 next_event = list_first_entry(&next_ctx->event_list,
2267 struct perf_event, event_entry);
bfbd3381 2268
cdd6c482
IM
2269 while (&event->event_entry != &ctx->event_list &&
2270 &next_event->event_entry != &next_ctx->event_list) {
bfbd3381 2271
cdd6c482 2272 __perf_event_sync_stat(event, next_event);
bfbd3381 2273
cdd6c482
IM
2274 event = list_next_entry(event, event_entry);
2275 next_event = list_next_entry(next_event, event_entry);
bfbd3381
PZ
2276 }
2277}
2278
fe4b04fa
PZ
2279static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2280 struct task_struct *next)
0793a61d 2281{
8dc85d54 2282 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482 2283 struct perf_event_context *next_ctx;
5a3126d4 2284 struct perf_event_context *parent, *next_parent;
108b02cf 2285 struct perf_cpu_context *cpuctx;
c93f7669 2286 int do_switch = 1;
0793a61d 2287
108b02cf
PZ
2288 if (likely(!ctx))
2289 return;
10989fb2 2290
108b02cf
PZ
2291 cpuctx = __get_cpu_context(ctx);
2292 if (!cpuctx->task_ctx)
0793a61d
TG
2293 return;
2294
c93f7669 2295 rcu_read_lock();
8dc85d54 2296 next_ctx = next->perf_event_ctxp[ctxn];
5a3126d4
PZ
2297 if (!next_ctx)
2298 goto unlock;
2299
2300 parent = rcu_dereference(ctx->parent_ctx);
2301 next_parent = rcu_dereference(next_ctx->parent_ctx);
2302
 2303 /* If neither context has a parent context, they cannot be clones. */
2304 if (!parent && !next_parent)
2305 goto unlock;
2306
2307 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
c93f7669
PM
2308 /*
2309 * Looks like the two contexts are clones, so we might be
2310 * able to optimize the context switch. We lock both
2311 * contexts and check that they are clones under the
2312 * lock (including re-checking that neither has been
2313 * uncloned in the meantime). It doesn't matter which
2314 * order we take the locks because no other cpu could
2315 * be trying to lock both of these tasks.
2316 */
e625cce1
TG
2317 raw_spin_lock(&ctx->lock);
2318 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f7669 2319 if (context_equiv(ctx, next_ctx)) {
665c2142
PZ
2320 /*
2321 * XXX do we need a memory barrier of sorts
cdd6c482 2322 * wrt rcu_dereference() of perf_event_ctxp
665c2142 2323 */
8dc85d54
PZ
2324 task->perf_event_ctxp[ctxn] = next_ctx;
2325 next->perf_event_ctxp[ctxn] = ctx;
c93f7669
PM
2326 ctx->task = next;
2327 next_ctx->task = task;
2328 do_switch = 0;
bfbd3381 2329
cdd6c482 2330 perf_event_sync_stat(ctx, next_ctx);
c93f7669 2331 }
e625cce1
TG
2332 raw_spin_unlock(&next_ctx->lock);
2333 raw_spin_unlock(&ctx->lock);
564c2b21 2334 }
5a3126d4 2335unlock:
c93f7669 2336 rcu_read_unlock();
564c2b21 2337
c93f7669 2338 if (do_switch) {
facc4307 2339 raw_spin_lock(&ctx->lock);
5b0311e1 2340 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
c93f7669 2341 cpuctx->task_ctx = NULL;
facc4307 2342 raw_spin_unlock(&ctx->lock);
c93f7669 2343 }
0793a61d
TG
2344}
2345
8dc85d54
PZ
2346#define for_each_task_context_nr(ctxn) \
2347 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2348
2349/*
2350 * Called from scheduler to remove the events of the current task,
2351 * with interrupts disabled.
2352 *
2353 * We stop each event and update the event value in event->count.
2354 *
2355 * This does not protect us against NMI, but disable()
2356 * sets the disabled bit in the control field of event _before_
 2357 * accessing the event control register. If an NMI hits, then it will
2358 * not restart the event.
2359 */
ab0cce56
JO
2360void __perf_event_task_sched_out(struct task_struct *task,
2361 struct task_struct *next)
8dc85d54
PZ
2362{
2363 int ctxn;
2364
8dc85d54
PZ
2365 for_each_task_context_nr(ctxn)
2366 perf_event_context_sched_out(task, ctxn, next);
e5d1367f
SE
2367
2368 /*
2369 * if cgroup events exist on this CPU, then we need
2370 * to check if we have to switch out PMU state.
 2371 * cgroup events are in system-wide mode only
2372 */
2373 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
a8d757ef 2374 perf_cgroup_sched_out(task, next);
8dc85d54
PZ
2375}
2376
04dc2dbb 2377static void task_ctx_sched_out(struct perf_event_context *ctx)
a08b159f 2378{
108b02cf 2379 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
a08b159f 2380
a63eaf34
PM
2381 if (!cpuctx->task_ctx)
2382 return;
012b84da
IM
2383
2384 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2385 return;
2386
04dc2dbb 2387 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
a08b159f
PM
2388 cpuctx->task_ctx = NULL;
2389}
2390
5b0311e1
FW
2391/*
2392 * Called with IRQs disabled
2393 */
2394static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2395 enum event_type_t event_type)
2396{
2397 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb9
IM
2398}
2399
235c7fc7 2400static void
5b0311e1 2401ctx_pinned_sched_in(struct perf_event_context *ctx,
6e37738a 2402 struct perf_cpu_context *cpuctx)
0793a61d 2403{
cdd6c482 2404 struct perf_event *event;
0793a61d 2405
889ff015
FW
2406 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2407 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2408 continue;
5632ab12 2409 if (!event_filter_match(event))
3b6f9e5c
PM
2410 continue;
2411
e5d1367f
SE
2412 /* may need to reset tstamp_enabled */
2413 if (is_cgroup_event(event))
2414 perf_cgroup_mark_enabled(event, ctx);
2415
8c9ed8e1 2416 if (group_can_go_on(event, cpuctx, 1))
6e37738a 2417 group_sched_in(event, cpuctx, ctx);
3b6f9e5c
PM
2418
2419 /*
2420 * If this pinned group hasn't been scheduled,
2421 * put it in error state.
2422 */
cdd6c482
IM
2423 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2424 update_group_times(event);
2425 event->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2426 }
3b6f9e5c 2427 }
5b0311e1
FW
2428}
2429
2430static void
2431ctx_flexible_sched_in(struct perf_event_context *ctx,
6e37738a 2432 struct perf_cpu_context *cpuctx)
5b0311e1
FW
2433{
2434 struct perf_event *event;
2435 int can_add_hw = 1;
3b6f9e5c 2436
889ff015
FW
2437 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2438 /* Ignore events in OFF or ERROR state */
2439 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2440 continue;
04289bb9
IM
2441 /*
2442 * Listen to the 'cpu' scheduling filter constraint
cdd6c482 2443 * of events:
04289bb9 2444 */
5632ab12 2445 if (!event_filter_match(event))
0793a61d
TG
2446 continue;
2447
e5d1367f
SE
2448 /* may need to reset tstamp_enabled */
2449 if (is_cgroup_event(event))
2450 perf_cgroup_mark_enabled(event, ctx);
2451
9ed6060d 2452 if (group_can_go_on(event, cpuctx, can_add_hw)) {
6e37738a 2453 if (group_sched_in(event, cpuctx, ctx))
dd0e6ba2 2454 can_add_hw = 0;
9ed6060d 2455 }
0793a61d 2456 }
5b0311e1
FW
2457}
2458
2459static void
2460ctx_sched_in(struct perf_event_context *ctx,
2461 struct perf_cpu_context *cpuctx,
e5d1367f
SE
2462 enum event_type_t event_type,
2463 struct task_struct *task)
5b0311e1 2464{
e5d1367f 2465 u64 now;
db24d33e 2466 int is_active = ctx->is_active;
e5d1367f 2467
db24d33e 2468 ctx->is_active |= event_type;
5b0311e1 2469 if (likely(!ctx->nr_events))
facc4307 2470 return;
5b0311e1 2471
e5d1367f
SE
2472 now = perf_clock();
2473 ctx->timestamp = now;
3f7cce3c 2474 perf_cgroup_set_timestamp(task, ctx);
5b0311e1
FW
2475 /*
2476 * First go through the list and put on any pinned groups
2477 * in order to give them the best chance of going on.
2478 */
db24d33e 2479 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
6e37738a 2480 ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1
FW
2481
2482 /* Then walk through the lower prio flexible groups */
db24d33e 2483 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
6e37738a 2484 ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7
IM
2485}
2486
329c0e01 2487static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
2488 enum event_type_t event_type,
2489 struct task_struct *task)
329c0e01
FW
2490{
2491 struct perf_event_context *ctx = &cpuctx->ctx;
2492
e5d1367f 2493 ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e01
FW
2494}
2495
e5d1367f
SE
2496static void perf_event_context_sched_in(struct perf_event_context *ctx,
2497 struct task_struct *task)
235c7fc7 2498{
108b02cf 2499 struct perf_cpu_context *cpuctx;
235c7fc7 2500
108b02cf 2501 cpuctx = __get_cpu_context(ctx);
329c0e01
FW
2502 if (cpuctx->task_ctx == ctx)
2503 return;
2504
facc4307 2505 perf_ctx_lock(cpuctx, ctx);
1b9a644f 2506 perf_pmu_disable(ctx->pmu);
329c0e01
FW
2507 /*
2508 * We want to keep the following priority order:
2509 * cpu pinned (that don't need to move), task pinned,
2510 * cpu flexible, task flexible.
2511 */
2512 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2513
1d5f003f
GN
2514 if (ctx->nr_events)
2515 cpuctx->task_ctx = ctx;
9b33fa6b 2516
86b47c25
GN
2517 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2518
facc4307
PZ
2519 perf_pmu_enable(ctx->pmu);
2520 perf_ctx_unlock(cpuctx, ctx);
2521
b5ab4cd5
PZ
2522 /*
2523 * Since these rotations are per-cpu, we need to ensure the
2524 * cpu-context we got scheduled on is actually rotating.
2525 */
108b02cf 2526 perf_pmu_rotate_start(ctx->pmu);
235c7fc7
IM
2527}
2528
d010b332
SE
2529/*
 2530 * When sampling the branch stack in system-wide mode, it may be necessary
2531 * to flush the stack on context switch. This happens when the branch
2532 * stack does not tag its entries with the pid of the current task.
2533 * Otherwise it becomes impossible to associate a branch entry with a
2534 * task. This ambiguity is more likely to appear when the branch stack
2535 * supports priv level filtering and the user sets it to monitor only
2536 * at the user level (which could be a useful measurement in system-wide
2537 * mode). In that case, the risk is high of having a branch stack with
2538 * branch from multiple tasks. Flushing may mean dropping the existing
2539 * entries or stashing them somewhere in the PMU specific code layer.
2540 *
2541 * This function provides the context switch callback to the lower code
2542 * layer. It is invoked ONLY when there is at least one system-wide context
2543 * with at least one active event using taken branch sampling.
2544 */
2545static void perf_branch_stack_sched_in(struct task_struct *prev,
2546 struct task_struct *task)
2547{
2548 struct perf_cpu_context *cpuctx;
2549 struct pmu *pmu;
2550 unsigned long flags;
2551
2552 /* no need to flush branch stack if not changing task */
2553 if (prev == task)
2554 return;
2555
2556 local_irq_save(flags);
2557
2558 rcu_read_lock();
2559
2560 list_for_each_entry_rcu(pmu, &pmus, entry) {
2561 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2562
2563 /*
2564 * check if the context has at least one
2565 * event using PERF_SAMPLE_BRANCH_STACK
2566 */
2567 if (cpuctx->ctx.nr_branch_stack > 0
2568 && pmu->flush_branch_stack) {
2569
d010b332
SE
2570 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2571
2572 perf_pmu_disable(pmu);
2573
2574 pmu->flush_branch_stack();
2575
2576 perf_pmu_enable(pmu);
2577
2578 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2579 }
2580 }
2581
2582 rcu_read_unlock();
2583
2584 local_irq_restore(flags);
2585}
2586
8dc85d54
PZ
2587/*
2588 * Called from scheduler to add the events of the current task
2589 * with interrupts disabled.
2590 *
2591 * We restore the event value and then enable it.
2592 *
2593 * This does not protect us against NMI, but enable()
2594 * sets the enabled bit in the control field of event _before_
 2595 * accessing the event control register. If an NMI hits, then it will
2596 * keep the event running.
2597 */
ab0cce56
JO
2598void __perf_event_task_sched_in(struct task_struct *prev,
2599 struct task_struct *task)
8dc85d54
PZ
2600{
2601 struct perf_event_context *ctx;
2602 int ctxn;
2603
2604 for_each_task_context_nr(ctxn) {
2605 ctx = task->perf_event_ctxp[ctxn];
2606 if (likely(!ctx))
2607 continue;
2608
e5d1367f 2609 perf_event_context_sched_in(ctx, task);
8dc85d54 2610 }
e5d1367f
SE
2611 /*
2612 * if cgroup events exist on this CPU, then we need
2613 * to check if we have to switch in PMU state.
 2614 * cgroup events are in system-wide mode only
2615 */
2616 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
a8d757ef 2617 perf_cgroup_sched_in(prev, task);
d010b332
SE
2618
2619 /* check for system-wide branch_stack events */
2620 if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
2621 perf_branch_stack_sched_in(prev, task);
235c7fc7
IM
2622}
2623
abd50713
PZ
2624static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2625{
2626 u64 frequency = event->attr.sample_freq;
2627 u64 sec = NSEC_PER_SEC;
2628 u64 divisor, dividend;
2629
2630 int count_fls, nsec_fls, frequency_fls, sec_fls;
2631
2632 count_fls = fls64(count);
2633 nsec_fls = fls64(nsec);
2634 frequency_fls = fls64(frequency);
2635 sec_fls = 30;
2636
2637 /*
2638 * We got @count in @nsec, with a target of sample_freq HZ
2639 * the target period becomes:
2640 *
2641 * @count * 10^9
2642 * period = -------------------
2643 * @nsec * sample_freq
2644 *
2645 */
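	/*
	 * Worked example with made-up numbers: @count = 2,000,000 events seen
	 * over @nsec = 4,000,000 ns with sample_freq = 1000 gives
	 *
	 *	2e6 * 10^9
	 *	----------- = 500,000 events per sample,
	 *	4e6 * 1000
	 *
	 * i.e. the event fires ~5e8 times a second, so sampling every
	 * 500,000 occurrences yields the requested 1000 samples/sec.
	 */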
2646
2647 /*
2648 * Reduce accuracy by one bit such that @a and @b converge
2649 * to a similar magnitude.
2650 */
fe4b04fa 2651#define REDUCE_FLS(a, b) \
abd50713
PZ
2652do { \
2653 if (a##_fls > b##_fls) { \
2654 a >>= 1; \
2655 a##_fls--; \
2656 } else { \
2657 b >>= 1; \
2658 b##_fls--; \
2659 } \
2660} while (0)
2661
2662 /*
2663 * Reduce accuracy until either term fits in a u64, then proceed with
2664 * the other, so that finally we can do a u64/u64 division.
2665 */
2666 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2667 REDUCE_FLS(nsec, frequency);
2668 REDUCE_FLS(sec, count);
2669 }
2670
2671 if (count_fls + sec_fls > 64) {
2672 divisor = nsec * frequency;
2673
2674 while (count_fls + sec_fls > 64) {
2675 REDUCE_FLS(count, sec);
2676 divisor >>= 1;
2677 }
2678
2679 dividend = count * sec;
2680 } else {
2681 dividend = count * sec;
2682
2683 while (nsec_fls + frequency_fls > 64) {
2684 REDUCE_FLS(nsec, frequency);
2685 dividend >>= 1;
2686 }
2687
2688 divisor = nsec * frequency;
2689 }
2690
f6ab91ad
PZ
2691 if (!divisor)
2692 return dividend;
2693
abd50713
PZ
2694 return div64_u64(dividend, divisor);
2695}
2696
e050e3f0
SE
2697static DEFINE_PER_CPU(int, perf_throttled_count);
2698static DEFINE_PER_CPU(u64, perf_throttled_seq);
2699
f39d47ff 2700static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
bd2b5b12 2701{
cdd6c482 2702 struct hw_perf_event *hwc = &event->hw;
f6ab91ad 2703 s64 period, sample_period;
bd2b5b12
PZ
2704 s64 delta;
2705
abd50713 2706 period = perf_calculate_period(event, nsec, count);
bd2b5b12
PZ
2707
2708 delta = (s64)(period - hwc->sample_period);
2709 delta = (delta + 7) / 8; /* low pass filter */
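	/*
	 * Illustration with hypothetical values: if the newly computed period
	 * is 800,000 while hwc->sample_period is 400,000, delta starts at
	 * 400,000 and the filter cuts it to 50,000, so the period moves about
	 * 1/8 of the way toward the target per adjustment and converges over
	 * several ticks rather than jumping in one step.
	 */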
2710
2711 sample_period = hwc->sample_period + delta;
2712
2713 if (!sample_period)
2714 sample_period = 1;
2715
bd2b5b12 2716 hwc->sample_period = sample_period;
abd50713 2717
e7850595 2718 if (local64_read(&hwc->period_left) > 8*sample_period) {
f39d47ff
SE
2719 if (disable)
2720 event->pmu->stop(event, PERF_EF_UPDATE);
2721
e7850595 2722 local64_set(&hwc->period_left, 0);
f39d47ff
SE
2723
2724 if (disable)
2725 event->pmu->start(event, PERF_EF_RELOAD);
abd50713 2726 }
bd2b5b12
PZ
2727}
2728
e050e3f0
SE
2729/*
2730 * combine freq adjustment with unthrottling to avoid two passes over the
 2731 * events. At the same time, make sure that having freq events does not change
2732 * the rate of unthrottling as that would introduce bias.
2733 */
2734static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2735 int needs_unthr)
60db5e09 2736{
cdd6c482
IM
2737 struct perf_event *event;
2738 struct hw_perf_event *hwc;
e050e3f0 2739 u64 now, period = TICK_NSEC;
abd50713 2740 s64 delta;
60db5e09 2741
e050e3f0
SE
2742 /*
2743 * only need to iterate over all events iff:
 2744 * - the context has events in frequency mode (needs freq adjust)
2745 * - there are events to unthrottle on this cpu
2746 */
2747 if (!(ctx->nr_freq || needs_unthr))
0f5a2601
PZ
2748 return;
2749
e050e3f0 2750 raw_spin_lock(&ctx->lock);
f39d47ff 2751 perf_pmu_disable(ctx->pmu);
e050e3f0 2752
03541f8b 2753 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482 2754 if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09
PZ
2755 continue;
2756
5632ab12 2757 if (!event_filter_match(event))
5d27c23d
PZ
2758 continue;
2759
44377277
AS
2760 perf_pmu_disable(event->pmu);
2761
cdd6c482 2762 hwc = &event->hw;
6a24ed6c 2763
ae23bff1 2764 if (hwc->interrupts == MAX_INTERRUPTS) {
e050e3f0 2765 hwc->interrupts = 0;
cdd6c482 2766 perf_log_throttle(event, 1);
a4eaf7f1 2767 event->pmu->start(event, 0);
a78ac325
PZ
2768 }
2769
cdd6c482 2770 if (!event->attr.freq || !event->attr.sample_freq)
44377277 2771 goto next;
60db5e09 2772
e050e3f0
SE
2773 /*
2774 * stop the event and update event->count
2775 */
2776 event->pmu->stop(event, PERF_EF_UPDATE);
2777
e7850595 2778 now = local64_read(&event->count);
abd50713
PZ
2779 delta = now - hwc->freq_count_stamp;
2780 hwc->freq_count_stamp = now;
60db5e09 2781
e050e3f0
SE
2782 /*
2783 * restart the event
2784 * reload only if value has changed
f39d47ff
SE
2785 * we have stopped the event so tell that
2786 * to perf_adjust_period() to avoid stopping it
2787 * twice.
e050e3f0 2788 */
abd50713 2789 if (delta > 0)
f39d47ff 2790 perf_adjust_period(event, period, delta, false);
e050e3f0
SE
2791
2792 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
44377277
AS
2793 next:
2794 perf_pmu_enable(event->pmu);
60db5e09 2795 }
e050e3f0 2796
f39d47ff 2797 perf_pmu_enable(ctx->pmu);
e050e3f0 2798 raw_spin_unlock(&ctx->lock);
60db5e09
PZ
2799}
2800
235c7fc7 2801/*
cdd6c482 2802 * Round-robin a context's events:
235c7fc7 2803 */
cdd6c482 2804static void rotate_ctx(struct perf_event_context *ctx)
0793a61d 2805{
dddd3379
TG
2806 /*
2807 * Rotate the first entry last of non-pinned groups. Rotation might be
2808 * disabled by the inheritance code.
2809 */
2810 if (!ctx->rotate_disable)
2811 list_rotate_left(&ctx->flexible_groups);
235c7fc7
IM
2812}
2813
b5ab4cd5 2814/*
e9d2b064
PZ
2815 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2816 * because they're strictly cpu affine and rotate_start is called with IRQs
2817 * disabled, while rotate_context is called from IRQ context.
b5ab4cd5 2818 */
9e630205 2819static int perf_rotate_context(struct perf_cpu_context *cpuctx)
235c7fc7 2820{
8dc85d54 2821 struct perf_event_context *ctx = NULL;
e050e3f0 2822 int rotate = 0, remove = 1;
7fc23a53 2823
b5ab4cd5 2824 if (cpuctx->ctx.nr_events) {
e9d2b064 2825 remove = 0;
b5ab4cd5
PZ
2826 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2827 rotate = 1;
2828 }
235c7fc7 2829
8dc85d54 2830 ctx = cpuctx->task_ctx;
b5ab4cd5 2831 if (ctx && ctx->nr_events) {
e9d2b064 2832 remove = 0;
b5ab4cd5
PZ
2833 if (ctx->nr_events != ctx->nr_active)
2834 rotate = 1;
2835 }
9717e6cd 2836
e050e3f0 2837 if (!rotate)
0f5a2601
PZ
2838 goto done;
2839
facc4307 2840 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644f 2841 perf_pmu_disable(cpuctx->ctx.pmu);
60db5e09 2842
e050e3f0
SE
2843 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2844 if (ctx)
2845 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
0793a61d 2846
e050e3f0
SE
2847 rotate_ctx(&cpuctx->ctx);
2848 if (ctx)
2849 rotate_ctx(ctx);
235c7fc7 2850
e050e3f0 2851 perf_event_sched_in(cpuctx, ctx, current);
235c7fc7 2852
0f5a2601
PZ
2853 perf_pmu_enable(cpuctx->ctx.pmu);
2854 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
b5ab4cd5 2855done:
e9d2b064
PZ
2856 if (remove)
2857 list_del_init(&cpuctx->rotation_list);
9e630205
SE
2858
2859 return rotate;
e9d2b064
PZ
2860}
2861
026249ef
FW
2862#ifdef CONFIG_NO_HZ_FULL
2863bool perf_event_can_stop_tick(void)
2864{
948b26b6 2865 if (atomic_read(&nr_freq_events) ||
d84153d6 2866 __this_cpu_read(perf_throttled_count))
026249ef 2867 return false;
d84153d6
FW
2868 else
2869 return true;
026249ef
FW
2870}
2871#endif
2872
e9d2b064
PZ
2873void perf_event_task_tick(void)
2874{
2875 struct list_head *head = &__get_cpu_var(rotation_list);
2876 struct perf_cpu_context *cpuctx, *tmp;
e050e3f0
SE
2877 struct perf_event_context *ctx;
2878 int throttled;
b5ab4cd5 2879
e9d2b064
PZ
2880 WARN_ON(!irqs_disabled());
2881
e050e3f0
SE
2882 __this_cpu_inc(perf_throttled_seq);
2883 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2884
e9d2b064 2885 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
e050e3f0
SE
2886 ctx = &cpuctx->ctx;
2887 perf_adjust_freq_unthr_context(ctx, throttled);
2888
2889 ctx = cpuctx->task_ctx;
2890 if (ctx)
2891 perf_adjust_freq_unthr_context(ctx, throttled);
e9d2b064 2892 }
0793a61d
TG
2893}
2894
889ff015
FW
2895static int event_enable_on_exec(struct perf_event *event,
2896 struct perf_event_context *ctx)
2897{
2898 if (!event->attr.enable_on_exec)
2899 return 0;
2900
2901 event->attr.enable_on_exec = 0;
2902 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2903 return 0;
2904
1d9b482e 2905 __perf_event_mark_enabled(event);
889ff015
FW
2906
2907 return 1;
2908}
2909
57e7986e 2910/*
cdd6c482 2911 * Enable all of a task's events that have been marked enable-on-exec.
57e7986e
PM
2912 * This expects task == current.
2913 */
8dc85d54 2914static void perf_event_enable_on_exec(struct perf_event_context *ctx)
57e7986e 2915{
cdd6c482 2916 struct perf_event *event;
57e7986e
PM
2917 unsigned long flags;
2918 int enabled = 0;
889ff015 2919 int ret;
57e7986e
PM
2920
2921 local_irq_save(flags);
cdd6c482 2922 if (!ctx || !ctx->nr_events)
57e7986e
PM
2923 goto out;
2924
e566b76e
SE
2925 /*
2926 * We must ctxsw out cgroup events to avoid conflict
2927 * when invoking perf_task_event_sched_in() later on
2928 * in this function. Otherwise we end up trying to
2929 * ctxswin cgroup events which are already scheduled
2930 * in.
2931 */
a8d757ef 2932 perf_cgroup_sched_out(current, NULL);
57e7986e 2933
e625cce1 2934 raw_spin_lock(&ctx->lock);
04dc2dbb 2935 task_ctx_sched_out(ctx);
57e7986e 2936
b79387ef 2937 list_for_each_entry(event, &ctx->event_list, event_entry) {
889ff015
FW
2938 ret = event_enable_on_exec(event, ctx);
2939 if (ret)
2940 enabled = 1;
57e7986e
PM
2941 }
2942
2943 /*
cdd6c482 2944 * Unclone this context if we enabled any event.
57e7986e 2945 */
71a851b4
PZ
2946 if (enabled)
2947 unclone_ctx(ctx);
57e7986e 2948
e625cce1 2949 raw_spin_unlock(&ctx->lock);
57e7986e 2950
e566b76e
SE
2951 /*
2952 * Also calls ctxswin for cgroup events, if any:
2953 */
e5d1367f 2954 perf_event_context_sched_in(ctx, ctx->task);
9ed6060d 2955out:
57e7986e
PM
2956 local_irq_restore(flags);
2957}
2958
0793a61d 2959/*
cdd6c482 2960 * Cross CPU call to read the hardware event
0793a61d 2961 */
cdd6c482 2962static void __perf_event_read(void *info)
0793a61d 2963{
cdd6c482
IM
2964 struct perf_event *event = info;
2965 struct perf_event_context *ctx = event->ctx;
108b02cf 2966 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
621a01ea 2967
e1ac3614
PM
2968 /*
2969 * If this is a task context, we need to check whether it is
 2970 * the current task context of this cpu. If not, it has been
2971 * scheduled out before the smp call arrived. In that case
cdd6c482
IM
2972 * event->count would have been updated to a recent sample
2973 * when the event was scheduled out.
e1ac3614
PM
2974 */
2975 if (ctx->task && cpuctx->task_ctx != ctx)
2976 return;
2977
e625cce1 2978 raw_spin_lock(&ctx->lock);
e5d1367f 2979 if (ctx->is_active) {
542e72fc 2980 update_context_time(ctx);
e5d1367f
SE
2981 update_cgrp_time_from_event(event);
2982 }
cdd6c482 2983 update_event_times(event);
542e72fc
PZ
2984 if (event->state == PERF_EVENT_STATE_ACTIVE)
2985 event->pmu->read(event);
e625cce1 2986 raw_spin_unlock(&ctx->lock);
0793a61d
TG
2987}
2988
b5e58793
PZ
2989static inline u64 perf_event_count(struct perf_event *event)
2990{
e7850595 2991 return local64_read(&event->count) + atomic64_read(&event->child_count);
b5e58793
PZ
2992}
2993
cdd6c482 2994static u64 perf_event_read(struct perf_event *event)
0793a61d
TG
2995{
2996 /*
cdd6c482
IM
 2997 * If the event is enabled and currently active on a CPU, update the
2998 * value in the event structure:
0793a61d 2999 */
cdd6c482
IM
3000 if (event->state == PERF_EVENT_STATE_ACTIVE) {
3001 smp_call_function_single(event->oncpu,
3002 __perf_event_read, event, 1);
3003 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9
PZ
3004 struct perf_event_context *ctx = event->ctx;
3005 unsigned long flags;
3006
e625cce1 3007 raw_spin_lock_irqsave(&ctx->lock, flags);
c530ccd9
SE
3008 /*
3009 * may read while context is not active
3010 * (e.g., thread is blocked), in that case
3011 * we cannot update context time
3012 */
e5d1367f 3013 if (ctx->is_active) {
c530ccd9 3014 update_context_time(ctx);
e5d1367f
SE
3015 update_cgrp_time_from_event(event);
3016 }
cdd6c482 3017 update_event_times(event);
e625cce1 3018 raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d
TG
3019 }
3020
b5e58793 3021 return perf_event_count(event);
0793a61d
TG
3022}
3023
a63eaf34 3024/*
cdd6c482 3025 * Initialize the perf_event context in a task_struct:
a63eaf34 3026 */
eb184479 3027static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34 3028{
e625cce1 3029 raw_spin_lock_init(&ctx->lock);
a63eaf34 3030 mutex_init(&ctx->mutex);
889ff015
FW
3031 INIT_LIST_HEAD(&ctx->pinned_groups);
3032 INIT_LIST_HEAD(&ctx->flexible_groups);
a63eaf34
PM
3033 INIT_LIST_HEAD(&ctx->event_list);
3034 atomic_set(&ctx->refcount, 1);
eb184479
PZ
3035}
3036
3037static struct perf_event_context *
3038alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3039{
3040 struct perf_event_context *ctx;
3041
3042 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3043 if (!ctx)
3044 return NULL;
3045
3046 __perf_event_init_context(ctx);
3047 if (task) {
3048 ctx->task = task;
3049 get_task_struct(task);
0793a61d 3050 }
eb184479
PZ
3051 ctx->pmu = pmu;
3052
3053 return ctx;
a63eaf34
PM
3054}
3055
2ebd4ffb
MH
3056static struct task_struct *
3057find_lively_task_by_vpid(pid_t vpid)
3058{
3059 struct task_struct *task;
3060 int err;
0793a61d
TG
3061
3062 rcu_read_lock();
2ebd4ffb 3063 if (!vpid)
0793a61d
TG
3064 task = current;
3065 else
2ebd4ffb 3066 task = find_task_by_vpid(vpid);
0793a61d
TG
3067 if (task)
3068 get_task_struct(task);
3069 rcu_read_unlock();
3070
3071 if (!task)
3072 return ERR_PTR(-ESRCH);
3073
0793a61d 3074 /* Reuse ptrace permission checks for now. */
c93f7669
PM
3075 err = -EACCES;
3076 if (!ptrace_may_access(task, PTRACE_MODE_READ))
3077 goto errout;
3078
2ebd4ffb
MH
3079 return task;
3080errout:
3081 put_task_struct(task);
3082 return ERR_PTR(err);
3083
3084}
3085
fe4b04fa
PZ
3086/*
3087 * Returns a matching context with refcount and pincount.
3088 */
108b02cf 3089static struct perf_event_context *
38a81da2 3090find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
0793a61d 3091{
cdd6c482 3092 struct perf_event_context *ctx;
22a4f650 3093 struct perf_cpu_context *cpuctx;
25346b93 3094 unsigned long flags;
8dc85d54 3095 int ctxn, err;
0793a61d 3096
22a4ec72 3097 if (!task) {
cdd6c482 3098 /* Must be root to operate on a CPU event: */
0764771d 3099 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
0793a61d
TG
3100 return ERR_PTR(-EACCES);
3101
0793a61d 3102 /*
cdd6c482 3103 * We could be clever and allow attaching an event to an
0793a61d
TG
3104 * offline CPU and activate it when the CPU comes up, but
3105 * that's for later.
3106 */
f6325e30 3107 if (!cpu_online(cpu))
0793a61d
TG
3108 return ERR_PTR(-ENODEV);
3109
108b02cf 3110 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d 3111 ctx = &cpuctx->ctx;
c93f7669 3112 get_ctx(ctx);
fe4b04fa 3113 ++ctx->pin_count;
0793a61d 3114
0793a61d
TG
3115 return ctx;
3116 }
3117
8dc85d54
PZ
3118 err = -EINVAL;
3119 ctxn = pmu->task_ctx_nr;
3120 if (ctxn < 0)
3121 goto errout;
3122
9ed6060d 3123retry:
8dc85d54 3124 ctx = perf_lock_task_context(task, ctxn, &flags);
c93f7669 3125 if (ctx) {
71a851b4 3126 unclone_ctx(ctx);
fe4b04fa 3127 ++ctx->pin_count;
e625cce1 3128 raw_spin_unlock_irqrestore(&ctx->lock, flags);
9137fb28 3129 } else {
eb184479 3130 ctx = alloc_perf_context(pmu, task);
c93f7669
PM
3131 err = -ENOMEM;
3132 if (!ctx)
3133 goto errout;
eb184479 3134
dbe08d82
ON
3135 err = 0;
3136 mutex_lock(&task->perf_event_mutex);
3137 /*
 3138 * If it has already passed perf_event_exit_task(),
 3139 * we must see PF_EXITING; it takes this mutex too.
3140 */
3141 if (task->flags & PF_EXITING)
3142 err = -ESRCH;
3143 else if (task->perf_event_ctxp[ctxn])
3144 err = -EAGAIN;
fe4b04fa 3145 else {
9137fb28 3146 get_ctx(ctx);
fe4b04fa 3147 ++ctx->pin_count;
dbe08d82 3148 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa 3149 }
dbe08d82
ON
3150 mutex_unlock(&task->perf_event_mutex);
3151
3152 if (unlikely(err)) {
9137fb28 3153 put_ctx(ctx);
dbe08d82
ON
3154
3155 if (err == -EAGAIN)
3156 goto retry;
3157 goto errout;
a63eaf34
PM
3158 }
3159 }
3160
0793a61d 3161 return ctx;
c93f7669 3162
9ed6060d 3163errout:
c93f7669 3164 return ERR_PTR(err);
0793a61d
TG
3165}
3166
6fb2915d
LZ
3167static void perf_event_free_filter(struct perf_event *event);
3168
cdd6c482 3169static void free_event_rcu(struct rcu_head *head)
592903cd 3170{
cdd6c482 3171 struct perf_event *event;
592903cd 3172
cdd6c482
IM
3173 event = container_of(head, struct perf_event, rcu_head);
3174 if (event->ns)
3175 put_pid_ns(event->ns);
6fb2915d 3176 perf_event_free_filter(event);
cdd6c482 3177 kfree(event);
592903cd
PZ
3178}
3179
76369139 3180static void ring_buffer_put(struct ring_buffer *rb);
9bb5d40c 3181static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
925d519a 3182
4beb31f3 3183static void unaccount_event_cpu(struct perf_event *event, int cpu)
f1600952 3184{
4beb31f3
FW
3185 if (event->parent)
3186 return;
3187
3188 if (has_branch_stack(event)) {
3189 if (!(event->attach_state & PERF_ATTACH_TASK))
3190 atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
3191 }
3192 if (is_cgroup_event(event))
3193 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3194}
925d519a 3195
4beb31f3
FW
3196static void unaccount_event(struct perf_event *event)
3197{
3198 if (event->parent)
3199 return;
3200
3201 if (event->attach_state & PERF_ATTACH_TASK)
3202 static_key_slow_dec_deferred(&perf_sched_events);
3203 if (event->attr.mmap || event->attr.mmap_data)
3204 atomic_dec(&nr_mmap_events);
3205 if (event->attr.comm)
3206 atomic_dec(&nr_comm_events);
3207 if (event->attr.task)
3208 atomic_dec(&nr_task_events);
948b26b6
FW
3209 if (event->attr.freq)
3210 atomic_dec(&nr_freq_events);
4beb31f3
FW
3211 if (is_cgroup_event(event))
3212 static_key_slow_dec_deferred(&perf_sched_events);
3213 if (has_branch_stack(event))
3214 static_key_slow_dec_deferred(&perf_sched_events);
3215
3216 unaccount_event_cpu(event, event->cpu);
3217}
925d519a 3218
766d6c07
FW
3219static void __free_event(struct perf_event *event)
3220{
cdd6c482 3221 if (!event->parent) {
927c7a9e
FW
3222 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3223 put_callchain_buffers();
f344011c 3224 }
9ee318a7 3225
766d6c07
FW
3226 if (event->destroy)
3227 event->destroy(event);
3228
3229 if (event->ctx)
3230 put_ctx(event->ctx);
3231
3232 call_rcu(&event->rcu_head, free_event_rcu);
3233}
cdd6c482 3234static void free_event(struct perf_event *event)
f1600952 3235{
e360adbe 3236 irq_work_sync(&event->pending);
925d519a 3237
4beb31f3 3238 unaccount_event(event);
9ee318a7 3239
76369139 3240 if (event->rb) {
9bb5d40c
PZ
3241 struct ring_buffer *rb;
3242
3243 /*
3244 * Can happen when we close an event with re-directed output.
3245 *
3246 * Since we have a 0 refcount, perf_mmap_close() will skip
3247 * over us; possibly making our ring_buffer_put() the last.
3248 */
3249 mutex_lock(&event->mmap_mutex);
3250 rb = event->rb;
3251 if (rb) {
3252 rcu_assign_pointer(event->rb, NULL);
3253 ring_buffer_detach(event, rb);
3254 ring_buffer_put(rb); /* could be last */
3255 }
3256 mutex_unlock(&event->mmap_mutex);
a4be7c27
PZ
3257 }
3258
e5d1367f
SE
3259 if (is_cgroup_event(event))
3260 perf_detach_cgroup(event);
3261
0c67b408 3262
766d6c07 3263 __free_event(event);
f1600952
PZ
3264}
3265
a66a3052 3266int perf_event_release_kernel(struct perf_event *event)
0793a61d 3267{
cdd6c482 3268 struct perf_event_context *ctx = event->ctx;
0793a61d 3269
ad3a37de 3270 WARN_ON_ONCE(ctx->parent_ctx);
a0507c84
PZ
3271 /*
3272 * There are two ways this annotation is useful:
3273 *
3274 * 1) there is a lock recursion from perf_event_exit_task
3275 * see the comment there.
3276 *
3277 * 2) there is a lock-inversion with mmap_sem through
3278 * perf_event_read_group(), which takes faults while
3279 * holding ctx->mutex, however this is called after
3280 * the last filedesc died, so there is no possibility
3281 * to trigger the AB-BA case.
3282 */
3283 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
050735b0 3284 raw_spin_lock_irq(&ctx->lock);
8a49542c 3285 perf_group_detach(event);
050735b0 3286 raw_spin_unlock_irq(&ctx->lock);
e03a9a55 3287 perf_remove_from_context(event);
d859e29f 3288 mutex_unlock(&ctx->mutex);
0793a61d 3289
cdd6c482 3290 free_event(event);
0793a61d
TG
3291
3292 return 0;
3293}
a66a3052 3294EXPORT_SYMBOL_GPL(perf_event_release_kernel);
0793a61d 3295
a66a3052
PZ
3296/*
3297 * Called when the last reference to the file is gone.
3298 */
a6fa941d 3299static void put_event(struct perf_event *event)
fb0459d7 3300{
8882135b 3301 struct task_struct *owner;
fb0459d7 3302
a6fa941d
AV
3303 if (!atomic_long_dec_and_test(&event->refcount))
3304 return;
fb0459d7 3305
8882135b
PZ
3306 rcu_read_lock();
3307 owner = ACCESS_ONCE(event->owner);
3308 /*
3309 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
3310 * !owner it means the list deletion is complete and we can indeed
3311 * free this event, otherwise we need to serialize on
3312 * owner->perf_event_mutex.
3313 */
3314 smp_read_barrier_depends();
3315 if (owner) {
3316 /*
3317 * Since delayed_put_task_struct() also drops the last
3318 * task reference we can safely take a new reference
3319 * while holding the rcu_read_lock().
3320 */
3321 get_task_struct(owner);
3322 }
3323 rcu_read_unlock();
3324
3325 if (owner) {
3326 mutex_lock(&owner->perf_event_mutex);
3327 /*
3328 * We have to re-check the event->owner field, if it is cleared
3329 * we raced with perf_event_exit_task(), acquiring the mutex
3330 * ensured they're done, and we can proceed with freeing the
3331 * event.
3332 */
3333 if (event->owner)
3334 list_del_init(&event->owner_entry);
3335 mutex_unlock(&owner->perf_event_mutex);
3336 put_task_struct(owner);
3337 }
3338
a6fa941d
AV
3339 perf_event_release_kernel(event);
3340}
3341
3342static int perf_release(struct inode *inode, struct file *file)
3343{
3344 put_event(file->private_data);
3345 return 0;
fb0459d7 3346}
fb0459d7 3347
59ed446f 3348u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c0994 3349{
cdd6c482 3350 struct perf_event *child;
e53c0994
PZ
3351 u64 total = 0;
3352
59ed446f
PZ
3353 *enabled = 0;
3354 *running = 0;
3355
6f10581a 3356 mutex_lock(&event->child_mutex);
cdd6c482 3357 total += perf_event_read(event);
59ed446f
PZ
3358 *enabled += event->total_time_enabled +
3359 atomic64_read(&event->child_total_time_enabled);
3360 *running += event->total_time_running +
3361 atomic64_read(&event->child_total_time_running);
3362
3363 list_for_each_entry(child, &event->child_list, child_list) {
cdd6c482 3364 total += perf_event_read(child);
59ed446f
PZ
3365 *enabled += child->total_time_enabled;
3366 *running += child->total_time_running;
3367 }
6f10581a 3368 mutex_unlock(&event->child_mutex);
e53c0994
PZ
3369
3370 return total;
3371}
fb0459d7 3372EXPORT_SYMBOL_GPL(perf_event_read_value);
e53c0994 3373
cdd6c482 3374static int perf_event_read_group(struct perf_event *event,
3dab77fb
PZ
3375 u64 read_format, char __user *buf)
3376{
cdd6c482 3377 struct perf_event *leader = event->group_leader, *sub;
6f10581a
PZ
3378 int n = 0, size = 0, ret = -EFAULT;
3379 struct perf_event_context *ctx = leader->ctx;
abf4868b 3380 u64 values[5];
59ed446f 3381 u64 count, enabled, running;
abf4868b 3382
6f10581a 3383 mutex_lock(&ctx->mutex);
59ed446f 3384 count = perf_event_read_value(leader, &enabled, &running);
3dab77fb
PZ
3385
3386 values[n++] = 1 + leader->nr_siblings;
59ed446f
PZ
3387 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3388 values[n++] = enabled;
3389 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3390 values[n++] = running;
abf4868b
PZ
3391 values[n++] = count;
3392 if (read_format & PERF_FORMAT_ID)
3393 values[n++] = primary_event_id(leader);
3dab77fb
PZ
3394
3395 size = n * sizeof(u64);
3396
3397 if (copy_to_user(buf, values, size))
6f10581a 3398 goto unlock;
3dab77fb 3399
6f10581a 3400 ret = size;
3dab77fb 3401
65abc865 3402 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
abf4868b 3403 n = 0;
3dab77fb 3404
59ed446f 3405 values[n++] = perf_event_read_value(sub, &enabled, &running);
abf4868b
PZ
3406 if (read_format & PERF_FORMAT_ID)
3407 values[n++] = primary_event_id(sub);
3408
3409 size = n * sizeof(u64);
3410
184d3da8 3411 if (copy_to_user(buf + ret, values, size)) {
6f10581a
PZ
3412 ret = -EFAULT;
3413 goto unlock;
3414 }
abf4868b
PZ
3415
3416 ret += size;
3dab77fb 3417 }
6f10581a
PZ
3418unlock:
3419 mutex_unlock(&ctx->mutex);
3dab77fb 3420
abf4868b 3421 return ret;
3dab77fb
PZ
3422}
3423
cdd6c482 3424static int perf_event_read_one(struct perf_event *event,
3dab77fb
PZ
3425 u64 read_format, char __user *buf)
3426{
59ed446f 3427 u64 enabled, running;
3dab77fb
PZ
3428 u64 values[4];
3429 int n = 0;
3430
59ed446f
PZ
3431 values[n++] = perf_event_read_value(event, &enabled, &running);
3432 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3433 values[n++] = enabled;
3434 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3435 values[n++] = running;
3dab77fb 3436 if (read_format & PERF_FORMAT_ID)
cdd6c482 3437 values[n++] = primary_event_id(event);
3dab77fb
PZ
3438
3439 if (copy_to_user(buf, values, n * sizeof(u64)))
3440 return -EFAULT;
3441
3442 return n * sizeof(u64);
3443}
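/*
 * Sketch of the buffer a userspace read() gets back (assumed fd from
 * perf_event_open(); layouts mirror perf_event_read_one()/_group() above;
 * not part of this file):
 */
#if 0
	struct read_one {		/* without PERF_FORMAT_GROUP */
		__u64 value;
		__u64 time_enabled;	/* if PERF_FORMAT_TOTAL_TIME_ENABLED */
		__u64 time_running;	/* if PERF_FORMAT_TOTAL_TIME_RUNNING */
		__u64 id;		/* if PERF_FORMAT_ID */
	};

	struct read_group {		/* with PERF_FORMAT_GROUP */
		__u64 nr;		/* 1 + leader->nr_siblings */
		__u64 time_enabled;
		__u64 time_running;
		struct { __u64 value, id; } cntr[];	/* leader first, then siblings */
	};

	struct read_one one;
	read(perf_fd, &one, sizeof(one));
#endif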
3444
0793a61d 3445/*
cdd6c482 3446 * Read the performance event - simple non-blocking version for now
0793a61d
TG
3447 */
3448static ssize_t
cdd6c482 3449perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
0793a61d 3450{
cdd6c482 3451 u64 read_format = event->attr.read_format;
3dab77fb 3452 int ret;
0793a61d 3453
3b6f9e5c 3454 /*
cdd6c482 3455 * Return end-of-file for a read on an event that is in
3b6f9e5c
PM
3456 * error state (i.e. because it was pinned but it couldn't be
3457 * scheduled on to the CPU at some point).
3458 */
cdd6c482 3459 if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5c
PM
3460 return 0;
3461
c320c7b7 3462 if (count < event->read_size)
3dab77fb
PZ
3463 return -ENOSPC;
3464
cdd6c482 3465 WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb 3466 if (read_format & PERF_FORMAT_GROUP)
cdd6c482 3467 ret = perf_event_read_group(event, read_format, buf);
3dab77fb 3468 else
cdd6c482 3469 ret = perf_event_read_one(event, read_format, buf);
0793a61d 3470
3dab77fb 3471 return ret;
0793a61d
TG
3472}
3473
0793a61d
TG
3474static ssize_t
3475perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3476{
cdd6c482 3477 struct perf_event *event = file->private_data;
0793a61d 3478
cdd6c482 3479 return perf_read_hw(event, buf, count);
0793a61d
TG
3480}
3481
3482static unsigned int perf_poll(struct file *file, poll_table *wait)
3483{
cdd6c482 3484 struct perf_event *event = file->private_data;
76369139 3485 struct ring_buffer *rb;
c33a0bc4 3486 unsigned int events = POLL_HUP;
c7138f37 3487
10c6db11 3488 /*
9bb5d40c
PZ
3489 * Pin the event->rb by taking event->mmap_mutex; otherwise
3490 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
10c6db11
PZ
3491 */
3492 mutex_lock(&event->mmap_mutex);
9bb5d40c
PZ
3493 rb = event->rb;
3494 if (rb)
76369139 3495 events = atomic_xchg(&rb->poll, 0);
10c6db11
PZ
3496 mutex_unlock(&event->mmap_mutex);
3497
cdd6c482 3498 poll_wait(file, &event->waitq, wait);
0793a61d 3499
0793a61d
TG
3500 return events;
3501}
3502
cdd6c482 3503static void perf_event_reset(struct perf_event *event)
6de6a7b9 3504{
cdd6c482 3505 (void)perf_event_read(event);
e7850595 3506 local64_set(&event->count, 0);
cdd6c482 3507 perf_event_update_userpage(event);
3df5edad
PZ
3508}
3509
c93f7669 3510/*
cdd6c482
IM
3511 * Holding the top-level event's child_mutex means that any
3512 * descendant process that has inherited this event will block
3513 * in sync_child_event if it goes to exit, thus satisfying the
3514 * task existence requirements of perf_event_enable/disable.
c93f7669 3515 */
cdd6c482
IM
3516static void perf_event_for_each_child(struct perf_event *event,
3517 void (*func)(struct perf_event *))
3df5edad 3518{
cdd6c482 3519 struct perf_event *child;
3df5edad 3520
cdd6c482
IM
3521 WARN_ON_ONCE(event->ctx->parent_ctx);
3522 mutex_lock(&event->child_mutex);
3523 func(event);
3524 list_for_each_entry(child, &event->child_list, child_list)
3df5edad 3525 func(child);
cdd6c482 3526 mutex_unlock(&event->child_mutex);
3df5edad
PZ
3527}
3528
cdd6c482
IM
3529static void perf_event_for_each(struct perf_event *event,
3530 void (*func)(struct perf_event *))
3df5edad 3531{
cdd6c482
IM
3532 struct perf_event_context *ctx = event->ctx;
3533 struct perf_event *sibling;
3df5edad 3534
75f937f2
PZ
3535 WARN_ON_ONCE(ctx->parent_ctx);
3536 mutex_lock(&ctx->mutex);
cdd6c482 3537 event = event->group_leader;
75f937f2 3538
cdd6c482 3539 perf_event_for_each_child(event, func);
cdd6c482 3540 list_for_each_entry(sibling, &event->sibling_list, group_entry)
724b6daa 3541 perf_event_for_each_child(sibling, func);
75f937f2 3542 mutex_unlock(&ctx->mutex);
6de6a7b9
PZ
3543}
3544
cdd6c482 3545static int perf_event_period(struct perf_event *event, u64 __user *arg)
08247e31 3546{
cdd6c482 3547 struct perf_event_context *ctx = event->ctx;
bad7192b 3548 int ret = 0, active;
08247e31
PZ
3549 u64 value;
3550
6c7e550f 3551 if (!is_sampling_event(event))
08247e31
PZ
3552 return -EINVAL;
3553
ad0cf347 3554 if (copy_from_user(&value, arg, sizeof(value)))
08247e31
PZ
3555 return -EFAULT;
3556
3557 if (!value)
3558 return -EINVAL;
3559
e625cce1 3560 raw_spin_lock_irq(&ctx->lock);
cdd6c482
IM
3561 if (event->attr.freq) {
3562 if (value > sysctl_perf_event_sample_rate) {
08247e31
PZ
3563 ret = -EINVAL;
3564 goto unlock;
3565 }
3566
cdd6c482 3567 event->attr.sample_freq = value;
08247e31 3568 } else {
cdd6c482
IM
3569 event->attr.sample_period = value;
3570 event->hw.sample_period = value;
08247e31 3571 }
bad7192b
PZ
3572
3573 active = (event->state == PERF_EVENT_STATE_ACTIVE);
3574 if (active) {
3575 perf_pmu_disable(ctx->pmu);
3576 event->pmu->stop(event, PERF_EF_UPDATE);
3577 }
3578
3579 local64_set(&event->hw.period_left, 0);
3580
3581 if (active) {
3582 event->pmu->start(event, PERF_EF_RELOAD);
3583 perf_pmu_enable(ctx->pmu);
3584 }
3585
08247e31 3586unlock:
e625cce1 3587 raw_spin_unlock_irq(&ctx->lock);
08247e31
PZ
3588
3589 return ret;
3590}
3591
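A hedged userspace sketch (not part of this file) of driving the handler above: PERF_EVENT_IOC_PERIOD takes a pointer to a u64 holding the new sample period (or sample frequency when attr.freq is set), and is rejected for non-sampling events or a zero value:

	/* Illustrative userspace sketch -- not part of kernel/events/core.c. */
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	static int set_sample_period(int fd, uint64_t period)
	{
		/* Fails with EINVAL for non-sampling events or period == 0. */
		return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
	}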
ac9721f3
PZ
3592static const struct file_operations perf_fops;
3593
2903ff01 3594static inline int perf_fget_light(int fd, struct fd *p)
ac9721f3 3595{
2903ff01
AV
3596 struct fd f = fdget(fd);
3597 if (!f.file)
3598 return -EBADF;
ac9721f3 3599
2903ff01
AV
3600 if (f.file->f_op != &perf_fops) {
3601 fdput(f);
3602 return -EBADF;
ac9721f3 3603 }
2903ff01
AV
3604 *p = f;
3605 return 0;
ac9721f3
PZ
3606}
3607
3608static int perf_event_set_output(struct perf_event *event,
3609 struct perf_event *output_event);
6fb2915d 3610static int perf_event_set_filter(struct perf_event *event, void __user *arg);
a4be7c27 3611
d859e29f
PM
3612static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3613{
cdd6c482
IM
3614 struct perf_event *event = file->private_data;
3615 void (*func)(struct perf_event *);
3df5edad 3616 u32 flags = arg;
d859e29f
PM
3617
3618 switch (cmd) {
cdd6c482
IM
3619 case PERF_EVENT_IOC_ENABLE:
3620 func = perf_event_enable;
d859e29f 3621 break;
cdd6c482
IM
3622 case PERF_EVENT_IOC_DISABLE:
3623 func = perf_event_disable;
79f14641 3624 break;
cdd6c482
IM
3625 case PERF_EVENT_IOC_RESET:
3626 func = perf_event_reset;
6de6a7b9 3627 break;
3df5edad 3628
cdd6c482
IM
3629 case PERF_EVENT_IOC_REFRESH:
3630 return perf_event_refresh(event, arg);
08247e31 3631
cdd6c482
IM
3632 case PERF_EVENT_IOC_PERIOD:
3633 return perf_event_period(event, (u64 __user *)arg);
08247e31 3634
cf4957f1
JO
3635 case PERF_EVENT_IOC_ID:
3636 {
3637 u64 id = primary_event_id(event);
3638
3639 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
3640 return -EFAULT;
3641 return 0;
3642 }
3643
cdd6c482 3644 case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3 3645 {
ac9721f3 3646 int ret;
ac9721f3 3647 if (arg != -1) {
2903ff01
AV
3648 struct perf_event *output_event;
3649 struct fd output;
3650 ret = perf_fget_light(arg, &output);
3651 if (ret)
3652 return ret;
3653 output_event = output.file->private_data;
3654 ret = perf_event_set_output(event, output_event);
3655 fdput(output);
3656 } else {
3657 ret = perf_event_set_output(event, NULL);
ac9721f3 3658 }
ac9721f3
PZ
3659 return ret;
3660 }
a4be7c27 3661
6fb2915d
LZ
3662 case PERF_EVENT_IOC_SET_FILTER:
3663 return perf_event_set_filter(event, (void __user *)arg);
3664
d859e29f 3665 default:
3df5edad 3666 return -ENOTTY;
d859e29f 3667 }
3df5edad
PZ
3668
3669 if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482 3670 perf_event_for_each(event, func);
3df5edad 3671 else
cdd6c482 3672 perf_event_for_each_child(event, func);
3df5edad
PZ
3673
3674 return 0;
d859e29f
PM
3675}
3676
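Following on from the PERF_IOC_FLAG_GROUP handling at the end of perf_ioctl(), a minimal userspace sketch (not part of this file): passing the flag as the ioctl argument applies enable/disable to the entire group the event belongs to rather than only to that event:

	/* Illustrative userspace sketch -- not part of kernel/events/core.c. */
	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	static int toggle_group(int event_fd, int enable)
	{
		unsigned long req = enable ? PERF_EVENT_IOC_ENABLE
					   : PERF_EVENT_IOC_DISABLE;

		/* The flag travels in 'arg' and is tested against PERF_IOC_FLAG_GROUP. */
		return ioctl(event_fd, req, PERF_IOC_FLAG_GROUP);
	}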
cdd6c482 3677int perf_event_task_enable(void)
771d7cde 3678{
cdd6c482 3679 struct perf_event *event;
771d7cde 3680
cdd6c482
IM
3681 mutex_lock(&current->perf_event_mutex);
3682 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3683 perf_event_for_each_child(event, perf_event_enable);
3684 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
3685
3686 return 0;
3687}
3688
cdd6c482 3689int perf_event_task_disable(void)
771d7cde 3690{
cdd6c482 3691 struct perf_event *event;
771d7cde 3692
cdd6c482
IM
3693 mutex_lock(&current->perf_event_mutex);
3694 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3695 perf_event_for_each_child(event, perf_event_disable);
3696 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
3697
3698 return 0;
3699}
3700
cdd6c482 3701static int perf_event_index(struct perf_event *event)
194002b2 3702{
a4eaf7f1
PZ
3703 if (event->hw.state & PERF_HES_STOPPED)
3704 return 0;
3705
cdd6c482 3706 if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b2
PZ
3707 return 0;
3708
35edc2a5 3709 return event->pmu->event_idx(event);
194002b2
PZ
3710}
3711
c4794295 3712static void calc_timer_values(struct perf_event *event,
e3f3541c 3713 u64 *now,
7f310a5d
EM
3714 u64 *enabled,
3715 u64 *running)
c4794295 3716{
e3f3541c 3717 u64 ctx_time;
c4794295 3718
e3f3541c
PZ
3719 *now = perf_clock();
3720 ctx_time = event->shadow_ctx_time + *now;
c4794295
EM
3721 *enabled = ctx_time - event->tstamp_enabled;
3722 *running = ctx_time - event->tstamp_running;
3723}
3724
fa731587
PZ
3725static void perf_event_init_userpage(struct perf_event *event)
3726{
3727 struct perf_event_mmap_page *userpg;
3728 struct ring_buffer *rb;
3729
3730 rcu_read_lock();
3731 rb = rcu_dereference(event->rb);
3732 if (!rb)
3733 goto unlock;
3734
3735 userpg = rb->user_page;
3736
3737 /* Allow new userspace to detect that bit 0 is deprecated */
3738 userpg->cap_bit0_is_deprecated = 1;
3739 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
3740
3741unlock:
3742 rcu_read_unlock();
3743}
3744
c7206205 3745void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
e3f3541c
PZ
3746{
3747}
3748
38ff667b
PZ
3749/*
3750 * Callers need to ensure there can be no nesting of this function, otherwise
3751 * the seqlock logic goes bad. We cannot serialize this because the arch
3752 * code calls this from NMI context.
3753 */
cdd6c482 3754void perf_event_update_userpage(struct perf_event *event)
37d81828 3755{
cdd6c482 3756 struct perf_event_mmap_page *userpg;
76369139 3757 struct ring_buffer *rb;
e3f3541c 3758 u64 enabled, running, now;
38ff667b
PZ
3759
3760 rcu_read_lock();
5ec4c599
PZ
3761 rb = rcu_dereference(event->rb);
3762 if (!rb)
3763 goto unlock;
3764
0d641208
EM
3765 /*
3766 * compute total_time_enabled, total_time_running
3767 * based on snapshot values taken when the event
3768 * was last scheduled in.
3769 *
3770 * we cannot simply call update_context_time()
3771 * because of locking issues as we can be called in
3772 * NMI context
3773 */
e3f3541c 3774 calc_timer_values(event, &now, &enabled, &running);
38ff667b 3775
76369139 3776 userpg = rb->user_page;
7b732a75
PZ
3777 /*
3778 * Disable preemption so as to not let the corresponding user-space
3779 * spin too long if we get preempted.
3780 */
3781 preempt_disable();
37d81828 3782 ++userpg->lock;
92f22a38 3783 barrier();
cdd6c482 3784 userpg->index = perf_event_index(event);
b5e58793 3785 userpg->offset = perf_event_count(event);
365a4038 3786 if (userpg->index)
e7850595 3787 userpg->offset -= local64_read(&event->hw.prev_count);
7b732a75 3788
0d641208 3789 userpg->time_enabled = enabled +
cdd6c482 3790 atomic64_read(&event->child_total_time_enabled);
7f8b4e4e 3791
0d641208 3792 userpg->time_running = running +
cdd6c482 3793 atomic64_read(&event->child_total_time_running);
7f8b4e4e 3794
c7206205 3795 arch_perf_update_userpage(userpg, now);
e3f3541c 3796
92f22a38 3797 barrier();
37d81828 3798 ++userpg->lock;
7b732a75 3799 preempt_enable();
38ff667b 3800unlock:
7b732a75 3801 rcu_read_unlock();
37d81828
PM
3802}
3803
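The lock/barrier pairing above is a seqcount-style protocol: the writer bumps userpg->lock before and after publishing new values, so a stable snapshot sees the same lock value on both reads. A hedged sketch (not part of this file) of the matching userspace reader, assuming the first mmap()ed page is a struct perf_event_mmap_page and that a compiler barrier is sufficient on the target architecture:

	/* Illustrative userspace sketch -- not part of kernel/events/core.c. */
	#include <stdint.h>
	#include <linux/perf_event.h>

	/* Compiler barrier only; some architectures may need a real read barrier. */
	#define read_barrier()	__asm__ __volatile__("" ::: "memory")

	static void read_count_page(volatile struct perf_event_mmap_page *pc,
				    int64_t *offset, uint64_t *enabled, uint64_t *running)
	{
		uint32_t seq;

		do {
			seq = pc->lock;
			read_barrier();

			*offset  = pc->offset;		/* counter value published by the kernel */
			*enabled = pc->time_enabled;
			*running = pc->time_running;

			read_barrier();
		} while (pc->lock != seq);		/* retry if the writer ran in between */
	}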
906010b2
PZ
3804static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3805{
3806 struct perf_event *event = vma->vm_file->private_data;
76369139 3807 struct ring_buffer *rb;
906010b2
PZ
3808 int ret = VM_FAULT_SIGBUS;
3809
3810 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3811 if (vmf->pgoff == 0)
3812 ret = 0;
3813 return ret;
3814 }
3815
3816 rcu_read_lock();
76369139
FW
3817 rb = rcu_dereference(event->rb);
3818 if (!rb)
906010b2
PZ
3819 goto unlock;
3820
3821 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3822 goto unlock;
3823
76369139 3824 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b2
PZ
3825 if (!vmf->page)
3826 goto unlock;
3827
3828 get_page(vmf->page);
3829 vmf->page->mapping = vma->vm_file->f_mapping;
3830 vmf->page->index = vmf->pgoff;
3831
3832 ret = 0;
3833unlock:
3834 rcu_read_unlock();
3835
3836 return ret;
3837}
3838
10c6db11
PZ
3839static void ring_buffer_attach(struct perf_event *event,
3840 struct ring_buffer *rb)
3841{
3842 unsigned long flags;
3843
3844 if (!list_empty(&event->rb_entry))
3845 return;
3846
3847 spin_lock_irqsave(&rb->event_lock, flags);
9bb5d40c
PZ
3848 if (list_empty(&event->rb_entry))
3849 list_add(&event->rb_entry, &rb->event_list);
10c6db11
PZ
3850 spin_unlock_irqrestore(&rb->event_lock, flags);
3851}
3852
9bb5d40c 3853static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
10c6db11
PZ
3854{
3855 unsigned long flags;
3856
3857 if (list_empty(&event->rb_entry))
3858 return;
3859
3860 spin_lock_irqsave(&rb->event_lock, flags);
3861 list_del_init(&event->rb_entry);
3862 wake_up_all(&event->waitq);
3863 spin_unlock_irqrestore(&rb->event_lock, flags);
3864}
3865
3866static void ring_buffer_wakeup(struct perf_event *event)
3867{
3868 struct ring_buffer *rb;
3869
3870 rcu_read_lock();
3871 rb = rcu_dereference(event->rb);
9bb5d40c
PZ
3872 if (rb) {
3873 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3874 wake_up_all(&event->waitq);
3875 }
10c6db11
PZ
3876 rcu_read_unlock();
3877}
3878
76369139 3879static void rb_free_rcu(struct rcu_head *rcu_head)
906010b2 3880{
76369139 3881 struct ring_buffer *rb;
906010b2 3882
76369139
FW
3883 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3884 rb_free(rb);
7b732a75
PZ
3885}
3886
76369139 3887static struct ring_buffer *ring_buffer_get(struct perf_event *event)
7b732a75 3888{
76369139 3889 struct ring_buffer *rb;
7b732a75 3890
ac9721f3 3891 rcu_read_lock();
76369139
FW
3892 rb = rcu_dereference(event->rb);
3893 if (rb) {
3894 if (!atomic_inc_not_zero(&rb->refcount))
3895 rb = NULL;
ac9721f3
PZ
3896 }
3897 rcu_read_unlock();
3898
76369139 3899 return rb;
ac9721f3
PZ
3900}
3901
76369139 3902static void ring_buffer_put(struct ring_buffer *rb)
ac9721f3 3903{
76369139 3904 if (!atomic_dec_and_test(&rb->refcount))
ac9721f3 3905 return;
7b732a75 3906
9bb5d40c 3907 WARN_ON_ONCE(!list_empty(&rb->event_list));
10c6db11 3908
76369139 3909 call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a75
PZ
3910}
3911
3912static void perf_mmap_open(struct vm_area_struct *vma)
3913{
cdd6c482 3914 struct perf_event *event = vma->vm_file->private_data;
7b732a75 3915
cdd6c482 3916 atomic_inc(&event->mmap_count);
9bb5d40c 3917 atomic_inc(&event->rb->mmap_count);
7b732a75
PZ
3918}
3919
9bb5d40c
PZ
3920/*
3921 * A buffer can be mmap()ed multiple times; either directly through the same
3922 * event, or through other events by use of perf_event_set_output().
3923 *
3924 * In order to undo the VM accounting done by perf_mmap() we need to destroy
3925 * the buffer here, where we still have a VM context. This means we need
3926 * to detach all events redirecting to us.
3927 */
7b732a75
PZ
3928static void perf_mmap_close(struct vm_area_struct *vma)
3929{
cdd6c482 3930 struct perf_event *event = vma->vm_file->private_data;
7b732a75 3931
9bb5d40c
PZ
3932 struct ring_buffer *rb = event->rb;
3933 struct user_struct *mmap_user = rb->mmap_user;
3934 int mmap_locked = rb->mmap_locked;
3935 unsigned long size = perf_data_size(rb);
789f90fc 3936
9bb5d40c
PZ
3937 atomic_dec(&rb->mmap_count);
3938
3939 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
3940 return;
3941
3942 /* Detach current event from the buffer. */
3943 rcu_assign_pointer(event->rb, NULL);
3944 ring_buffer_detach(event, rb);
3945 mutex_unlock(&event->mmap_mutex);
3946
3947 /* If there's still other mmap()s of this buffer, we're done. */
3948 if (atomic_read(&rb->mmap_count)) {
3949 ring_buffer_put(rb); /* can't be last */
3950 return;
3951 }
ac9721f3 3952
9bb5d40c
PZ
3953 /*
3954 * No other mmap()s, detach from all other events that might redirect
3955 * into the now unreachable buffer. Somewhat complicated by the
3956 * fact that rb::event_lock otherwise nests inside mmap_mutex.
3957 */
3958again:
3959 rcu_read_lock();
3960 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
3961 if (!atomic_long_inc_not_zero(&event->refcount)) {
3962 /*
3963 * This event is en-route to free_event() which will
3964 * detach it and remove it from the list.
3965 */
3966 continue;
3967 }
3968 rcu_read_unlock();
789f90fc 3969
9bb5d40c
PZ
3970 mutex_lock(&event->mmap_mutex);
3971 /*
3972 * Check we didn't race with perf_event_set_output() which can
3973 * swizzle the rb from under us while we were waiting to
3974 * acquire mmap_mutex.
3975 *
3976 * If we find a different rb, ignore this event; a later
3977 * iteration will no longer find it on the list. We still
3978 * have to restart the iteration to make sure we're not now
3979 * iterating the wrong list.
3980 */
3981 if (event->rb == rb) {
3982 rcu_assign_pointer(event->rb, NULL);
3983 ring_buffer_detach(event, rb);
3984 ring_buffer_put(rb); /* can't be last, we still have one */
26cb63ad 3985 }
cdd6c482 3986 mutex_unlock(&event->mmap_mutex);
9bb5d40c 3987 put_event(event);
ac9721f3 3988
9bb5d40c
PZ
3989 /*
3990 * Restart the iteration; either we're on the wrong list or
3991 * destroyed its integrity by doing a deletion.
3992 */
3993 goto again;
7b732a75 3994 }
9bb5d40c
PZ
3995 rcu_read_unlock();
3996
3997 /*
3998 * It could be there are still a few 0-ref events on the list; they'll
3999 * get cleaned up by free_event() -- they'll also still have their
4000 * ref on the rb and will free it whenever they are done with it.
4001 *
4002 * Aside from that, this buffer is 'fully' detached and unmapped,
4003 * undo the VM accounting.
4004 */
4005
4006 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
4007 vma->vm_mm->pinned_vm -= mmap_locked;
4008 free_uid(mmap_user);
4009
4010 ring_buffer_put(rb); /* could be last */
37d81828
PM
4011}
4012
f0f37e2f 4013static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea8
PZ
4014 .open = perf_mmap_open,
4015 .close = perf_mmap_close,
4016 .fault = perf_mmap_fault,
4017 .page_mkwrite = perf_mmap_fault,
37d81828
PM
4018};
4019
4020static int perf_mmap(struct file *file, struct vm_area_struct *vma)
4021{
cdd6c482 4022 struct perf_event *event = file->private_data;
22a4f650 4023 unsigned long user_locked, user_lock_limit;
789f90fc 4024 struct user_struct *user = current_user();
22a4f650 4025 unsigned long locked, lock_limit;
76369139 4026 struct ring_buffer *rb;
7b732a75
PZ
4027 unsigned long vma_size;
4028 unsigned long nr_pages;
789f90fc 4029 long user_extra, extra;
d57e34fd 4030 int ret = 0, flags = 0;
37d81828 4031
c7920614
PZ
4032 /*
4033 * Don't allow mmap() of inherited per-task counters. This would
4034 * create a performance issue due to all children writing to the
76369139 4035 * same rb.
c7920614
PZ
4036 */
4037 if (event->cpu == -1 && event->attr.inherit)
4038 return -EINVAL;
4039
43a21ea8 4040 if (!(vma->vm_flags & VM_SHARED))
37d81828 4041 return -EINVAL;
7b732a75
PZ
4042
4043 vma_size = vma->vm_end - vma->vm_start;
4044 nr_pages = (vma_size / PAGE_SIZE) - 1;
4045
7730d865 4046 /*
76369139 4047 * If we have rb pages ensure they're a power-of-two number, so we
7730d865
PZ
4048 * can do bitmasks instead of modulo.
4049 */
4050 if (nr_pages != 0 && !is_power_of_2(nr_pages))
37d81828
PM
4051 return -EINVAL;
4052
7b732a75 4053 if (vma_size != PAGE_SIZE * (1 + nr_pages))
37d81828
PM
4054 return -EINVAL;
4055
7b732a75
PZ
4056 if (vma->vm_pgoff != 0)
4057 return -EINVAL;
37d81828 4058
cdd6c482 4059 WARN_ON_ONCE(event->ctx->parent_ctx);
9bb5d40c 4060again:
cdd6c482 4061 mutex_lock(&event->mmap_mutex);
76369139 4062 if (event->rb) {
9bb5d40c 4063 if (event->rb->nr_pages != nr_pages) {
ebb3c4c4 4064 ret = -EINVAL;
9bb5d40c
PZ
4065 goto unlock;
4066 }
4067
4068 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
4069 /*
4070 * Raced against perf_mmap_close() through
4071 * perf_event_set_output(). Try again, hope for better
4072 * luck.
4073 */
4074 mutex_unlock(&event->mmap_mutex);
4075 goto again;
4076 }
4077
ebb3c4c4
PZ
4078 goto unlock;
4079 }
4080
789f90fc 4081 user_extra = nr_pages + 1;
cdd6c482 4082 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
a3862d3f
IM
4083
4084 /*
4085 * Increase the limit linearly with more CPUs:
4086 */
4087 user_lock_limit *= num_online_cpus();
4088
789f90fc 4089 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
c5078f78 4090
789f90fc
PZ
4091 extra = 0;
4092 if (user_locked > user_lock_limit)
4093 extra = user_locked - user_lock_limit;
7b732a75 4094
78d7d407 4095 lock_limit = rlimit(RLIMIT_MEMLOCK);
7b732a75 4096 lock_limit >>= PAGE_SHIFT;
bc3e53f6 4097 locked = vma->vm_mm->pinned_vm + extra;
7b732a75 4098
459ec28a
IM
4099 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
4100 !capable(CAP_IPC_LOCK)) {
ebb3c4c4
PZ
4101 ret = -EPERM;
4102 goto unlock;
4103 }
7b732a75 4104
76369139 4105 WARN_ON(event->rb);
906010b2 4106
d57e34fd 4107 if (vma->vm_flags & VM_WRITE)
76369139 4108 flags |= RING_BUFFER_WRITABLE;
d57e34fd 4109
4ec8363d
VW
4110 rb = rb_alloc(nr_pages,
4111 event->attr.watermark ? event->attr.wakeup_watermark : 0,
4112 event->cpu, flags);
4113
76369139 4114 if (!rb) {
ac9721f3 4115 ret = -ENOMEM;
ebb3c4c4 4116 goto unlock;
ac9721f3 4117 }
26cb63ad 4118
9bb5d40c 4119 atomic_set(&rb->mmap_count, 1);
26cb63ad
PZ
4120 rb->mmap_locked = extra;
4121 rb->mmap_user = get_current_user();
43a21ea8 4122
ac9721f3 4123 atomic_long_add(user_extra, &user->locked_vm);
26cb63ad
PZ
4124 vma->vm_mm->pinned_vm += extra;
4125
9bb5d40c 4126 ring_buffer_attach(event, rb);
26cb63ad 4127 rcu_assign_pointer(event->rb, rb);
ac9721f3 4128
fa731587 4129 perf_event_init_userpage(event);
9a0f05cb
PZ
4130 perf_event_update_userpage(event);
4131
ebb3c4c4 4132unlock:
ac9721f3
PZ
4133 if (!ret)
4134 atomic_inc(&event->mmap_count);
cdd6c482 4135 mutex_unlock(&event->mmap_mutex);
37d81828 4136
9bb5d40c
PZ
4137 /*
4138 * Since pinned accounting is per vm we cannot allow fork() to copy our
4139 * vma.
4140 */
26cb63ad 4141 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
37d81828 4142 vma->vm_ops = &perf_mmap_vmops;
7b732a75
PZ
4143
4144 return ret;
37d81828
PM
4145}
4146
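perf_mmap() above only accepts a shared mapping whose length is exactly 1 + 2^n pages, starting at offset 0: one control page followed by a power-of-two data area. A minimal userspace sketch (not part of this file), assuming fd came from the perf_event_open() syscall:

	/* Illustrative userspace sketch -- not part of kernel/events/core.c. */
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/perf_event.h>

	static struct perf_event_mmap_page *map_event(int fd, unsigned int data_pages)
	{
		size_t page = sysconf(_SC_PAGESIZE);

		/* data_pages must be a power of two; total length is (1 + data_pages) pages. */
		void *base = mmap(NULL, (1 + data_pages) * page,
				  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

		return base == MAP_FAILED ? NULL : base;	/* first page is the control page */
	}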
3c446b3d
PZ
4147static int perf_fasync(int fd, struct file *filp, int on)
4148{
496ad9aa 4149 struct inode *inode = file_inode(filp);
cdd6c482 4150 struct perf_event *event = filp->private_data;
3c446b3d
PZ
4151 int retval;
4152
4153 mutex_lock(&inode->i_mutex);
cdd6c482 4154 retval = fasync_helper(fd, filp, on, &event->fasync);
3c446b3d
PZ
4155 mutex_unlock(&inode->i_mutex);
4156
4157 if (retval < 0)
4158 return retval;
4159
4160 return 0;
4161}
4162
0793a61d 4163static const struct file_operations perf_fops = {
3326c1ce 4164 .llseek = no_llseek,
0793a61d
TG
4165 .release = perf_release,
4166 .read = perf_read,
4167 .poll = perf_poll,
d859e29f
PM
4168 .unlocked_ioctl = perf_ioctl,
4169 .compat_ioctl = perf_ioctl,
37d81828 4170 .mmap = perf_mmap,
3c446b3d 4171 .fasync = perf_fasync,
0793a61d
TG
4172};
4173
925d519a 4174/*
cdd6c482 4175 * Perf event wakeup
925d519a
PZ
4176 *
4177 * If there's data, ensure we set the poll() state and publish everything
4178 * to user-space before waking everybody up.
4179 */
4180
cdd6c482 4181void perf_event_wakeup(struct perf_event *event)
925d519a 4182{
10c6db11 4183 ring_buffer_wakeup(event);
4c9e2542 4184
cdd6c482
IM
4185 if (event->pending_kill) {
4186 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
4187 event->pending_kill = 0;
4c9e2542 4188 }
925d519a
PZ
4189}
4190
e360adbe 4191static void perf_pending_event(struct irq_work *entry)
79f14641 4192{
cdd6c482
IM
4193 struct perf_event *event = container_of(entry,
4194 struct perf_event, pending);
79f14641 4195
cdd6c482
IM
4196 if (event->pending_disable) {
4197 event->pending_disable = 0;
4198 __perf_event_disable(event);
79f14641
PZ
4199 }
4200
cdd6c482
IM
4201 if (event->pending_wakeup) {
4202 event->pending_wakeup = 0;
4203 perf_event_wakeup(event);
79f14641
PZ
4204 }
4205}
4206
39447b38
ZY
4207/*
4208 * We assume there is only KVM supporting the callbacks.
4209 * Later on, we might change it to a list if there is
4210 * another virtualization implementation supporting the callbacks.
4211 */
4212struct perf_guest_info_callbacks *perf_guest_cbs;
4213
4214int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4215{
4216 perf_guest_cbs = cbs;
4217 return 0;
4218}
4219EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
4220
4221int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4222{
4223 perf_guest_cbs = NULL;
4224 return 0;
4225}
4226EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
4227
4018994f
JO
4228static void
4229perf_output_sample_regs(struct perf_output_handle *handle,
4230 struct pt_regs *regs, u64 mask)
4231{
4232 int bit;
4233
4234 for_each_set_bit(bit, (const unsigned long *) &mask,
4235 sizeof(mask) * BITS_PER_BYTE) {
4236 u64 val;
4237
4238 val = perf_reg_value(regs, bit);
4239 perf_output_put(handle, val);
4240 }
4241}
4242
4243static void perf_sample_regs_user(struct perf_regs_user *regs_user,
4244 struct pt_regs *regs)
4245{
4246 if (!user_mode(regs)) {
4247 if (current->mm)
4248 regs = task_pt_regs(current);
4249 else
4250 regs = NULL;
4251 }
4252
4253 if (regs) {
4254 regs_user->regs = regs;
4255 regs_user->abi = perf_reg_abi(current);
4256 }
4257}
4258
c5ebcedb
JO
4259/*
4260 * Get remaining task size from user stack pointer.
4261 *
4262 * It'd be better to take stack vma map and limit this more
4263 * precisely, but there's no way to get it safely under interrupt,
4264 * so using TASK_SIZE as limit.
4265 */
4266static u64 perf_ustack_task_size(struct pt_regs *regs)
4267{
4268 unsigned long addr = perf_user_stack_pointer(regs);
4269
4270 if (!addr || addr >= TASK_SIZE)
4271 return 0;
4272
4273 return TASK_SIZE - addr;
4274}
4275
4276static u16
4277perf_sample_ustack_size(u16 stack_size, u16 header_size,
4278 struct pt_regs *regs)
4279{
4280 u64 task_size;
4281
4282 /* No regs, no stack pointer, no dump. */
4283 if (!regs)
4284 return 0;
4285
4286 /*
4287 * Check whether the requested stack dump size fits into:
4288 * - TASK_SIZE
4289 * If it doesn't, we limit the size to TASK_SIZE.
4290 *
4291 * - the remaining sample size
4292 * If it doesn't, we shrink the stack size to
4293 * fit into the remaining sample size.
4294 */
4295
4296 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
4297 stack_size = min(stack_size, (u16) task_size);
4298
4299 /* Current header size plus static size and dynamic size. */
4300 header_size += 2 * sizeof(u64);
4301
4302 /* Do we fit in with the current stack dump size? */
4303 if ((u16) (header_size + stack_size) < header_size) {
4304 /*
4305 * If we overflow the maximum size for the sample,
4306 * we customize the stack dump size to fit in.
4307 */
4308 stack_size = USHRT_MAX - header_size - sizeof(u64);
4309 stack_size = round_up(stack_size, sizeof(u64));
4310 }
4311
4312 return stack_size;
4313}
4314
4315static void
4316perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
4317 struct pt_regs *regs)
4318{
4319 /* Case of a kernel thread, nothing to dump */
4320 if (!regs) {
4321 u64 size = 0;
4322 perf_output_put(handle, size);
4323 } else {
4324 unsigned long sp;
4325 unsigned int rem;
4326 u64 dyn_size;
4327
4328 /*
4329 * We dump:
4330 * static size
4331 * - the size requested by user or the best one we can fit
4332 * in to the sample max size
4333 * data
4334 * - user stack dump data
4335 * dynamic size
4336 * - the actual dumped size
4337 */
4338
4339 /* Static size. */
4340 perf_output_put(handle, dump_size);
4341
4342 /* Data. */
4343 sp = perf_user_stack_pointer(regs);
4344 rem = __output_copy_user(handle, (void *) sp, dump_size);
4345 dyn_size = dump_size - rem;
4346
4347 perf_output_skip(handle, rem);
4348
4349 /* Dynamic size. */
4350 perf_output_put(handle, dyn_size);
4351 }
4352}
4353
c980d109
ACM
4354static void __perf_event_header__init_id(struct perf_event_header *header,
4355 struct perf_sample_data *data,
4356 struct perf_event *event)
6844c09d
ACM
4357{
4358 u64 sample_type = event->attr.sample_type;
4359
4360 data->type = sample_type;
4361 header->size += event->id_header_size;
4362
4363 if (sample_type & PERF_SAMPLE_TID) {
4364 /* namespace issues */
4365 data->tid_entry.pid = perf_event_pid(event, current);
4366 data->tid_entry.tid = perf_event_tid(event, current);
4367 }
4368
4369 if (sample_type & PERF_SAMPLE_TIME)
4370 data->time = perf_clock();
4371
ff3d527c 4372 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
6844c09d
ACM
4373 data->id = primary_event_id(event);
4374
4375 if (sample_type & PERF_SAMPLE_STREAM_ID)
4376 data->stream_id = event->id;
4377
4378 if (sample_type & PERF_SAMPLE_CPU) {
4379 data->cpu_entry.cpu = raw_smp_processor_id();
4380 data->cpu_entry.reserved = 0;
4381 }
4382}
4383
76369139
FW
4384void perf_event_header__init_id(struct perf_event_header *header,
4385 struct perf_sample_data *data,
4386 struct perf_event *event)
c980d109
ACM
4387{
4388 if (event->attr.sample_id_all)
4389 __perf_event_header__init_id(header, data, event);
4390}
4391
4392static void __perf_event__output_id_sample(struct perf_output_handle *handle,
4393 struct perf_sample_data *data)
4394{
4395 u64 sample_type = data->type;
4396
4397 if (sample_type & PERF_SAMPLE_TID)
4398 perf_output_put(handle, data->tid_entry);
4399
4400 if (sample_type & PERF_SAMPLE_TIME)
4401 perf_output_put(handle, data->time);
4402
4403 if (sample_type & PERF_SAMPLE_ID)
4404 perf_output_put(handle, data->id);
4405
4406 if (sample_type & PERF_SAMPLE_STREAM_ID)
4407 perf_output_put(handle, data->stream_id);
4408
4409 if (sample_type & PERF_SAMPLE_CPU)
4410 perf_output_put(handle, data->cpu_entry);
ff3d527c
AH
4411
4412 if (sample_type & PERF_SAMPLE_IDENTIFIER)
4413 perf_output_put(handle, data->id);
c980d109
ACM
4414}
4415
76369139
FW
4416void perf_event__output_id_sample(struct perf_event *event,
4417 struct perf_output_handle *handle,
4418 struct perf_sample_data *sample)
c980d109
ACM
4419{
4420 if (event->attr.sample_id_all)
4421 __perf_event__output_id_sample(handle, sample);
4422}
4423
3dab77fb 4424static void perf_output_read_one(struct perf_output_handle *handle,
eed01528
SE
4425 struct perf_event *event,
4426 u64 enabled, u64 running)
3dab77fb 4427{
cdd6c482 4428 u64 read_format = event->attr.read_format;
3dab77fb
PZ
4429 u64 values[4];
4430 int n = 0;
4431
b5e58793 4432 values[n++] = perf_event_count(event);
3dab77fb 4433 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
eed01528 4434 values[n++] = enabled +
cdd6c482 4435 atomic64_read(&event->child_total_time_enabled);
3dab77fb
PZ
4436 }
4437 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
eed01528 4438 values[n++] = running +
cdd6c482 4439 atomic64_read(&event->child_total_time_running);
3dab77fb
PZ
4440 }
4441 if (read_format & PERF_FORMAT_ID)
cdd6c482 4442 values[n++] = primary_event_id(event);
3dab77fb 4443
76369139 4444 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
4445}
4446
4447/*
cdd6c482 4448 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3dab77fb
PZ
4449 */
4450static void perf_output_read_group(struct perf_output_handle *handle,
eed01528
SE
4451 struct perf_event *event,
4452 u64 enabled, u64 running)
3dab77fb 4453{
cdd6c482
IM
4454 struct perf_event *leader = event->group_leader, *sub;
4455 u64 read_format = event->attr.read_format;
3dab77fb
PZ
4456 u64 values[5];
4457 int n = 0;
4458
4459 values[n++] = 1 + leader->nr_siblings;
4460
4461 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
eed01528 4462 values[n++] = enabled;
3dab77fb
PZ
4463
4464 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
eed01528 4465 values[n++] = running;
3dab77fb 4466
cdd6c482 4467 if (leader != event)
3dab77fb
PZ
4468 leader->pmu->read(leader);
4469
b5e58793 4470 values[n++] = perf_event_count(leader);
3dab77fb 4471 if (read_format & PERF_FORMAT_ID)
cdd6c482 4472 values[n++] = primary_event_id(leader);
3dab77fb 4473
76369139 4474 __output_copy(handle, values, n * sizeof(u64));
3dab77fb 4475
65abc865 4476 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3dab77fb
PZ
4477 n = 0;
4478
6f5ab001
JO
4479 if ((sub != event) &&
4480 (sub->state == PERF_EVENT_STATE_ACTIVE))
3dab77fb
PZ
4481 sub->pmu->read(sub);
4482
b5e58793 4483 values[n++] = perf_event_count(sub);
3dab77fb 4484 if (read_format & PERF_FORMAT_ID)
cdd6c482 4485 values[n++] = primary_event_id(sub);
3dab77fb 4486
76369139 4487 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
4488 }
4489}
4490
eed01528
SE
4491#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
4492 PERF_FORMAT_TOTAL_TIME_RUNNING)
4493
3dab77fb 4494static void perf_output_read(struct perf_output_handle *handle,
cdd6c482 4495 struct perf_event *event)
3dab77fb 4496{
e3f3541c 4497 u64 enabled = 0, running = 0, now;
eed01528
SE
4498 u64 read_format = event->attr.read_format;
4499
4500 /*
4501 * compute total_time_enabled, total_time_running
4502 * based on snapshot values taken when the event
4503 * was last scheduled in.
4504 *
4505 * we cannot simply call update_context_time()
4506 * because of locking issues as we are called in
4507 * NMI context
4508 */
c4794295 4509 if (read_format & PERF_FORMAT_TOTAL_TIMES)
e3f3541c 4510 calc_timer_values(event, &now, &enabled, &running);
eed01528 4511
cdd6c482 4512 if (event->attr.read_format & PERF_FORMAT_GROUP)
eed01528 4513 perf_output_read_group(handle, event, enabled, running);
3dab77fb 4514 else
eed01528 4515 perf_output_read_one(handle, event, enabled, running);
3dab77fb
PZ
4516}
4517
5622f295
MM
4518void perf_output_sample(struct perf_output_handle *handle,
4519 struct perf_event_header *header,
4520 struct perf_sample_data *data,
cdd6c482 4521 struct perf_event *event)
5622f295
MM
4522{
4523 u64 sample_type = data->type;
4524
4525 perf_output_put(handle, *header);
4526
ff3d527c
AH
4527 if (sample_type & PERF_SAMPLE_IDENTIFIER)
4528 perf_output_put(handle, data->id);
4529
5622f295
MM
4530 if (sample_type & PERF_SAMPLE_IP)
4531 perf_output_put(handle, data->ip);
4532
4533 if (sample_type & PERF_SAMPLE_TID)
4534 perf_output_put(handle, data->tid_entry);
4535
4536 if (sample_type & PERF_SAMPLE_TIME)
4537 perf_output_put(handle, data->time);
4538
4539 if (sample_type & PERF_SAMPLE_ADDR)
4540 perf_output_put(handle, data->addr);
4541
4542 if (sample_type & PERF_SAMPLE_ID)
4543 perf_output_put(handle, data->id);
4544
4545 if (sample_type & PERF_SAMPLE_STREAM_ID)
4546 perf_output_put(handle, data->stream_id);
4547
4548 if (sample_type & PERF_SAMPLE_CPU)
4549 perf_output_put(handle, data->cpu_entry);
4550
4551 if (sample_type & PERF_SAMPLE_PERIOD)
4552 perf_output_put(handle, data->period);
4553
4554 if (sample_type & PERF_SAMPLE_READ)
cdd6c482 4555 perf_output_read(handle, event);
5622f295
MM
4556
4557 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4558 if (data->callchain) {
4559 int size = 1;
4560
4561 if (data->callchain)
4562 size += data->callchain->nr;
4563
4564 size *= sizeof(u64);
4565
76369139 4566 __output_copy(handle, data->callchain, size);
5622f295
MM
4567 } else {
4568 u64 nr = 0;
4569 perf_output_put(handle, nr);
4570 }
4571 }
4572
4573 if (sample_type & PERF_SAMPLE_RAW) {
4574 if (data->raw) {
4575 perf_output_put(handle, data->raw->size);
76369139
FW
4576 __output_copy(handle, data->raw->data,
4577 data->raw->size);
5622f295
MM
4578 } else {
4579 struct {
4580 u32 size;
4581 u32 data;
4582 } raw = {
4583 .size = sizeof(u32),
4584 .data = 0,
4585 };
4586 perf_output_put(handle, raw);
4587 }
4588 }
a7ac67ea 4589
bce38cd5
SE
4590 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4591 if (data->br_stack) {
4592 size_t size;
4593
4594 size = data->br_stack->nr
4595 * sizeof(struct perf_branch_entry);
4596
4597 perf_output_put(handle, data->br_stack->nr);
4598 perf_output_copy(handle, data->br_stack->entries, size);
4599 } else {
4600 /*
4601 * we always store at least the value of nr
4602 */
4603 u64 nr = 0;
4604 perf_output_put(handle, nr);
4605 }
4606 }
4018994f
JO
4607
4608 if (sample_type & PERF_SAMPLE_REGS_USER) {
4609 u64 abi = data->regs_user.abi;
4610
4611 /*
4612 * If there are no regs to dump, notice it through
4613 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
4614 */
4615 perf_output_put(handle, abi);
4616
4617 if (abi) {
4618 u64 mask = event->attr.sample_regs_user;
4619 perf_output_sample_regs(handle,
4620 data->regs_user.regs,
4621 mask);
4622 }
4623 }
c5ebcedb 4624
a5cdd40c 4625 if (sample_type & PERF_SAMPLE_STACK_USER) {
c5ebcedb
JO
4626 perf_output_sample_ustack(handle,
4627 data->stack_user_size,
4628 data->regs_user.regs);
a5cdd40c 4629 }
c3feedf2
AK
4630
4631 if (sample_type & PERF_SAMPLE_WEIGHT)
4632 perf_output_put(handle, data->weight);
d6be9ad6
SE
4633
4634 if (sample_type & PERF_SAMPLE_DATA_SRC)
4635 perf_output_put(handle, data->data_src.val);
a5cdd40c 4636
fdfbbd07
AK
4637 if (sample_type & PERF_SAMPLE_TRANSACTION)
4638 perf_output_put(handle, data->txn);
4639
a5cdd40c
PZ
4640 if (!event->attr.watermark) {
4641 int wakeup_events = event->attr.wakeup_events;
4642
4643 if (wakeup_events) {
4644 struct ring_buffer *rb = handle->rb;
4645 int events = local_inc_return(&rb->events);
4646
4647 if (events >= wakeup_events) {
4648 local_sub(wakeup_events, &rb->events);
4649 local_inc(&rb->wakeup);
4650 }
4651 }
4652 }
5622f295
MM
4653}
4654
4655void perf_prepare_sample(struct perf_event_header *header,
4656 struct perf_sample_data *data,
cdd6c482 4657 struct perf_event *event,
5622f295 4658 struct pt_regs *regs)
7b732a75 4659{
cdd6c482 4660 u64 sample_type = event->attr.sample_type;
7b732a75 4661
cdd6c482 4662 header->type = PERF_RECORD_SAMPLE;
c320c7b7 4663 header->size = sizeof(*header) + event->header_size;
5622f295
MM
4664
4665 header->misc = 0;
4666 header->misc |= perf_misc_flags(regs);
6fab0192 4667
c980d109 4668 __perf_event_header__init_id(header, data, event);
6844c09d 4669
c320c7b7 4670 if (sample_type & PERF_SAMPLE_IP)
5622f295
MM
4671 data->ip = perf_instruction_pointer(regs);
4672
b23f3325 4673 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5622f295 4674 int size = 1;
394ee076 4675
e6dab5ff 4676 data->callchain = perf_callchain(event, regs);
5622f295
MM
4677
4678 if (data->callchain)
4679 size += data->callchain->nr;
4680
4681 header->size += size * sizeof(u64);
394ee076
PZ
4682 }
4683
3a43ce68 4684 if (sample_type & PERF_SAMPLE_RAW) {
a044560c
PZ
4685 int size = sizeof(u32);
4686
4687 if (data->raw)
4688 size += data->raw->size;
4689 else
4690 size += sizeof(u32);
4691
4692 WARN_ON_ONCE(size & (sizeof(u64)-1));
5622f295 4693 header->size += size;
7f453c24 4694 }
bce38cd5
SE
4695
4696 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4697 int size = sizeof(u64); /* nr */
4698 if (data->br_stack) {
4699 size += data->br_stack->nr
4700 * sizeof(struct perf_branch_entry);
4701 }
4702 header->size += size;
4703 }
4018994f
JO
4704
4705 if (sample_type & PERF_SAMPLE_REGS_USER) {
4706 /* regs dump ABI info */
4707 int size = sizeof(u64);
4708
4709 perf_sample_regs_user(&data->regs_user, regs);
4710
4711 if (data->regs_user.regs) {
4712 u64 mask = event->attr.sample_regs_user;
4713 size += hweight64(mask) * sizeof(u64);
4714 }
4715
4716 header->size += size;
4717 }
c5ebcedb
JO
4718
4719 if (sample_type & PERF_SAMPLE_STACK_USER) {
4720 /*
4721 * Either we need the PERF_SAMPLE_STACK_USER bit to be always
4722 * processed as the last one, or an additional check added
4723 * in case a new sample type is added, because we could eat
4724 * up the rest of the sample size.
4725 */
4726 struct perf_regs_user *uregs = &data->regs_user;
4727 u16 stack_size = event->attr.sample_stack_user;
4728 u16 size = sizeof(u64);
4729
4730 if (!uregs->abi)
4731 perf_sample_regs_user(uregs, regs);
4732
4733 stack_size = perf_sample_ustack_size(stack_size, header->size,
4734 uregs->regs);
4735
4736 /*
4737 * If there is something to dump, add space for the dump
4738 * itself and for the field that tells the dynamic size,
4739 * which is how many have been actually dumped.
4740 */
4741 if (stack_size)
4742 size += sizeof(u64) + stack_size;
4743
4744 data->stack_user_size = stack_size;
4745 header->size += size;
4746 }
5622f295 4747}
7f453c24 4748
a8b0ca17 4749static void perf_event_output(struct perf_event *event,
5622f295
MM
4750 struct perf_sample_data *data,
4751 struct pt_regs *regs)
4752{
4753 struct perf_output_handle handle;
4754 struct perf_event_header header;
689802b2 4755
927c7a9e
FW
4756 /* protect the callchain buffers */
4757 rcu_read_lock();
4758
cdd6c482 4759 perf_prepare_sample(&header, data, event, regs);
5c148194 4760
a7ac67ea 4761 if (perf_output_begin(&handle, event, header.size))
927c7a9e 4762 goto exit;
0322cd6e 4763
cdd6c482 4764 perf_output_sample(&handle, &header, data, event);
f413cdb8 4765
8a057d84 4766 perf_output_end(&handle);
927c7a9e
FW
4767
4768exit:
4769 rcu_read_unlock();
0322cd6e
PZ
4770}
4771
38b200d6 4772/*
cdd6c482 4773 * read event_id
38b200d6
PZ
4774 */
4775
4776struct perf_read_event {
4777 struct perf_event_header header;
4778
4779 u32 pid;
4780 u32 tid;
38b200d6
PZ
4781};
4782
4783static void
cdd6c482 4784perf_event_read_event(struct perf_event *event,
38b200d6
PZ
4785 struct task_struct *task)
4786{
4787 struct perf_output_handle handle;
c980d109 4788 struct perf_sample_data sample;
dfc65094 4789 struct perf_read_event read_event = {
38b200d6 4790 .header = {
cdd6c482 4791 .type = PERF_RECORD_READ,
38b200d6 4792 .misc = 0,
c320c7b7 4793 .size = sizeof(read_event) + event->read_size,
38b200d6 4794 },
cdd6c482
IM
4795 .pid = perf_event_pid(event, task),
4796 .tid = perf_event_tid(event, task),
38b200d6 4797 };
3dab77fb 4798 int ret;
38b200d6 4799
c980d109 4800 perf_event_header__init_id(&read_event.header, &sample, event);
a7ac67ea 4801 ret = perf_output_begin(&handle, event, read_event.header.size);
38b200d6
PZ
4802 if (ret)
4803 return;
4804
dfc65094 4805 perf_output_put(&handle, read_event);
cdd6c482 4806 perf_output_read(&handle, event);
c980d109 4807 perf_event__output_id_sample(event, &handle, &sample);
3dab77fb 4808
38b200d6
PZ
4809 perf_output_end(&handle);
4810}
4811
52d857a8
JO
4812typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
4813
4814static void
4815perf_event_aux_ctx(struct perf_event_context *ctx,
52d857a8
JO
4816 perf_event_aux_output_cb output,
4817 void *data)
4818{
4819 struct perf_event *event;
4820
4821 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4822 if (event->state < PERF_EVENT_STATE_INACTIVE)
4823 continue;
4824 if (!event_filter_match(event))
4825 continue;
67516844 4826 output(event, data);
52d857a8
JO
4827 }
4828}
4829
4830static void
67516844 4831perf_event_aux(perf_event_aux_output_cb output, void *data,
52d857a8
JO
4832 struct perf_event_context *task_ctx)
4833{
4834 struct perf_cpu_context *cpuctx;
4835 struct perf_event_context *ctx;
4836 struct pmu *pmu;
4837 int ctxn;
4838
4839 rcu_read_lock();
4840 list_for_each_entry_rcu(pmu, &pmus, entry) {
4841 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4842 if (cpuctx->unique_pmu != pmu)
4843 goto next;
67516844 4844 perf_event_aux_ctx(&cpuctx->ctx, output, data);
52d857a8
JO
4845 if (task_ctx)
4846 goto next;
4847 ctxn = pmu->task_ctx_nr;
4848 if (ctxn < 0)
4849 goto next;
4850 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4851 if (ctx)
67516844 4852 perf_event_aux_ctx(ctx, output, data);
52d857a8
JO
4853next:
4854 put_cpu_ptr(pmu->pmu_cpu_context);
4855 }
4856
4857 if (task_ctx) {
4858 preempt_disable();
67516844 4859 perf_event_aux_ctx(task_ctx, output, data);
52d857a8
JO
4860 preempt_enable();
4861 }
4862 rcu_read_unlock();
4863}
4864
60313ebe 4865/*
9f498cc5
PZ
4866 * task tracking -- fork/exit
4867 *
13d7a241 4868 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
60313ebe
PZ
4869 */
4870
9f498cc5 4871struct perf_task_event {
3a80b4a3 4872 struct task_struct *task;
cdd6c482 4873 struct perf_event_context *task_ctx;
60313ebe
PZ
4874
4875 struct {
4876 struct perf_event_header header;
4877
4878 u32 pid;
4879 u32 ppid;
9f498cc5
PZ
4880 u32 tid;
4881 u32 ptid;
393b2ad8 4882 u64 time;
cdd6c482 4883 } event_id;
60313ebe
PZ
4884};
4885
67516844
JO
4886static int perf_event_task_match(struct perf_event *event)
4887{
13d7a241
SE
4888 return event->attr.comm || event->attr.mmap ||
4889 event->attr.mmap2 || event->attr.mmap_data ||
4890 event->attr.task;
67516844
JO
4891}
4892
cdd6c482 4893static void perf_event_task_output(struct perf_event *event,
52d857a8 4894 void *data)
60313ebe 4895{
52d857a8 4896 struct perf_task_event *task_event = data;
60313ebe 4897 struct perf_output_handle handle;
c980d109 4898 struct perf_sample_data sample;
9f498cc5 4899 struct task_struct *task = task_event->task;
c980d109 4900 int ret, size = task_event->event_id.header.size;
8bb39f9a 4901
67516844
JO
4902 if (!perf_event_task_match(event))
4903 return;
4904
c980d109 4905 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
60313ebe 4906
c980d109 4907 ret = perf_output_begin(&handle, event,
a7ac67ea 4908 task_event->event_id.header.size);
ef60777c 4909 if (ret)
c980d109 4910 goto out;
60313ebe 4911
cdd6c482
IM
4912 task_event->event_id.pid = perf_event_pid(event, task);
4913 task_event->event_id.ppid = perf_event_pid(event, current);
60313ebe 4914
cdd6c482
IM
4915 task_event->event_id.tid = perf_event_tid(event, task);
4916 task_event->event_id.ptid = perf_event_tid(event, current);
9f498cc5 4917
cdd6c482 4918 perf_output_put(&handle, task_event->event_id);
393b2ad8 4919
c980d109
ACM
4920 perf_event__output_id_sample(event, &handle, &sample);
4921
60313ebe 4922 perf_output_end(&handle);
c980d109
ACM
4923out:
4924 task_event->event_id.header.size = size;
60313ebe
PZ
4925}
4926
cdd6c482
IM
4927static void perf_event_task(struct task_struct *task,
4928 struct perf_event_context *task_ctx,
3a80b4a3 4929 int new)
60313ebe 4930{
9f498cc5 4931 struct perf_task_event task_event;
60313ebe 4932
cdd6c482
IM
4933 if (!atomic_read(&nr_comm_events) &&
4934 !atomic_read(&nr_mmap_events) &&
4935 !atomic_read(&nr_task_events))
60313ebe
PZ
4936 return;
4937
9f498cc5 4938 task_event = (struct perf_task_event){
3a80b4a3
PZ
4939 .task = task,
4940 .task_ctx = task_ctx,
cdd6c482 4941 .event_id = {
60313ebe 4942 .header = {
cdd6c482 4943 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
573402db 4944 .misc = 0,
cdd6c482 4945 .size = sizeof(task_event.event_id),
60313ebe 4946 },
573402db
PZ
4947 /* .pid */
4948 /* .ppid */
9f498cc5
PZ
4949 /* .tid */
4950 /* .ptid */
6f93d0a7 4951 .time = perf_clock(),
60313ebe
PZ
4952 },
4953 };
4954
67516844 4955 perf_event_aux(perf_event_task_output,
52d857a8
JO
4956 &task_event,
4957 task_ctx);
9f498cc5
PZ
4958}
4959
cdd6c482 4960void perf_event_fork(struct task_struct *task)
9f498cc5 4961{
cdd6c482 4962 perf_event_task(task, NULL, 1);
60313ebe
PZ
4963}
4964
8d1b2d93
PZ
4965/*
4966 * comm tracking
4967 */
4968
4969struct perf_comm_event {
22a4f650
IM
4970 struct task_struct *task;
4971 char *comm;
8d1b2d93
PZ
4972 int comm_size;
4973
4974 struct {
4975 struct perf_event_header header;
4976
4977 u32 pid;
4978 u32 tid;
cdd6c482 4979 } event_id;
8d1b2d93
PZ
4980};
4981
67516844
JO
4982static int perf_event_comm_match(struct perf_event *event)
4983{
4984 return event->attr.comm;
4985}
4986
cdd6c482 4987static void perf_event_comm_output(struct perf_event *event,
52d857a8 4988 void *data)
8d1b2d93 4989{
52d857a8 4990 struct perf_comm_event *comm_event = data;
8d1b2d93 4991 struct perf_output_handle handle;
c980d109 4992 struct perf_sample_data sample;
cdd6c482 4993 int size = comm_event->event_id.header.size;
c980d109
ACM
4994 int ret;
4995
67516844
JO
4996 if (!perf_event_comm_match(event))
4997 return;
4998
c980d109
ACM
4999 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
5000 ret = perf_output_begin(&handle, event,
a7ac67ea 5001 comm_event->event_id.header.size);
8d1b2d93
PZ
5002
5003 if (ret)
c980d109 5004 goto out;
8d1b2d93 5005
cdd6c482
IM
5006 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
5007 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
709e50cf 5008
cdd6c482 5009 perf_output_put(&handle, comm_event->event_id);
76369139 5010 __output_copy(&handle, comm_event->comm,
8d1b2d93 5011 comm_event->comm_size);
c980d109
ACM
5012
5013 perf_event__output_id_sample(event, &handle, &sample);
5014
8d1b2d93 5015 perf_output_end(&handle);
c980d109
ACM
5016out:
5017 comm_event->event_id.header.size = size;
8d1b2d93
PZ
5018}
5019
cdd6c482 5020static void perf_event_comm_event(struct perf_comm_event *comm_event)
8d1b2d93 5021{
413ee3b4 5022 char comm[TASK_COMM_LEN];
8d1b2d93 5023 unsigned int size;
8d1b2d93 5024
413ee3b4 5025 memset(comm, 0, sizeof(comm));
96b02d78 5026 strlcpy(comm, comm_event->task->comm, sizeof(comm));
888fcee0 5027 size = ALIGN(strlen(comm)+1, sizeof(u64));
8d1b2d93
PZ
5028
5029 comm_event->comm = comm;
5030 comm_event->comm_size = size;
5031
cdd6c482 5032 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
8dc85d54 5033
67516844 5034 perf_event_aux(perf_event_comm_output,
52d857a8
JO
5035 comm_event,
5036 NULL);
8d1b2d93
PZ
5037}
5038
cdd6c482 5039void perf_event_comm(struct task_struct *task)
8d1b2d93 5040{
9ee318a7 5041 struct perf_comm_event comm_event;
8dc85d54
PZ
5042 struct perf_event_context *ctx;
5043 int ctxn;
9ee318a7 5044
c79aa0d9 5045 rcu_read_lock();
8dc85d54
PZ
5046 for_each_task_context_nr(ctxn) {
5047 ctx = task->perf_event_ctxp[ctxn];
5048 if (!ctx)
5049 continue;
9ee318a7 5050
8dc85d54
PZ
5051 perf_event_enable_on_exec(ctx);
5052 }
c79aa0d9 5053 rcu_read_unlock();
9ee318a7 5054
cdd6c482 5055 if (!atomic_read(&nr_comm_events))
9ee318a7 5056 return;
a63eaf34 5057
9ee318a7 5058 comm_event = (struct perf_comm_event){
8d1b2d93 5059 .task = task,
573402db
PZ
5060 /* .comm */
5061 /* .comm_size */
cdd6c482 5062 .event_id = {
573402db 5063 .header = {
cdd6c482 5064 .type = PERF_RECORD_COMM,
573402db
PZ
5065 .misc = 0,
5066 /* .size */
5067 },
5068 /* .pid */
5069 /* .tid */
8d1b2d93
PZ
5070 },
5071 };
5072
cdd6c482 5073 perf_event_comm_event(&comm_event);
8d1b2d93
PZ
5074}
5075
0a4a9391
PZ
5076/*
5077 * mmap tracking
5078 */
5079
5080struct perf_mmap_event {
089dd79d
PZ
5081 struct vm_area_struct *vma;
5082
5083 const char *file_name;
5084 int file_size;
13d7a241
SE
5085 int maj, min;
5086 u64 ino;
5087 u64 ino_generation;
0a4a9391
PZ
5088
5089 struct {
5090 struct perf_event_header header;
5091
5092 u32 pid;
5093 u32 tid;
5094 u64 start;
5095 u64 len;
5096 u64 pgoff;
cdd6c482 5097 } event_id;
0a4a9391
PZ
5098};
5099
67516844
JO
5100static int perf_event_mmap_match(struct perf_event *event,
5101 void *data)
5102{
5103 struct perf_mmap_event *mmap_event = data;
5104 struct vm_area_struct *vma = mmap_event->vma;
5105 int executable = vma->vm_flags & VM_EXEC;
5106
5107 return (!executable && event->attr.mmap_data) ||
13d7a241 5108 (executable && (event->attr.mmap || event->attr.mmap2));
67516844
JO
5109}
5110
cdd6c482 5111static void perf_event_mmap_output(struct perf_event *event,
52d857a8 5112 void *data)
0a4a9391 5113{
52d857a8 5114 struct perf_mmap_event *mmap_event = data;
0a4a9391 5115 struct perf_output_handle handle;
c980d109 5116 struct perf_sample_data sample;
cdd6c482 5117 int size = mmap_event->event_id.header.size;
c980d109 5118 int ret;
0a4a9391 5119
67516844
JO
5120 if (!perf_event_mmap_match(event, data))
5121 return;
5122
13d7a241
SE
5123 if (event->attr.mmap2) {
5124 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
5125 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
5126 mmap_event->event_id.header.size += sizeof(mmap_event->min);
5127 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
d008d525 5128 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
13d7a241
SE
5129 }
5130
c980d109
ACM
5131 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
5132 ret = perf_output_begin(&handle, event,
a7ac67ea 5133 mmap_event->event_id.header.size);
0a4a9391 5134 if (ret)
c980d109 5135 goto out;
0a4a9391 5136
cdd6c482
IM
5137 mmap_event->event_id.pid = perf_event_pid(event, current);
5138 mmap_event->event_id.tid = perf_event_tid(event, current);
709e50cf 5139
cdd6c482 5140 perf_output_put(&handle, mmap_event->event_id);
13d7a241
SE
5141
5142 if (event->attr.mmap2) {
5143 perf_output_put(&handle, mmap_event->maj);
5144 perf_output_put(&handle, mmap_event->min);
5145 perf_output_put(&handle, mmap_event->ino);
5146 perf_output_put(&handle, mmap_event->ino_generation);
5147 }
5148
76369139 5149 __output_copy(&handle, mmap_event->file_name,
0a4a9391 5150 mmap_event->file_size);
c980d109
ACM
5151
5152 perf_event__output_id_sample(event, &handle, &sample);
5153
78d613eb 5154 perf_output_end(&handle);
c980d109
ACM
5155out:
5156 mmap_event->event_id.header.size = size;
0a4a9391
PZ
5157}
5158
cdd6c482 5159static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
0a4a9391 5160{
089dd79d
PZ
5161 struct vm_area_struct *vma = mmap_event->vma;
5162 struct file *file = vma->vm_file;
13d7a241
SE
5163 int maj = 0, min = 0;
5164 u64 ino = 0, gen = 0;
0a4a9391
PZ
5165 unsigned int size;
5166 char tmp[16];
5167 char *buf = NULL;
2c42cfbf 5168 char *name;
413ee3b4 5169
0a4a9391 5170 if (file) {
13d7a241
SE
5171 struct inode *inode;
5172 dev_t dev;
3ea2f2b9 5173
2c42cfbf 5174 buf = kmalloc(PATH_MAX, GFP_KERNEL);
0a4a9391 5175 if (!buf) {
c7e548b4
ON
5176 name = "//enomem";
5177 goto cpy_name;
0a4a9391 5178 }
413ee3b4 5179 /*
3ea2f2b9 5180 * d_path() works from the end of the rb backwards, so we
413ee3b4
AB
5181 * need to add enough zero bytes after the string to handle
5182 * the 64bit alignment we do later.
5183 */
3ea2f2b9 5184 name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
0a4a9391 5185 if (IS_ERR(name)) {
c7e548b4
ON
5186 name = "//toolong";
5187 goto cpy_name;
0a4a9391 5188 }
13d7a241
SE
5189 inode = file_inode(vma->vm_file);
5190 dev = inode->i_sb->s_dev;
5191 ino = inode->i_ino;
5192 gen = inode->i_generation;
5193 maj = MAJOR(dev);
5194 min = MINOR(dev);
c7e548b4 5195 goto got_name;
0a4a9391 5196 } else {
2c42cfbf 5197 name = (char *)arch_vma_name(vma);
c7e548b4
ON
5198 if (name)
5199 goto cpy_name;
089dd79d 5200
32c5fb7e 5201 if (vma->vm_start <= vma->vm_mm->start_brk &&
3af9e859 5202 vma->vm_end >= vma->vm_mm->brk) {
c7e548b4
ON
5203 name = "[heap]";
5204 goto cpy_name;
32c5fb7e
ON
5205 }
5206 if (vma->vm_start <= vma->vm_mm->start_stack &&
3af9e859 5207 vma->vm_end >= vma->vm_mm->start_stack) {
c7e548b4
ON
5208 name = "[stack]";
5209 goto cpy_name;
089dd79d
PZ
5210 }
5211
c7e548b4
ON
5212 name = "//anon";
5213 goto cpy_name;
0a4a9391
PZ
5214 }
5215
c7e548b4
ON
5216cpy_name:
5217 strlcpy(tmp, name, sizeof(tmp));
5218 name = tmp;
0a4a9391 5219got_name:
2c42cfbf
PZ
5220 /*
5221 * Since our buffer works in 8 byte units we need to align our string
5222 * size to a multiple of 8. However, we must guarantee the tail end is
5223 * zero'd out to avoid leaking random bits to userspace.
5224 */
5225 size = strlen(name)+1;
5226 while (!IS_ALIGNED(size, sizeof(u64)))
5227 name[size++] = '\0';
0a4a9391
PZ
5228
5229 mmap_event->file_name = name;
5230 mmap_event->file_size = size;
13d7a241
SE
5231 mmap_event->maj = maj;
5232 mmap_event->min = min;
5233 mmap_event->ino = ino;
5234 mmap_event->ino_generation = gen;
0a4a9391 5235
2fe85427
SE
5236 if (!(vma->vm_flags & VM_EXEC))
5237 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
5238
cdd6c482 5239 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
0a4a9391 5240
67516844 5241 perf_event_aux(perf_event_mmap_output,
52d857a8
JO
5242 mmap_event,
5243 NULL);
665c2142 5244
0a4a9391
PZ
5245 kfree(buf);
5246}
5247
3af9e859 5248void perf_event_mmap(struct vm_area_struct *vma)
0a4a9391 5249{
9ee318a7
PZ
5250 struct perf_mmap_event mmap_event;
5251
cdd6c482 5252 if (!atomic_read(&nr_mmap_events))
9ee318a7
PZ
5253 return;
5254
5255 mmap_event = (struct perf_mmap_event){
089dd79d 5256 .vma = vma,
573402db
PZ
5257 /* .file_name */
5258 /* .file_size */
cdd6c482 5259 .event_id = {
573402db 5260 .header = {
cdd6c482 5261 .type = PERF_RECORD_MMAP,
39447b38 5262 .misc = PERF_RECORD_MISC_USER,
573402db
PZ
5263 /* .size */
5264 },
5265 /* .pid */
5266 /* .tid */
089dd79d
PZ
5267 .start = vma->vm_start,
5268 .len = vma->vm_end - vma->vm_start,
3a0304e9 5269 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
0a4a9391 5270 },
13d7a241
SE
5271 /* .maj (attr_mmap2 only) */
5272 /* .min (attr_mmap2 only) */
5273 /* .ino (attr_mmap2 only) */
5274 /* .ino_generation (attr_mmap2 only) */
0a4a9391
PZ
5275 };
5276
cdd6c482 5277 perf_event_mmap_event(&mmap_event);
0a4a9391
PZ
5278}
5279
a78ac325
PZ
5280/*
5281 * IRQ throttle logging
5282 */
5283
cdd6c482 5284static void perf_log_throttle(struct perf_event *event, int enable)
a78ac325
PZ
5285{
5286 struct perf_output_handle handle;
c980d109 5287 struct perf_sample_data sample;
a78ac325
PZ
5288 int ret;
5289
5290 struct {
5291 struct perf_event_header header;
5292 u64 time;
cca3f454 5293 u64 id;
7f453c24 5294 u64 stream_id;
a78ac325
PZ
5295 } throttle_event = {
5296 .header = {
cdd6c482 5297 .type = PERF_RECORD_THROTTLE,
a78ac325
PZ
5298 .misc = 0,
5299 .size = sizeof(throttle_event),
5300 },
def0a9b2 5301 .time = perf_clock(),
cdd6c482
IM
5302 .id = primary_event_id(event),
5303 .stream_id = event->id,
a78ac325
PZ
5304 };
5305
966ee4d6 5306 if (enable)
cdd6c482 5307 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
966ee4d6 5308
c980d109
ACM
5309 perf_event_header__init_id(&throttle_event.header, &sample, event);
5310
5311 ret = perf_output_begin(&handle, event,
a7ac67ea 5312 throttle_event.header.size);
a78ac325
PZ
5313 if (ret)
5314 return;
5315
5316 perf_output_put(&handle, throttle_event);
c980d109 5317 perf_event__output_id_sample(event, &handle, &sample);
a78ac325
PZ
5318 perf_output_end(&handle);
5319}
5320
f6c7d5fe 5321/*
cdd6c482 5322 * Generic event overflow handling, sampling.
f6c7d5fe
PZ
5323 */
5324
a8b0ca17 5325static int __perf_event_overflow(struct perf_event *event,
5622f295
MM
5326 int throttle, struct perf_sample_data *data,
5327 struct pt_regs *regs)
f6c7d5fe 5328{
cdd6c482
IM
5329 int events = atomic_read(&event->event_limit);
5330 struct hw_perf_event *hwc = &event->hw;
e050e3f0 5331 u64 seq;
79f14641
PZ
5332 int ret = 0;
5333
96398826
PZ
5334 /*
5335 * Non-sampling counters might still use the PMI to fold short
5336 * hardware counters, ignore those.
5337 */
5338 if (unlikely(!is_sampling_event(event)))
5339 return 0;
5340
e050e3f0
SE
5341 seq = __this_cpu_read(perf_throttled_seq);
5342 if (seq != hwc->interrupts_seq) {
5343 hwc->interrupts_seq = seq;
5344 hwc->interrupts = 1;
5345 } else {
5346 hwc->interrupts++;
5347 if (unlikely(throttle
5348 && hwc->interrupts >= max_samples_per_tick)) {
5349 __this_cpu_inc(perf_throttled_count);
163ec435
PZ
5350 hwc->interrupts = MAX_INTERRUPTS;
5351 perf_log_throttle(event, 0);
d84153d6 5352 tick_nohz_full_kick();
a78ac325
PZ
5353 ret = 1;
5354 }
e050e3f0 5355 }
60db5e09 5356
cdd6c482 5357 if (event->attr.freq) {
def0a9b2 5358 u64 now = perf_clock();
abd50713 5359 s64 delta = now - hwc->freq_time_stamp;
bd2b5b12 5360
abd50713 5361 hwc->freq_time_stamp = now;
bd2b5b12 5362
abd50713 5363 if (delta > 0 && delta < 2*TICK_NSEC)
f39d47ff 5364 perf_adjust_period(event, delta, hwc->last_period, true);
bd2b5b12
PZ
5365 }
5366
2023b359
PZ
5367 /*
5368 * XXX event_limit might not quite work as expected on inherited
cdd6c482 5369 * events
2023b359
PZ
5370 */
5371
cdd6c482
IM
5372 event->pending_kill = POLL_IN;
5373 if (events && atomic_dec_and_test(&event->event_limit)) {
79f14641 5374 ret = 1;
cdd6c482 5375 event->pending_kill = POLL_HUP;
a8b0ca17
PZ
5376 event->pending_disable = 1;
5377 irq_work_queue(&event->pending);
79f14641
PZ
5378 }
5379
453f19ee 5380 if (event->overflow_handler)
a8b0ca17 5381 event->overflow_handler(event, data, regs);
453f19ee 5382 else
a8b0ca17 5383 perf_event_output(event, data, regs);
453f19ee 5384
f506b3dc 5385 if (event->fasync && event->pending_kill) {
a8b0ca17
PZ
5386 event->pending_wakeup = 1;
5387 irq_work_queue(&event->pending);
f506b3dc
PZ
5388 }
5389
79f14641 5390 return ret;
f6c7d5fe
PZ
5391}
5392
a8b0ca17 5393int perf_event_overflow(struct perf_event *event,
5622f295
MM
5394 struct perf_sample_data *data,
5395 struct pt_regs *regs)
850bc73f 5396{
a8b0ca17 5397 return __perf_event_overflow(event, 1, data, regs);
850bc73f
PZ
5398}
5399
15dbf27c 5400/*
cdd6c482 5401 * Generic software event infrastructure
15dbf27c
PZ
5402 */
5403
b28ab83c
PZ
5404struct swevent_htable {
5405 struct swevent_hlist *swevent_hlist;
5406 struct mutex hlist_mutex;
5407 int hlist_refcount;
5408
5409 /* Recursion avoidance in each contexts */
5410 int recursion[PERF_NR_CONTEXTS];
5411};
5412
5413static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
5414
7b4b6658 5415/*
cdd6c482
IM
5416 * We directly increment event->count and keep a second value in
5417 * event->hw.period_left to count intervals. This period event
7b4b6658
PZ
5418 * is kept in the range [-sample_period, 0] so that we can use the
5419 * sign as trigger.
5420 */
5421
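/*
 * Returns the number of whole periods that have elapsed, i.e. how many
 * overflows the caller should account for.
 */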
ab573844 5422u64 perf_swevent_set_period(struct perf_event *event)
15dbf27c 5423{
cdd6c482 5424 struct hw_perf_event *hwc = &event->hw;
7b4b6658
PZ
5425 u64 period = hwc->last_period;
5426 u64 nr, offset;
5427 s64 old, val;
5428
5429 hwc->last_period = hwc->sample_period;
15dbf27c
PZ
5430
5431again:
e7850595 5432 old = val = local64_read(&hwc->period_left);
7b4b6658
PZ
5433 if (val < 0)
5434 return 0;
15dbf27c 5435
7b4b6658
PZ
5436 nr = div64_u64(period + val, period);
5437 offset = nr * period;
5438 val -= offset;
e7850595 5439 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7b4b6658 5440 goto again;
15dbf27c 5441
7b4b6658 5442 return nr;
15dbf27c
PZ
5443}
5444
0cff784a 5445static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
a8b0ca17 5446 struct perf_sample_data *data,
5622f295 5447 struct pt_regs *regs)
15dbf27c 5448{
cdd6c482 5449 struct hw_perf_event *hwc = &event->hw;
850bc73f 5450 int throttle = 0;
15dbf27c 5451
0cff784a
PZ
5452 if (!overflow)
5453 overflow = perf_swevent_set_period(event);
15dbf27c 5454
7b4b6658
PZ
5455 if (hwc->interrupts == MAX_INTERRUPTS)
5456 return;
15dbf27c 5457
7b4b6658 5458 for (; overflow; overflow--) {
a8b0ca17 5459 if (__perf_event_overflow(event, throttle,
5622f295 5460 data, regs)) {
7b4b6658
PZ
5461 /*
5462 * We inhibit the overflow from happening when
5463 * hwc->interrupts == MAX_INTERRUPTS.
5464 */
5465 break;
5466 }
cf450a73 5467 throttle = 1;
7b4b6658 5468 }
15dbf27c
PZ
5469}
5470
a4eaf7f1 5471static void perf_swevent_event(struct perf_event *event, u64 nr,
a8b0ca17 5472 struct perf_sample_data *data,
5622f295 5473 struct pt_regs *regs)
7b4b6658 5474{
cdd6c482 5475 struct hw_perf_event *hwc = &event->hw;
d6d020e9 5476
e7850595 5477 local64_add(nr, &event->count);
d6d020e9 5478
0cff784a
PZ
5479 if (!regs)
5480 return;
5481
6c7e550f 5482 if (!is_sampling_event(event))
7b4b6658 5483 return;
d6d020e9 5484
5d81e5cf
AV
5485 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
5486 data->period = nr;
5487 return perf_swevent_overflow(event, 1, data, regs);
5488 } else
5489 data->period = event->hw.last_period;
5490
0cff784a 5491 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
a8b0ca17 5492 return perf_swevent_overflow(event, 1, data, regs);
0cff784a 5493
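	/*
	 * period_left is kept in [-sample_period, 0]; while it stays
	 * negative after adding nr the current period has not elapsed
	 * yet and no overflow needs to be reported.
	 */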
e7850595 5494 if (local64_add_negative(nr, &hwc->period_left))
7b4b6658 5495 return;
df1a132b 5496
a8b0ca17 5497 perf_swevent_overflow(event, 0, data, regs);
d6d020e9
PZ
5498}
5499
f5ffe02e
FW
5500static int perf_exclude_event(struct perf_event *event,
5501 struct pt_regs *regs)
5502{
a4eaf7f1 5503 if (event->hw.state & PERF_HES_STOPPED)
91b2f482 5504 return 1;
a4eaf7f1 5505
f5ffe02e
FW
5506 if (regs) {
5507 if (event->attr.exclude_user && user_mode(regs))
5508 return 1;
5509
5510 if (event->attr.exclude_kernel && !user_mode(regs))
5511 return 1;
5512 }
5513
5514 return 0;
5515}
5516
cdd6c482 5517static int perf_swevent_match(struct perf_event *event,
1c432d89 5518 enum perf_type_id type,
6fb2915d
LZ
5519 u32 event_id,
5520 struct perf_sample_data *data,
5521 struct pt_regs *regs)
15dbf27c 5522{
cdd6c482 5523 if (event->attr.type != type)
a21ca2ca 5524 return 0;
f5ffe02e 5525
cdd6c482 5526 if (event->attr.config != event_id)
15dbf27c
PZ
5527 return 0;
5528
f5ffe02e
FW
5529 if (perf_exclude_event(event, regs))
5530 return 0;
15dbf27c
PZ
5531
5532 return 1;
5533}
5534
76e1d904
FW
5535static inline u64 swevent_hash(u64 type, u32 event_id)
5536{
5537 u64 val = event_id | (type << 32);
5538
5539 return hash_64(val, SWEVENT_HLIST_BITS);
5540}
5541
49f135ed
FW
5542static inline struct hlist_head *
5543__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
76e1d904 5544{
49f135ed
FW
5545 u64 hash = swevent_hash(type, event_id);
5546
5547 return &hlist->heads[hash];
5548}
76e1d904 5549
49f135ed
FW
5550/* For the read side: events when they trigger */
5551static inline struct hlist_head *
b28ab83c 5552find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
49f135ed
FW
5553{
5554 struct swevent_hlist *hlist;
76e1d904 5555
b28ab83c 5556 hlist = rcu_dereference(swhash->swevent_hlist);
76e1d904
FW
5557 if (!hlist)
5558 return NULL;
5559
49f135ed
FW
5560 return __find_swevent_head(hlist, type, event_id);
5561}
5562
5563/* For the event head insertion and removal in the hlist */
5564static inline struct hlist_head *
b28ab83c 5565find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
49f135ed
FW
5566{
5567 struct swevent_hlist *hlist;
5568 u32 event_id = event->attr.config;
5569 u64 type = event->attr.type;
5570
5571 /*
5572 * Event scheduling is always serialized against hlist allocation
5573 * and release. Which makes the protected version suitable here.
5574 * The context lock guarantees that.
5575 */
b28ab83c 5576 hlist = rcu_dereference_protected(swhash->swevent_hlist,
49f135ed
FW
5577 lockdep_is_held(&event->ctx->lock));
5578 if (!hlist)
5579 return NULL;
5580
5581 return __find_swevent_head(hlist, type, event_id);
76e1d904
FW
5582}
5583
5584static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
a8b0ca17 5585 u64 nr,
76e1d904
FW
5586 struct perf_sample_data *data,
5587 struct pt_regs *regs)
15dbf27c 5588{
b28ab83c 5589 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
cdd6c482 5590 struct perf_event *event;
76e1d904 5591 struct hlist_head *head;
15dbf27c 5592
76e1d904 5593 rcu_read_lock();
b28ab83c 5594 head = find_swevent_head_rcu(swhash, type, event_id);
76e1d904
FW
5595 if (!head)
5596 goto end;
5597
b67bfe0d 5598 hlist_for_each_entry_rcu(event, head, hlist_entry) {
6fb2915d 5599 if (perf_swevent_match(event, type, event_id, data, regs))
a8b0ca17 5600 perf_swevent_event(event, nr, data, regs);
15dbf27c 5601 }
76e1d904
FW
5602end:
5603 rcu_read_unlock();
15dbf27c
PZ
5604}
5605
4ed7c92d 5606int perf_swevent_get_recursion_context(void)
96f6d444 5607{
b28ab83c 5608 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
96f6d444 5609
b28ab83c 5610 return get_recursion_context(swhash->recursion);
96f6d444 5611}
645e8cc0 5612EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
96f6d444 5613
fa9f90be 5614inline void perf_swevent_put_recursion_context(int rctx)
15dbf27c 5615{
b28ab83c 5616 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
927c7a9e 5617
b28ab83c 5618 put_recursion_context(swhash->recursion, rctx);
ce71b9df 5619}
15dbf27c 5620
a8b0ca17 5621void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
b8e83514 5622{
a4234bfc 5623 struct perf_sample_data data;
4ed7c92d
PZ
5624 int rctx;
5625
1c024eca 5626 preempt_disable_notrace();
4ed7c92d
PZ
5627 rctx = perf_swevent_get_recursion_context();
5628 if (rctx < 0)
5629 return;
a4234bfc 5630
fd0d000b 5631 perf_sample_data_init(&data, addr, 0);
92bf309a 5632
a8b0ca17 5633 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
4ed7c92d
PZ
5634
5635 perf_swevent_put_recursion_context(rctx);
1c024eca 5636 preempt_enable_notrace();
b8e83514
PZ
5637}
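/*
 * Note: most in-kernel callers are expected to go through the
 * perf_sw_event() wrapper in <linux/perf_event.h>, which tests the
 * perf_swevent_enabled static key before calling in here.  An
 * illustrative call site (not from this file) looks like:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */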
5638
cdd6c482 5639static void perf_swevent_read(struct perf_event *event)
15dbf27c 5640{
15dbf27c
PZ
5641}
5642
a4eaf7f1 5643static int perf_swevent_add(struct perf_event *event, int flags)
15dbf27c 5644{
b28ab83c 5645 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
cdd6c482 5646 struct hw_perf_event *hwc = &event->hw;
76e1d904
FW
5647 struct hlist_head *head;
5648
6c7e550f 5649 if (is_sampling_event(event)) {
7b4b6658 5650 hwc->last_period = hwc->sample_period;
cdd6c482 5651 perf_swevent_set_period(event);
7b4b6658 5652 }
76e1d904 5653
a4eaf7f1
PZ
5654 hwc->state = !(flags & PERF_EF_START);
5655
b28ab83c 5656 head = find_swevent_head(swhash, event);
76e1d904
FW
5657 if (WARN_ON_ONCE(!head))
5658 return -EINVAL;
5659
5660 hlist_add_head_rcu(&event->hlist_entry, head);
5661
15dbf27c
PZ
5662 return 0;
5663}
5664
a4eaf7f1 5665static void perf_swevent_del(struct perf_event *event, int flags)
15dbf27c 5666{
76e1d904 5667 hlist_del_rcu(&event->hlist_entry);
15dbf27c
PZ
5668}
5669
a4eaf7f1 5670static void perf_swevent_start(struct perf_event *event, int flags)
5c92d124 5671{
a4eaf7f1 5672 event->hw.state = 0;
d6d020e9 5673}
aa9c4c0f 5674
a4eaf7f1 5675static void perf_swevent_stop(struct perf_event *event, int flags)
d6d020e9 5676{
a4eaf7f1 5677 event->hw.state = PERF_HES_STOPPED;
bae43c99
IM
5678}
5679
49f135ed
FW
5680/* Deref the hlist from the update side */
5681static inline struct swevent_hlist *
b28ab83c 5682swevent_hlist_deref(struct swevent_htable *swhash)
49f135ed 5683{
b28ab83c
PZ
5684 return rcu_dereference_protected(swhash->swevent_hlist,
5685 lockdep_is_held(&swhash->hlist_mutex));
49f135ed
FW
5686}
5687
b28ab83c 5688static void swevent_hlist_release(struct swevent_htable *swhash)
76e1d904 5689{
b28ab83c 5690 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
76e1d904 5691
49f135ed 5692 if (!hlist)
76e1d904
FW
5693 return;
5694
b28ab83c 5695 rcu_assign_pointer(swhash->swevent_hlist, NULL);
fa4bbc4c 5696 kfree_rcu(hlist, rcu_head);
76e1d904
FW
5697}
5698
5699static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5700{
b28ab83c 5701 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904 5702
b28ab83c 5703 mutex_lock(&swhash->hlist_mutex);
76e1d904 5704
b28ab83c
PZ
5705 if (!--swhash->hlist_refcount)
5706 swevent_hlist_release(swhash);
76e1d904 5707
b28ab83c 5708 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
5709}
5710
5711static void swevent_hlist_put(struct perf_event *event)
5712{
5713 int cpu;
5714
76e1d904
FW
5715 for_each_possible_cpu(cpu)
5716 swevent_hlist_put_cpu(event, cpu);
5717}
5718
5719static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5720{
b28ab83c 5721 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904
FW
5722 int err = 0;
5723
b28ab83c 5724 mutex_lock(&swhash->hlist_mutex);
76e1d904 5725
b28ab83c 5726 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
76e1d904
FW
5727 struct swevent_hlist *hlist;
5728
5729 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5730 if (!hlist) {
5731 err = -ENOMEM;
5732 goto exit;
5733 }
b28ab83c 5734 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 5735 }
b28ab83c 5736 swhash->hlist_refcount++;
9ed6060d 5737exit:
b28ab83c 5738 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
5739
5740 return err;
5741}
5742
5743static int swevent_hlist_get(struct perf_event *event)
5744{
5745 int err;
5746 int cpu, failed_cpu;
5747
76e1d904
FW
5748 get_online_cpus();
5749 for_each_possible_cpu(cpu) {
5750 err = swevent_hlist_get_cpu(event, cpu);
5751 if (err) {
5752 failed_cpu = cpu;
5753 goto fail;
5754 }
5755 }
5756 put_online_cpus();
5757
5758 return 0;
9ed6060d 5759fail:
76e1d904
FW
5760 for_each_possible_cpu(cpu) {
5761 if (cpu == failed_cpu)
5762 break;
5763 swevent_hlist_put_cpu(event, cpu);
5764 }
5765
5766 put_online_cpus();
5767 return err;
5768}
5769
c5905afb 5770struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
95476b64 5771
b0a873eb
PZ
5772static void sw_perf_event_destroy(struct perf_event *event)
5773{
5774 u64 event_id = event->attr.config;
95476b64 5775
b0a873eb
PZ
5776 WARN_ON(event->parent);
5777
c5905afb 5778 static_key_slow_dec(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
5779 swevent_hlist_put(event);
5780}
5781
5782static int perf_swevent_init(struct perf_event *event)
5783{
8176cced 5784 u64 event_id = event->attr.config;
b0a873eb
PZ
5785
5786 if (event->attr.type != PERF_TYPE_SOFTWARE)
5787 return -ENOENT;
5788
2481c5fa
SE
5789 /*
5790 * no branch sampling for software events
5791 */
5792 if (has_branch_stack(event))
5793 return -EOPNOTSUPP;
5794
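	/*
	 * The clock events are served by the dedicated hrtimer based
	 * perf_cpu_clock and perf_task_clock pmus below, not by the
	 * generic software pmu.
	 */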
b0a873eb
PZ
5795 switch (event_id) {
5796 case PERF_COUNT_SW_CPU_CLOCK:
5797 case PERF_COUNT_SW_TASK_CLOCK:
5798 return -ENOENT;
5799
5800 default:
5801 break;
5802 }
5803
ce677831 5804 if (event_id >= PERF_COUNT_SW_MAX)
b0a873eb
PZ
5805 return -ENOENT;
5806
5807 if (!event->parent) {
5808 int err;
5809
5810 err = swevent_hlist_get(event);
5811 if (err)
5812 return err;
5813
c5905afb 5814 static_key_slow_inc(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
5815 event->destroy = sw_perf_event_destroy;
5816 }
5817
5818 return 0;
5819}
5820
35edc2a5
PZ
5821static int perf_swevent_event_idx(struct perf_event *event)
5822{
5823 return 0;
5824}
5825
b0a873eb 5826static struct pmu perf_swevent = {
89a1e187 5827 .task_ctx_nr = perf_sw_context,
95476b64 5828
b0a873eb 5829 .event_init = perf_swevent_init,
a4eaf7f1
PZ
5830 .add = perf_swevent_add,
5831 .del = perf_swevent_del,
5832 .start = perf_swevent_start,
5833 .stop = perf_swevent_stop,
1c024eca 5834 .read = perf_swevent_read,
35edc2a5
PZ
5835
5836 .event_idx = perf_swevent_event_idx,
1c024eca
PZ
5837};
5838
b0a873eb
PZ
5839#ifdef CONFIG_EVENT_TRACING
5840
1c024eca
PZ
5841static int perf_tp_filter_match(struct perf_event *event,
5842 struct perf_sample_data *data)
5843{
5844 void *record = data->raw->data;
5845
5846 if (likely(!event->filter) || filter_match_preds(event->filter, record))
5847 return 1;
5848 return 0;
5849}
5850
5851static int perf_tp_event_match(struct perf_event *event,
5852 struct perf_sample_data *data,
5853 struct pt_regs *regs)
5854{
a0f7d0f7
FW
5855 if (event->hw.state & PERF_HES_STOPPED)
5856 return 0;
580d607c
PZ
5857 /*
5858 * All tracepoints are from kernel-space.
5859 */
5860 if (event->attr.exclude_kernel)
1c024eca
PZ
5861 return 0;
5862
5863 if (!perf_tp_filter_match(event, data))
5864 return 0;
5865
5866 return 1;
5867}
5868
5869void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
e6dab5ff
AV
5870 struct pt_regs *regs, struct hlist_head *head, int rctx,
5871 struct task_struct *task)
95476b64
FW
5872{
5873 struct perf_sample_data data;
1c024eca 5874 struct perf_event *event;
1c024eca 5875
95476b64
FW
5876 struct perf_raw_record raw = {
5877 .size = entry_size,
5878 .data = record,
5879 };
5880
fd0d000b 5881 perf_sample_data_init(&data, addr, 0);
95476b64
FW
5882 data.raw = &raw;
5883
b67bfe0d 5884 hlist_for_each_entry_rcu(event, head, hlist_entry) {
1c024eca 5885 if (perf_tp_event_match(event, &data, regs))
a8b0ca17 5886 perf_swevent_event(event, count, &data, regs);
4f41c013 5887 }
ecc55f84 5888
e6dab5ff
AV
5889 /*
5890 * If we got specified a target task, also iterate its context and
5891 * deliver this event there too.
5892 */
5893 if (task && task != current) {
5894 struct perf_event_context *ctx;
5895 struct trace_entry *entry = record;
5896
5897 rcu_read_lock();
5898 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
5899 if (!ctx)
5900 goto unlock;
5901
5902 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5903 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5904 continue;
5905 if (event->attr.config != entry->type)
5906 continue;
5907 if (perf_tp_event_match(event, &data, regs))
5908 perf_swevent_event(event, count, &data, regs);
5909 }
5910unlock:
5911 rcu_read_unlock();
5912 }
5913
ecc55f84 5914 perf_swevent_put_recursion_context(rctx);
95476b64
FW
5915}
5916EXPORT_SYMBOL_GPL(perf_tp_event);
5917
cdd6c482 5918static void tp_perf_event_destroy(struct perf_event *event)
e077df4f 5919{
1c024eca 5920 perf_trace_destroy(event);
e077df4f
PZ
5921}
5922
b0a873eb 5923static int perf_tp_event_init(struct perf_event *event)
e077df4f 5924{
76e1d904
FW
5925 int err;
5926
b0a873eb
PZ
5927 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5928 return -ENOENT;
5929
2481c5fa
SE
5930 /*
5931 * no branch sampling for tracepoint events
5932 */
5933 if (has_branch_stack(event))
5934 return -EOPNOTSUPP;
5935
1c024eca
PZ
5936 err = perf_trace_init(event);
5937 if (err)
b0a873eb 5938 return err;
e077df4f 5939
cdd6c482 5940 event->destroy = tp_perf_event_destroy;
e077df4f 5941
b0a873eb
PZ
5942 return 0;
5943}
5944
5945static struct pmu perf_tracepoint = {
89a1e187
PZ
5946 .task_ctx_nr = perf_sw_context,
5947
b0a873eb 5948 .event_init = perf_tp_event_init,
a4eaf7f1
PZ
5949 .add = perf_trace_add,
5950 .del = perf_trace_del,
5951 .start = perf_swevent_start,
5952 .stop = perf_swevent_stop,
b0a873eb 5953 .read = perf_swevent_read,
35edc2a5
PZ
5954
5955 .event_idx = perf_swevent_event_idx,
b0a873eb
PZ
5956};
5957
5958static inline void perf_tp_register(void)
5959{
2e80a82a 5960 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
e077df4f 5961}
6fb2915d
LZ
5962
5963static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5964{
5965 char *filter_str;
5966 int ret;
5967
5968 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5969 return -EINVAL;
5970
5971 filter_str = strndup_user(arg, PAGE_SIZE);
5972 if (IS_ERR(filter_str))
5973 return PTR_ERR(filter_str);
5974
5975 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5976
5977 kfree(filter_str);
5978 return ret;
5979}
5980
5981static void perf_event_free_filter(struct perf_event *event)
5982{
5983 ftrace_profile_free_filter(event);
5984}
5985
e077df4f 5986#else
6fb2915d 5987
b0a873eb 5988static inline void perf_tp_register(void)
e077df4f 5989{
e077df4f 5990}
6fb2915d
LZ
5991
5992static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5993{
5994 return -ENOENT;
5995}
5996
5997static void perf_event_free_filter(struct perf_event *event)
5998{
5999}
6000
07b139c8 6001#endif /* CONFIG_EVENT_TRACING */
e077df4f 6002
24f1e32c 6003#ifdef CONFIG_HAVE_HW_BREAKPOINT
f5ffe02e 6004void perf_bp_event(struct perf_event *bp, void *data)
24f1e32c 6005{
f5ffe02e
FW
6006 struct perf_sample_data sample;
6007 struct pt_regs *regs = data;
6008
fd0d000b 6009 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
f5ffe02e 6010
a4eaf7f1 6011 if (!bp->hw.state && !perf_exclude_event(bp, regs))
a8b0ca17 6012 perf_swevent_event(bp, 1, &sample, regs);
24f1e32c
FW
6013}
6014#endif
6015
b0a873eb
PZ
6016/*
6017 * hrtimer based swevent callback
6018 */
f29ac756 6019
b0a873eb 6020static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
f29ac756 6021{
b0a873eb
PZ
6022 enum hrtimer_restart ret = HRTIMER_RESTART;
6023 struct perf_sample_data data;
6024 struct pt_regs *regs;
6025 struct perf_event *event;
6026 u64 period;
f29ac756 6027
b0a873eb 6028 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
ba3dd36c
PZ
6029
6030 if (event->state != PERF_EVENT_STATE_ACTIVE)
6031 return HRTIMER_NORESTART;
6032
b0a873eb 6033 event->pmu->read(event);
f344011c 6034
fd0d000b 6035 perf_sample_data_init(&data, 0, event->hw.last_period);
b0a873eb
PZ
6036 regs = get_irq_regs();
6037
6038 if (regs && !perf_exclude_event(event, regs)) {
77aeeebd 6039 if (!(event->attr.exclude_idle && is_idle_task(current)))
33b07b8b 6040 if (__perf_event_overflow(event, 1, &data, regs))
b0a873eb
PZ
6041 ret = HRTIMER_NORESTART;
6042 }
24f1e32c 6043
b0a873eb
PZ
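	/* re-arm the timer; clamp the period to a minimum of 10 usecs */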
6044 period = max_t(u64, 10000, event->hw.sample_period);
6045 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
24f1e32c 6046
b0a873eb 6047 return ret;
f29ac756
PZ
6048}
6049
b0a873eb 6050static void perf_swevent_start_hrtimer(struct perf_event *event)
5c92d124 6051{
b0a873eb 6052 struct hw_perf_event *hwc = &event->hw;
5d508e82
FBH
6053 s64 period;
6054
6055 if (!is_sampling_event(event))
6056 return;
f5ffe02e 6057
5d508e82
FBH
6058 period = local64_read(&hwc->period_left);
6059 if (period) {
6060 if (period < 0)
6061 period = 10000;
fa407f35 6062
5d508e82
FBH
6063 local64_set(&hwc->period_left, 0);
6064 } else {
6065 period = max_t(u64, 10000, hwc->sample_period);
6066 }
6067 __hrtimer_start_range_ns(&hwc->hrtimer,
b0a873eb 6068 ns_to_ktime(period), 0,
b5ab4cd5 6069 HRTIMER_MODE_REL_PINNED, 0);
24f1e32c 6070}
b0a873eb
PZ
6071
6072static void perf_swevent_cancel_hrtimer(struct perf_event *event)
24f1e32c 6073{
b0a873eb
PZ
6074 struct hw_perf_event *hwc = &event->hw;
6075
6c7e550f 6076 if (is_sampling_event(event)) {
b0a873eb 6077 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
fa407f35 6078 local64_set(&hwc->period_left, ktime_to_ns(remaining));
b0a873eb
PZ
6079
6080 hrtimer_cancel(&hwc->hrtimer);
6081 }
24f1e32c
FW
6082}
6083
ba3dd36c
PZ
6084static void perf_swevent_init_hrtimer(struct perf_event *event)
6085{
6086 struct hw_perf_event *hwc = &event->hw;
6087
6088 if (!is_sampling_event(event))
6089 return;
6090
6091 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6092 hwc->hrtimer.function = perf_swevent_hrtimer;
6093
6094 /*
6095 * Since hrtimers have a fixed rate, we can do a static freq->period
6096 * mapping and avoid the whole period adjust feedback stuff.
6097 */
6098 if (event->attr.freq) {
6099 long freq = event->attr.sample_freq;
6100
6101 event->attr.sample_period = NSEC_PER_SEC / freq;
6102 hwc->sample_period = event->attr.sample_period;
6103 local64_set(&hwc->period_left, hwc->sample_period);
778141e3 6104 hwc->last_period = hwc->sample_period;
ba3dd36c
PZ
6105 event->attr.freq = 0;
6106 }
6107}
6108
b0a873eb
PZ
6109/*
6110 * Software event: cpu wall time clock
6111 */
6112
6113static void cpu_clock_event_update(struct perf_event *event)
24f1e32c 6114{
b0a873eb
PZ
6115 s64 prev;
6116 u64 now;
6117
a4eaf7f1 6118 now = local_clock();
b0a873eb
PZ
6119 prev = local64_xchg(&event->hw.prev_count, now);
6120 local64_add(now - prev, &event->count);
24f1e32c 6121}
24f1e32c 6122
a4eaf7f1 6123static void cpu_clock_event_start(struct perf_event *event, int flags)
b0a873eb 6124{
a4eaf7f1 6125 local64_set(&event->hw.prev_count, local_clock());
b0a873eb 6126 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
6127}
6128
a4eaf7f1 6129static void cpu_clock_event_stop(struct perf_event *event, int flags)
f29ac756 6130{
b0a873eb
PZ
6131 perf_swevent_cancel_hrtimer(event);
6132 cpu_clock_event_update(event);
6133}
f29ac756 6134
a4eaf7f1
PZ
6135static int cpu_clock_event_add(struct perf_event *event, int flags)
6136{
6137 if (flags & PERF_EF_START)
6138 cpu_clock_event_start(event, flags);
6139
6140 return 0;
6141}
6142
6143static void cpu_clock_event_del(struct perf_event *event, int flags)
6144{
6145 cpu_clock_event_stop(event, flags);
6146}
6147
b0a873eb
PZ
6148static void cpu_clock_event_read(struct perf_event *event)
6149{
6150 cpu_clock_event_update(event);
6151}
f344011c 6152
b0a873eb
PZ
6153static int cpu_clock_event_init(struct perf_event *event)
6154{
6155 if (event->attr.type != PERF_TYPE_SOFTWARE)
6156 return -ENOENT;
6157
6158 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
6159 return -ENOENT;
6160
2481c5fa
SE
6161 /*
6162 * no branch sampling for software events
6163 */
6164 if (has_branch_stack(event))
6165 return -EOPNOTSUPP;
6166
ba3dd36c
PZ
6167 perf_swevent_init_hrtimer(event);
6168
b0a873eb 6169 return 0;
f29ac756
PZ
6170}
6171
b0a873eb 6172static struct pmu perf_cpu_clock = {
89a1e187
PZ
6173 .task_ctx_nr = perf_sw_context,
6174
b0a873eb 6175 .event_init = cpu_clock_event_init,
a4eaf7f1
PZ
6176 .add = cpu_clock_event_add,
6177 .del = cpu_clock_event_del,
6178 .start = cpu_clock_event_start,
6179 .stop = cpu_clock_event_stop,
b0a873eb 6180 .read = cpu_clock_event_read,
35edc2a5
PZ
6181
6182 .event_idx = perf_swevent_event_idx,
b0a873eb
PZ
6183};
6184
6185/*
6186 * Software event: task time clock
6187 */
6188
6189static void task_clock_event_update(struct perf_event *event, u64 now)
5c92d124 6190{
b0a873eb
PZ
6191 u64 prev;
6192 s64 delta;
5c92d124 6193
b0a873eb
PZ
6194 prev = local64_xchg(&event->hw.prev_count, now);
6195 delta = now - prev;
6196 local64_add(delta, &event->count);
6197}
5c92d124 6198
a4eaf7f1 6199static void task_clock_event_start(struct perf_event *event, int flags)
b0a873eb 6200{
a4eaf7f1 6201 local64_set(&event->hw.prev_count, event->ctx->time);
b0a873eb 6202 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
6203}
6204
a4eaf7f1 6205static void task_clock_event_stop(struct perf_event *event, int flags)
b0a873eb
PZ
6206{
6207 perf_swevent_cancel_hrtimer(event);
6208 task_clock_event_update(event, event->ctx->time);
a4eaf7f1
PZ
6209}
6210
6211static int task_clock_event_add(struct perf_event *event, int flags)
6212{
6213 if (flags & PERF_EF_START)
6214 task_clock_event_start(event, flags);
b0a873eb 6215
a4eaf7f1
PZ
6216 return 0;
6217}
6218
6219static void task_clock_event_del(struct perf_event *event, int flags)
6220{
6221 task_clock_event_stop(event, PERF_EF_UPDATE);
b0a873eb
PZ
6222}
6223
6224static void task_clock_event_read(struct perf_event *event)
6225{
768a06e2
PZ
6226 u64 now = perf_clock();
6227 u64 delta = now - event->ctx->timestamp;
6228 u64 time = event->ctx->time + delta;
b0a873eb
PZ
6229
6230 task_clock_event_update(event, time);
6231}
6232
6233static int task_clock_event_init(struct perf_event *event)
6fb2915d 6234{
b0a873eb
PZ
6235 if (event->attr.type != PERF_TYPE_SOFTWARE)
6236 return -ENOENT;
6237
6238 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
6239 return -ENOENT;
6240
2481c5fa
SE
6241 /*
6242 * no branch sampling for software events
6243 */
6244 if (has_branch_stack(event))
6245 return -EOPNOTSUPP;
6246
ba3dd36c
PZ
6247 perf_swevent_init_hrtimer(event);
6248
b0a873eb 6249 return 0;
6fb2915d
LZ
6250}
6251
b0a873eb 6252static struct pmu perf_task_clock = {
89a1e187
PZ
6253 .task_ctx_nr = perf_sw_context,
6254
b0a873eb 6255 .event_init = task_clock_event_init,
a4eaf7f1
PZ
6256 .add = task_clock_event_add,
6257 .del = task_clock_event_del,
6258 .start = task_clock_event_start,
6259 .stop = task_clock_event_stop,
b0a873eb 6260 .read = task_clock_event_read,
35edc2a5
PZ
6261
6262 .event_idx = perf_swevent_event_idx,
b0a873eb 6263};
6fb2915d 6264
ad5133b7 6265static void perf_pmu_nop_void(struct pmu *pmu)
e077df4f 6266{
e077df4f 6267}
6fb2915d 6268
ad5133b7 6269static int perf_pmu_nop_int(struct pmu *pmu)
6fb2915d 6270{
ad5133b7 6271 return 0;
6fb2915d
LZ
6272}
6273
ad5133b7 6274static void perf_pmu_start_txn(struct pmu *pmu)
6fb2915d 6275{
ad5133b7 6276 perf_pmu_disable(pmu);
6fb2915d
LZ
6277}
6278
ad5133b7
PZ
6279static int perf_pmu_commit_txn(struct pmu *pmu)
6280{
6281 perf_pmu_enable(pmu);
6282 return 0;
6283}
e077df4f 6284
ad5133b7 6285static void perf_pmu_cancel_txn(struct pmu *pmu)
24f1e32c 6286{
ad5133b7 6287 perf_pmu_enable(pmu);
24f1e32c
FW
6288}
6289
35edc2a5
PZ
6290static int perf_event_idx_default(struct perf_event *event)
6291{
6292 return event->hw.idx + 1;
6293}
6294
8dc85d54
PZ
6295/*
6296 * Ensures all contexts with the same task_ctx_nr have the same
6297 * pmu_cpu_context too.
6298 */
9e317041 6299static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
24f1e32c 6300{
8dc85d54 6301 struct pmu *pmu;
b326e956 6302
8dc85d54
PZ
6303 if (ctxn < 0)
6304 return NULL;
24f1e32c 6305
8dc85d54
PZ
6306 list_for_each_entry(pmu, &pmus, entry) {
6307 if (pmu->task_ctx_nr == ctxn)
6308 return pmu->pmu_cpu_context;
6309 }
24f1e32c 6310
8dc85d54 6311 return NULL;
24f1e32c
FW
6312}
6313
51676957 6314static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
24f1e32c 6315{
51676957
PZ
6316 int cpu;
6317
6318 for_each_possible_cpu(cpu) {
6319 struct perf_cpu_context *cpuctx;
6320
6321 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6322
3f1f3320
PZ
6323 if (cpuctx->unique_pmu == old_pmu)
6324 cpuctx->unique_pmu = pmu;
51676957
PZ
6325 }
6326}
6327
6328static void free_pmu_context(struct pmu *pmu)
6329{
6330 struct pmu *i;
f5ffe02e 6331
8dc85d54 6332 mutex_lock(&pmus_lock);
0475f9ea 6333 /*
8dc85d54 6334 * Like a real lame refcount.
0475f9ea 6335 */
51676957
PZ
6336 list_for_each_entry(i, &pmus, entry) {
6337 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
6338 update_pmu_context(i, pmu);
8dc85d54 6339 goto out;
51676957 6340 }
8dc85d54 6341 }
d6d020e9 6342
51676957 6343 free_percpu(pmu->pmu_cpu_context);
8dc85d54
PZ
6344out:
6345 mutex_unlock(&pmus_lock);
24f1e32c 6346}
2e80a82a 6347static struct idr pmu_idr;
d6d020e9 6348
abe43400
PZ
6349static ssize_t
6350type_show(struct device *dev, struct device_attribute *attr, char *page)
6351{
6352 struct pmu *pmu = dev_get_drvdata(dev);
6353
6354 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
6355}
90826ca7 6356static DEVICE_ATTR_RO(type);
abe43400 6357
62b85639
SE
6358static ssize_t
6359perf_event_mux_interval_ms_show(struct device *dev,
6360 struct device_attribute *attr,
6361 char *page)
6362{
6363 struct pmu *pmu = dev_get_drvdata(dev);
6364
6365 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
6366}
6367
6368static ssize_t
6369perf_event_mux_interval_ms_store(struct device *dev,
6370 struct device_attribute *attr,
6371 const char *buf, size_t count)
6372{
6373 struct pmu *pmu = dev_get_drvdata(dev);
6374 int timer, cpu, ret;
6375
6376 ret = kstrtoint(buf, 0, &timer);
6377 if (ret)
6378 return ret;
6379
6380 if (timer < 1)
6381 return -EINVAL;
6382
6383 /* same value, nothing to do */
6384 if (timer == pmu->hrtimer_interval_ms)
6385 return count;
6386
6387 pmu->hrtimer_interval_ms = timer;
6388
6389 /* update all cpuctx for this PMU */
6390 for_each_possible_cpu(cpu) {
6391 struct perf_cpu_context *cpuctx;
6392 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6393 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
6394
6395 if (hrtimer_active(&cpuctx->hrtimer))
6396 hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
6397 }
6398
6399 return count;
6400}
90826ca7 6401static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
62b85639 6402
90826ca7
GKH
6403static struct attribute *pmu_dev_attrs[] = {
6404 &dev_attr_type.attr,
6405 &dev_attr_perf_event_mux_interval_ms.attr,
6406 NULL,
abe43400 6407};
90826ca7 6408ATTRIBUTE_GROUPS(pmu_dev);
abe43400
PZ
6409
6410static int pmu_bus_running;
6411static struct bus_type pmu_bus = {
6412 .name = "event_source",
90826ca7 6413 .dev_groups = pmu_dev_groups,
abe43400
PZ
6414};
6415
6416static void pmu_dev_release(struct device *dev)
6417{
6418 kfree(dev);
6419}
6420
6421static int pmu_dev_alloc(struct pmu *pmu)
6422{
6423 int ret = -ENOMEM;
6424
6425 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
6426 if (!pmu->dev)
6427 goto out;
6428
0c9d42ed 6429 pmu->dev->groups = pmu->attr_groups;
abe43400
PZ
6430 device_initialize(pmu->dev);
6431 ret = dev_set_name(pmu->dev, "%s", pmu->name);
6432 if (ret)
6433 goto free_dev;
6434
6435 dev_set_drvdata(pmu->dev, pmu);
6436 pmu->dev->bus = &pmu_bus;
6437 pmu->dev->release = pmu_dev_release;
6438 ret = device_add(pmu->dev);
6439 if (ret)
6440 goto free_dev;
6441
6442out:
6443 return ret;
6444
6445free_dev:
6446 put_device(pmu->dev);
6447 goto out;
6448}
6449
547e9fd7 6450static struct lock_class_key cpuctx_mutex;
facc4307 6451static struct lock_class_key cpuctx_lock;
547e9fd7 6452
03d8e80b 6453int perf_pmu_register(struct pmu *pmu, const char *name, int type)
24f1e32c 6454{
108b02cf 6455 int cpu, ret;
24f1e32c 6456
b0a873eb 6457 mutex_lock(&pmus_lock);
33696fc0
PZ
6458 ret = -ENOMEM;
6459 pmu->pmu_disable_count = alloc_percpu(int);
6460 if (!pmu->pmu_disable_count)
6461 goto unlock;
f29ac756 6462
2e80a82a
PZ
6463 pmu->type = -1;
6464 if (!name)
6465 goto skip_type;
6466 pmu->name = name;
6467
6468 if (type < 0) {
0e9c3be2
TH
6469 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
6470 if (type < 0) {
6471 ret = type;
2e80a82a
PZ
6472 goto free_pdc;
6473 }
6474 }
6475 pmu->type = type;
6476
abe43400
PZ
6477 if (pmu_bus_running) {
6478 ret = pmu_dev_alloc(pmu);
6479 if (ret)
6480 goto free_idr;
6481 }
6482
2e80a82a 6483skip_type:
8dc85d54
PZ
6484 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
6485 if (pmu->pmu_cpu_context)
6486 goto got_cpu_context;
f29ac756 6487
c4814202 6488 ret = -ENOMEM;
108b02cf
PZ
6489 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
6490 if (!pmu->pmu_cpu_context)
abe43400 6491 goto free_dev;
f344011c 6492
108b02cf
PZ
6493 for_each_possible_cpu(cpu) {
6494 struct perf_cpu_context *cpuctx;
6495
6496 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
eb184479 6497 __perf_event_init_context(&cpuctx->ctx);
547e9fd7 6498 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
facc4307 6499 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
b04243ef 6500 cpuctx->ctx.type = cpu_context;
108b02cf 6501 cpuctx->ctx.pmu = pmu;
9e630205
SE
6502
6503 __perf_cpu_hrtimer_init(cpuctx, cpu);
6504
e9d2b064 6505 INIT_LIST_HEAD(&cpuctx->rotation_list);
3f1f3320 6506 cpuctx->unique_pmu = pmu;
108b02cf 6507 }
76e1d904 6508
8dc85d54 6509got_cpu_context:
ad5133b7
PZ
6510 if (!pmu->start_txn) {
6511 if (pmu->pmu_enable) {
6512 /*
6513 * If we have pmu_enable/pmu_disable calls, install
6514 * transaction stubs that use that to try and batch
6515 * hardware accesses.
6516 */
6517 pmu->start_txn = perf_pmu_start_txn;
6518 pmu->commit_txn = perf_pmu_commit_txn;
6519 pmu->cancel_txn = perf_pmu_cancel_txn;
6520 } else {
6521 pmu->start_txn = perf_pmu_nop_void;
6522 pmu->commit_txn = perf_pmu_nop_int;
6523 pmu->cancel_txn = perf_pmu_nop_void;
f344011c 6524 }
5c92d124 6525 }
15dbf27c 6526
ad5133b7
PZ
6527 if (!pmu->pmu_enable) {
6528 pmu->pmu_enable = perf_pmu_nop_void;
6529 pmu->pmu_disable = perf_pmu_nop_void;
6530 }
6531
35edc2a5
PZ
6532 if (!pmu->event_idx)
6533 pmu->event_idx = perf_event_idx_default;
6534
b0a873eb 6535 list_add_rcu(&pmu->entry, &pmus);
33696fc0
PZ
6536 ret = 0;
6537unlock:
b0a873eb
PZ
6538 mutex_unlock(&pmus_lock);
6539
33696fc0 6540 return ret;
108b02cf 6541
abe43400
PZ
6542free_dev:
6543 device_del(pmu->dev);
6544 put_device(pmu->dev);
6545
2e80a82a
PZ
6546free_idr:
6547 if (pmu->type >= PERF_TYPE_MAX)
6548 idr_remove(&pmu_idr, pmu->type);
6549
108b02cf
PZ
6550free_pdc:
6551 free_percpu(pmu->pmu_disable_count);
6552 goto unlock;
f29ac756
PZ
6553}
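/*
 * Illustrative sketch (not part of the original source): a minimal pmu
 * fills in the mandatory callbacks and registers itself, e.g. with
 * placeholder my_* functions:
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	perf_pmu_register(&my_pmu, "my_pmu", -1);
 *
 * Passing type == -1 makes perf_pmu_register() allocate a dynamic type
 * id from pmu_idr; that id is what the "type" attribute on the
 * event_source bus exposes to userspace.
 */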
6554
b0a873eb 6555void perf_pmu_unregister(struct pmu *pmu)
5c92d124 6556{
b0a873eb
PZ
6557 mutex_lock(&pmus_lock);
6558 list_del_rcu(&pmu->entry);
6559 mutex_unlock(&pmus_lock);
5c92d124 6560
0475f9ea 6561 /*
cde8e884
PZ
6562 * We dereference the pmu list under both SRCU and regular RCU, so
6563 * synchronize against both of those.
0475f9ea 6564 */
b0a873eb 6565 synchronize_srcu(&pmus_srcu);
cde8e884 6566 synchronize_rcu();
d6d020e9 6567
33696fc0 6568 free_percpu(pmu->pmu_disable_count);
2e80a82a
PZ
6569 if (pmu->type >= PERF_TYPE_MAX)
6570 idr_remove(&pmu_idr, pmu->type);
abe43400
PZ
6571 device_del(pmu->dev);
6572 put_device(pmu->dev);
51676957 6573 free_pmu_context(pmu);
b0a873eb 6574}
d6d020e9 6575
b0a873eb
PZ
6576struct pmu *perf_init_event(struct perf_event *event)
6577{
6578 struct pmu *pmu = NULL;
6579 int idx;
940c5b29 6580 int ret;
b0a873eb
PZ
6581
6582 idx = srcu_read_lock(&pmus_srcu);
2e80a82a
PZ
6583
6584 rcu_read_lock();
6585 pmu = idr_find(&pmu_idr, event->attr.type);
6586 rcu_read_unlock();
940c5b29 6587 if (pmu) {
7e5b2a01 6588 event->pmu = pmu;
940c5b29
LM
6589 ret = pmu->event_init(event);
6590 if (ret)
6591 pmu = ERR_PTR(ret);
2e80a82a 6592 goto unlock;
940c5b29 6593 }
2e80a82a 6594
b0a873eb 6595 list_for_each_entry_rcu(pmu, &pmus, entry) {
7e5b2a01 6596 event->pmu = pmu;
940c5b29 6597 ret = pmu->event_init(event);
b0a873eb 6598 if (!ret)
e5f4d339 6599 goto unlock;
76e1d904 6600
b0a873eb
PZ
6601 if (ret != -ENOENT) {
6602 pmu = ERR_PTR(ret);
e5f4d339 6603 goto unlock;
f344011c 6604 }
5c92d124 6605 }
e5f4d339
PZ
6606 pmu = ERR_PTR(-ENOENT);
6607unlock:
b0a873eb 6608 srcu_read_unlock(&pmus_srcu, idx);
15dbf27c 6609
4aeb0b42 6610 return pmu;
5c92d124
IM
6611}
6612
4beb31f3
FW
6613static void account_event_cpu(struct perf_event *event, int cpu)
6614{
6615 if (event->parent)
6616 return;
6617
6618 if (has_branch_stack(event)) {
6619 if (!(event->attach_state & PERF_ATTACH_TASK))
6620 atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
6621 }
6622 if (is_cgroup_event(event))
6623 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
6624}
6625
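/*
 * Account a freshly allocated event: bump the scheduling static key and
 * the global counters that the context-switch and side-band output
 * paths test, so that those paths only pay for what is in use.
 */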
766d6c07
FW
6626static void account_event(struct perf_event *event)
6627{
4beb31f3
FW
6628 if (event->parent)
6629 return;
6630
766d6c07
FW
6631 if (event->attach_state & PERF_ATTACH_TASK)
6632 static_key_slow_inc(&perf_sched_events.key);
6633 if (event->attr.mmap || event->attr.mmap_data)
6634 atomic_inc(&nr_mmap_events);
6635 if (event->attr.comm)
6636 atomic_inc(&nr_comm_events);
6637 if (event->attr.task)
6638 atomic_inc(&nr_task_events);
948b26b6
FW
6639 if (event->attr.freq) {
6640 if (atomic_inc_return(&nr_freq_events) == 1)
6641 tick_nohz_full_kick_all();
6642 }
4beb31f3 6643 if (has_branch_stack(event))
766d6c07 6644 static_key_slow_inc(&perf_sched_events.key);
4beb31f3 6645 if (is_cgroup_event(event))
766d6c07 6646 static_key_slow_inc(&perf_sched_events.key);
4beb31f3
FW
6647
6648 account_event_cpu(event, event->cpu);
766d6c07
FW
6649}
6650
0793a61d 6651/*
cdd6c482 6652 * Allocate and initialize an event structure
0793a61d 6653 */
cdd6c482 6654static struct perf_event *
c3f00c70 6655perf_event_alloc(struct perf_event_attr *attr, int cpu,
d580ff86
PZ
6656 struct task_struct *task,
6657 struct perf_event *group_leader,
6658 struct perf_event *parent_event,
4dc0da86
AK
6659 perf_overflow_handler_t overflow_handler,
6660 void *context)
0793a61d 6661{
51b0fe39 6662 struct pmu *pmu;
cdd6c482
IM
6663 struct perf_event *event;
6664 struct hw_perf_event *hwc;
90983b16 6665 long err = -EINVAL;
0793a61d 6666
66832eb4
ON
6667 if ((unsigned)cpu >= nr_cpu_ids) {
6668 if (!task || cpu != -1)
6669 return ERR_PTR(-EINVAL);
6670 }
6671
c3f00c70 6672 event = kzalloc(sizeof(*event), GFP_KERNEL);
cdd6c482 6673 if (!event)
d5d2bc0d 6674 return ERR_PTR(-ENOMEM);
0793a61d 6675
04289bb9 6676 /*
cdd6c482 6677 * Single events are their own group leaders, with an
04289bb9
IM
6678 * empty sibling list:
6679 */
6680 if (!group_leader)
cdd6c482 6681 group_leader = event;
04289bb9 6682
cdd6c482
IM
6683 mutex_init(&event->child_mutex);
6684 INIT_LIST_HEAD(&event->child_list);
fccc714b 6685
cdd6c482
IM
6686 INIT_LIST_HEAD(&event->group_entry);
6687 INIT_LIST_HEAD(&event->event_entry);
6688 INIT_LIST_HEAD(&event->sibling_list);
10c6db11 6689 INIT_LIST_HEAD(&event->rb_entry);
71ad88ef 6690 INIT_LIST_HEAD(&event->active_entry);
f3ae75de
SE
6691 INIT_HLIST_NODE(&event->hlist_entry);
6692
10c6db11 6693
cdd6c482 6694 init_waitqueue_head(&event->waitq);
e360adbe 6695 init_irq_work(&event->pending, perf_pending_event);
0793a61d 6696
cdd6c482 6697 mutex_init(&event->mmap_mutex);
7b732a75 6698
a6fa941d 6699 atomic_long_set(&event->refcount, 1);
cdd6c482
IM
6700 event->cpu = cpu;
6701 event->attr = *attr;
6702 event->group_leader = group_leader;
6703 event->pmu = NULL;
cdd6c482 6704 event->oncpu = -1;
a96bbc16 6705
cdd6c482 6706 event->parent = parent_event;
b84fbc9f 6707
17cf22c3 6708 event->ns = get_pid_ns(task_active_pid_ns(current));
cdd6c482 6709 event->id = atomic64_inc_return(&perf_event_id);
a96bbc16 6710
cdd6c482 6711 event->state = PERF_EVENT_STATE_INACTIVE;
329d876d 6712
d580ff86
PZ
6713 if (task) {
6714 event->attach_state = PERF_ATTACH_TASK;
f22c1bb6
ON
6715
6716 if (attr->type == PERF_TYPE_TRACEPOINT)
6717 event->hw.tp_target = task;
d580ff86
PZ
6718#ifdef CONFIG_HAVE_HW_BREAKPOINT
6719 /*
6720 * hw_breakpoint is a bit difficult here..
6721 */
f22c1bb6 6722 else if (attr->type == PERF_TYPE_BREAKPOINT)
d580ff86
PZ
6723 event->hw.bp_target = task;
6724#endif
6725 }
6726
4dc0da86 6727 if (!overflow_handler && parent_event) {
b326e956 6728 overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
6729 context = parent_event->overflow_handler_context;
6730 }
66832eb4 6731
b326e956 6732 event->overflow_handler = overflow_handler;
4dc0da86 6733 event->overflow_handler_context = context;
97eaf530 6734
0231bb53 6735 perf_event__state_init(event);
a86ed508 6736
4aeb0b42 6737 pmu = NULL;
b8e83514 6738
cdd6c482 6739 hwc = &event->hw;
bd2b5b12 6740 hwc->sample_period = attr->sample_period;
0d48696f 6741 if (attr->freq && attr->sample_freq)
bd2b5b12 6742 hwc->sample_period = 1;
eced1dfc 6743 hwc->last_period = hwc->sample_period;
bd2b5b12 6744
e7850595 6745 local64_set(&hwc->period_left, hwc->sample_period);
60db5e09 6746
2023b359 6747 /*
cdd6c482 6748 * we currently do not support PERF_FORMAT_GROUP on inherited events
2023b359 6749 */
3dab77fb 6750 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
90983b16 6751 goto err_ns;
2023b359 6752
b0a873eb 6753 pmu = perf_init_event(event);
4aeb0b42 6754 if (!pmu)
90983b16
FW
6755 goto err_ns;
6756 else if (IS_ERR(pmu)) {
4aeb0b42 6757 err = PTR_ERR(pmu);
90983b16 6758 goto err_ns;
621a01ea 6759 }
d5d2bc0d 6760
cdd6c482 6761 if (!event->parent) {
927c7a9e
FW
6762 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6763 err = get_callchain_buffers();
90983b16
FW
6764 if (err)
6765 goto err_pmu;
d010b332 6766 }
f344011c 6767 }
9ee318a7 6768
cdd6c482 6769 return event;
90983b16
FW
6770
6771err_pmu:
6772 if (event->destroy)
6773 event->destroy(event);
6774err_ns:
6775 if (event->ns)
6776 put_pid_ns(event->ns);
6777 kfree(event);
6778
6779 return ERR_PTR(err);
0793a61d
TG
6780}
6781
cdd6c482
IM
6782static int perf_copy_attr(struct perf_event_attr __user *uattr,
6783 struct perf_event_attr *attr)
974802ea 6784{
974802ea 6785 u32 size;
cdf8073d 6786 int ret;
974802ea
PZ
6787
6788 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6789 return -EFAULT;
6790
6791 /*
6792 * zero the full structure, so that a short copy will be nice.
6793 */
6794 memset(attr, 0, sizeof(*attr));
6795
6796 ret = get_user(size, &uattr->size);
6797 if (ret)
6798 return ret;
6799
6800 if (size > PAGE_SIZE) /* silly large */
6801 goto err_size;
6802
6803 if (!size) /* abi compat */
6804 size = PERF_ATTR_SIZE_VER0;
6805
6806 if (size < PERF_ATTR_SIZE_VER0)
6807 goto err_size;
6808
6809 /*
6810 * If we're handed a bigger struct than we know of,
cdf8073d
IS
6811 * ensure all the unknown bits are 0 - i.e. new
6812 * user-space does not rely on any kernel feature
6813 * extensions we dont know about yet.
974802ea
PZ
6814 */
6815 if (size > sizeof(*attr)) {
cdf8073d
IS
6816 unsigned char __user *addr;
6817 unsigned char __user *end;
6818 unsigned char val;
974802ea 6819
cdf8073d
IS
6820 addr = (void __user *)uattr + sizeof(*attr);
6821 end = (void __user *)uattr + size;
974802ea 6822
cdf8073d 6823 for (; addr < end; addr++) {
974802ea
PZ
6824 ret = get_user(val, addr);
6825 if (ret)
6826 return ret;
6827 if (val)
6828 goto err_size;
6829 }
b3e62e35 6830 size = sizeof(*attr);
974802ea
PZ
6831 }
6832
6833 ret = copy_from_user(attr, uattr, size);
6834 if (ret)
6835 return -EFAULT;
6836
3090ffb5
SE
6837 /* disabled for now */
6838 if (attr->mmap2)
6839 return -EINVAL;
6840
cd757645 6841 if (attr->__reserved_1)
974802ea
PZ
6842 return -EINVAL;
6843
6844 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
6845 return -EINVAL;
6846
6847 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
6848 return -EINVAL;
6849
bce38cd5
SE
6850 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
6851 u64 mask = attr->branch_sample_type;
6852
6853 /* only using defined bits */
6854 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
6855 return -EINVAL;
6856
6857 /* at least one branch bit must be set */
6858 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
6859 return -EINVAL;
6860
bce38cd5
SE
6861 /* propagate priv level, when not set for branch */
6862 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
6863
6864 /* exclude_kernel checked on syscall entry */
6865 if (!attr->exclude_kernel)
6866 mask |= PERF_SAMPLE_BRANCH_KERNEL;
6867
6868 if (!attr->exclude_user)
6869 mask |= PERF_SAMPLE_BRANCH_USER;
6870
6871 if (!attr->exclude_hv)
6872 mask |= PERF_SAMPLE_BRANCH_HV;
6873 /*
6874 * adjust user setting (for HW filter setup)
6875 */
6876 attr->branch_sample_type = mask;
6877 }
e712209a
SE
6878 /* privileged levels capture (kernel, hv): check permissions */
6879 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
2b923c8f
SE
6880 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6881 return -EACCES;
bce38cd5 6882 }
4018994f 6883
c5ebcedb 6884 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
4018994f 6885 ret = perf_reg_validate(attr->sample_regs_user);
c5ebcedb
JO
6886 if (ret)
6887 return ret;
6888 }
6889
6890 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
6891 if (!arch_perf_have_user_stack_dump())
6892 return -ENOSYS;
6893
6894 /*
6895 * We have __u32 type for the size, but so far
6896 * we can only use __u16 as maximum due to the
6897 * __u16 sample size limit.
6898 */
6899 if (attr->sample_stack_user >= USHRT_MAX)
6900 ret = -EINVAL;
6901 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
6902 ret = -EINVAL;
6903 }
4018994f 6904
974802ea
PZ
6905out:
6906 return ret;
6907
6908err_size:
6909 put_user(sizeof(*attr), &uattr->size);
6910 ret = -E2BIG;
6911 goto out;
6912}
6913
ac9721f3
PZ
6914static int
6915perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
a4be7c27 6916{
76369139 6917 struct ring_buffer *rb = NULL, *old_rb = NULL;
a4be7c27
PZ
6918 int ret = -EINVAL;
6919
ac9721f3 6920 if (!output_event)
a4be7c27
PZ
6921 goto set;
6922
ac9721f3
PZ
6923 /* don't allow circular references */
6924 if (event == output_event)
a4be7c27
PZ
6925 goto out;
6926
0f139300
PZ
6927 /*
6928 * Don't allow cross-cpu buffers
6929 */
6930 if (output_event->cpu != event->cpu)
6931 goto out;
6932
6933 /*
76369139 6934 * If it's not a per-cpu rb, it must be the same task.
0f139300
PZ
6935 */
6936 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
6937 goto out;
6938
a4be7c27 6939set:
cdd6c482 6940 mutex_lock(&event->mmap_mutex);
ac9721f3
PZ
6941 /* Can't redirect output if we've got an active mmap() */
6942 if (atomic_read(&event->mmap_count))
6943 goto unlock;
a4be7c27 6944
9bb5d40c
PZ
6945 old_rb = event->rb;
6946
ac9721f3 6947 if (output_event) {
76369139
FW
6948 /* get the rb we want to redirect to */
6949 rb = ring_buffer_get(output_event);
6950 if (!rb)
ac9721f3 6951 goto unlock;
a4be7c27
PZ
6952 }
6953
10c6db11
PZ
6954 if (old_rb)
6955 ring_buffer_detach(event, old_rb);
9bb5d40c
PZ
6956
6957 if (rb)
6958 ring_buffer_attach(event, rb);
6959
6960 rcu_assign_pointer(event->rb, rb);
6961
6962 if (old_rb) {
6963 ring_buffer_put(old_rb);
6964 /*
6965 * Since we detached before setting the new rb, so that we
6966 * could attach the new rb, we could have missed a wakeup.
6967 * Provide it now.
6968 */
6969 wake_up_all(&event->waitq);
6970 }
6971
a4be7c27 6972 ret = 0;
ac9721f3
PZ
6973unlock:
6974 mutex_unlock(&event->mmap_mutex);
6975
a4be7c27 6976out:
a4be7c27
PZ
6977 return ret;
6978}
6979
0793a61d 6980/**
cdd6c482 6981 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9f66a381 6982 *
cdd6c482 6983 * @attr_uptr: event_id type attributes for monitoring/sampling
0793a61d 6984 * @pid: target pid
9f66a381 6985 * @cpu: target cpu
cdd6c482 6986 * @group_fd: group leader event fd
0793a61d 6987 */
cdd6c482
IM
6988SYSCALL_DEFINE5(perf_event_open,
6989 struct perf_event_attr __user *, attr_uptr,
2743a5b0 6990 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
0793a61d 6991{
b04243ef
PZ
6992 struct perf_event *group_leader = NULL, *output_event = NULL;
6993 struct perf_event *event, *sibling;
cdd6c482
IM
6994 struct perf_event_attr attr;
6995 struct perf_event_context *ctx;
6996 struct file *event_file = NULL;
2903ff01 6997 struct fd group = {NULL, 0};
38a81da2 6998 struct task_struct *task = NULL;
89a1e187 6999 struct pmu *pmu;
ea635c64 7000 int event_fd;
b04243ef 7001 int move_group = 0;
dc86cabe 7002 int err;
a21b0b35 7003 int f_flags = O_RDWR;
0793a61d 7004
2743a5b0 7005 /* for future expandability... */
e5d1367f 7006 if (flags & ~PERF_FLAG_ALL)
2743a5b0
PM
7007 return -EINVAL;
7008
dc86cabe
IM
7009 err = perf_copy_attr(attr_uptr, &attr);
7010 if (err)
7011 return err;
eab656ae 7012
0764771d
PZ
7013 if (!attr.exclude_kernel) {
7014 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
7015 return -EACCES;
7016 }
7017
df58ab24 7018 if (attr.freq) {
cdd6c482 7019 if (attr.sample_freq > sysctl_perf_event_sample_rate)
df58ab24
PZ
7020 return -EINVAL;
7021 }
7022
e5d1367f
SE
7023 /*
7024 * In cgroup mode, the pid argument is used to pass the fd
7025 * opened to the cgroup directory in cgroupfs. The cpu argument
7026 * designates the cpu on which to monitor threads from that
7027 * cgroup.
7028 */
7029 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
7030 return -EINVAL;
7031
a21b0b35
YD
7032 if (flags & PERF_FLAG_FD_CLOEXEC)
7033 f_flags |= O_CLOEXEC;
7034
7035 event_fd = get_unused_fd_flags(f_flags);
ea635c64
AV
7036 if (event_fd < 0)
7037 return event_fd;
7038
ac9721f3 7039 if (group_fd != -1) {
2903ff01
AV
7040 err = perf_fget_light(group_fd, &group);
7041 if (err)
d14b12d7 7042 goto err_fd;
2903ff01 7043 group_leader = group.file->private_data;
ac9721f3
PZ
7044 if (flags & PERF_FLAG_FD_OUTPUT)
7045 output_event = group_leader;
7046 if (flags & PERF_FLAG_FD_NO_GROUP)
7047 group_leader = NULL;
7048 }
7049
e5d1367f 7050 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
c6be5a5c
PZ
7051 task = find_lively_task_by_vpid(pid);
7052 if (IS_ERR(task)) {
7053 err = PTR_ERR(task);
7054 goto err_group_fd;
7055 }
7056 }
7057
fbfc623f
YZ
7058 get_online_cpus();
7059
4dc0da86
AK
7060 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
7061 NULL, NULL);
d14b12d7
SE
7062 if (IS_ERR(event)) {
7063 err = PTR_ERR(event);
c6be5a5c 7064 goto err_task;
d14b12d7
SE
7065 }
7066
e5d1367f
SE
7067 if (flags & PERF_FLAG_PID_CGROUP) {
7068 err = perf_cgroup_connect(pid, event, &attr, group_leader);
766d6c07
FW
7069 if (err) {
7070 __free_event(event);
7071 goto err_task;
7072 }
e5d1367f
SE
7073 }
7074
766d6c07
FW
7075 account_event(event);
7076
89a1e187
PZ
7077 /*
7078 * Special case software events and allow them to be part of
7079 * any hardware group.
7080 */
7081 pmu = event->pmu;
b04243ef
PZ
7082
7083 if (group_leader &&
7084 (is_software_event(event) != is_software_event(group_leader))) {
7085 if (is_software_event(event)) {
7086 /*
7087 * If event and group_leader are not both a software
7088 * event, and event is, then group leader is not.
7089 *
7090 * Allow the addition of software events to !software
7091 * groups, this is safe because software events never
7092 * fail to schedule.
7093 */
7094 pmu = group_leader->pmu;
7095 } else if (is_software_event(group_leader) &&
7096 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
7097 /*
7098 * In case the group is a pure software group, and we
7099 * try to add a hardware event, move the whole group to
7100 * the hardware context.
7101 */
7102 move_group = 1;
7103 }
7104 }
89a1e187
PZ
7105
7106 /*
7107 * Get the target context (task or percpu):
7108 */
e2d37cd2 7109 ctx = find_get_context(pmu, task, event->cpu);
89a1e187
PZ
7110 if (IS_ERR(ctx)) {
7111 err = PTR_ERR(ctx);
c6be5a5c 7112 goto err_alloc;
89a1e187
PZ
7113 }
7114
fd1edb3a
PZ
7115 if (task) {
7116 put_task_struct(task);
7117 task = NULL;
7118 }
7119
ccff286d 7120 /*
cdd6c482 7121 * Look up the group leader (we will attach this event to it):
04289bb9 7122 */
ac9721f3 7123 if (group_leader) {
dc86cabe 7124 err = -EINVAL;
04289bb9 7125
04289bb9 7126 /*
ccff286d
IM
7127 * Do not allow a recursive hierarchy (this new sibling
7128 * becoming part of another group-sibling):
7129 */
7130 if (group_leader->group_leader != group_leader)
c3f00c70 7131 goto err_context;
ccff286d
IM
7132 /*
7133 * Do not allow to attach to a group in a different
7134 * task or CPU context:
04289bb9 7135 */
b04243ef
PZ
7136 if (move_group) {
7137 if (group_leader->ctx->type != ctx->type)
7138 goto err_context;
7139 } else {
7140 if (group_leader->ctx != ctx)
7141 goto err_context;
7142 }
7143
3b6f9e5c
PM
7144 /*
7145 * Only a group leader can be exclusive or pinned
7146 */
0d48696f 7147 if (attr.exclusive || attr.pinned)
c3f00c70 7148 goto err_context;
ac9721f3
PZ
7149 }
7150
7151 if (output_event) {
7152 err = perf_event_set_output(event, output_event);
7153 if (err)
c3f00c70 7154 goto err_context;
ac9721f3 7155 }
0793a61d 7156
a21b0b35
YD
7157 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
7158 f_flags);
ea635c64
AV
7159 if (IS_ERR(event_file)) {
7160 err = PTR_ERR(event_file);
c3f00c70 7161 goto err_context;
ea635c64 7162 }
9b51f66d 7163
b04243ef
PZ
7164 if (move_group) {
7165 struct perf_event_context *gctx = group_leader->ctx;
7166
7167 mutex_lock(&gctx->mutex);
fe4b04fa 7168 perf_remove_from_context(group_leader);
0231bb53
JO
7169
7170 /*
7171 * Removing from the context ends up with a disabled
7172 * event. What we want here is an event in the initial
7173 * startup state, ready to be added into the new context.
7174 */
7175 perf_event__state_init(group_leader);
b04243ef
PZ
7176 list_for_each_entry(sibling, &group_leader->sibling_list,
7177 group_entry) {
fe4b04fa 7178 perf_remove_from_context(sibling);
0231bb53 7179 perf_event__state_init(sibling);
b04243ef
PZ
7180 put_ctx(gctx);
7181 }
7182 mutex_unlock(&gctx->mutex);
7183 put_ctx(gctx);
ea635c64 7184 }
9b51f66d 7185
ad3a37de 7186 WARN_ON_ONCE(ctx->parent_ctx);
d859e29f 7187 mutex_lock(&ctx->mutex);
b04243ef
PZ
7188
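	/*
	 * If we are moving a pure software group into a hardware
	 * context, install the leader and each sibling in the new
	 * context, taking a context reference per installed event.
	 */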
7189 if (move_group) {
0cda4c02 7190 synchronize_rcu();
e2d37cd2 7191 perf_install_in_context(ctx, group_leader, event->cpu);
b04243ef
PZ
7192 get_ctx(ctx);
7193 list_for_each_entry(sibling, &group_leader->sibling_list,
7194 group_entry) {
e2d37cd2 7195 perf_install_in_context(ctx, sibling, event->cpu);
b04243ef
PZ
7196 get_ctx(ctx);
7197 }
7198 }
7199
e2d37cd2 7200 perf_install_in_context(ctx, event, event->cpu);
fe4b04fa 7201 perf_unpin_context(ctx);
d859e29f 7202 mutex_unlock(&ctx->mutex);
9b51f66d 7203
fbfc623f
YZ
7204 put_online_cpus();
7205
cdd6c482 7206 event->owner = current;
8882135b 7207
cdd6c482
IM
7208 mutex_lock(&current->perf_event_mutex);
7209 list_add_tail(&event->owner_entry, &current->perf_event_list);
7210 mutex_unlock(&current->perf_event_mutex);
082ff5a2 7211
c320c7b7
ACM
7212 /*
7213 * Precalculate sample_data sizes
7214 */
7215 perf_event__header_size(event);
6844c09d 7216 perf_event__id_header_size(event);
c320c7b7 7217
8a49542c
PZ
7218 /*
7219 * Drop the reference on the group_event after placing the
7220 * new event on the sibling_list. This ensures destruction
7221 * of the group leader will find the pointer to itself in
7222 * perf_group_detach().
7223 */
2903ff01 7224 fdput(group);
ea635c64
AV
7225 fd_install(event_fd, event_file);
7226 return event_fd;
0793a61d 7227
c3f00c70 7228err_context:
fe4b04fa 7229 perf_unpin_context(ctx);
ea635c64 7230 put_ctx(ctx);
c6be5a5c 7231err_alloc:
ea635c64 7232 free_event(event);
e7d0bc04 7233err_task:
fbfc623f 7234 put_online_cpus();
e7d0bc04
PZ
7235 if (task)
7236 put_task_struct(task);
89a1e187 7237err_group_fd:
2903ff01 7238 fdput(group);
ea635c64
AV
7239err_fd:
7240 put_unused_fd(event_fd);
dc86cabe 7241 return err;
0793a61d
TG
7242}
7243
fb0459d7
AV
7244/**
7245 * perf_event_create_kernel_counter
7246 *
7247 * @attr: attributes of the counter to create
7248 * @cpu: cpu in which the counter is bound
38a81da2 7249 * @task: task to profile (NULL for percpu)
fb0459d7
AV
7250 */
7251struct perf_event *
7252perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
38a81da2 7253 struct task_struct *task,
4dc0da86
AK
7254 perf_overflow_handler_t overflow_handler,
7255 void *context)
fb0459d7 7256{
fb0459d7 7257 struct perf_event_context *ctx;
c3f00c70 7258 struct perf_event *event;
fb0459d7 7259 int err;
d859e29f 7260
fb0459d7
AV
7261 /*
7262 * Get the target context (task or percpu):
7263 */
d859e29f 7264
4dc0da86
AK
7265 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
7266 overflow_handler, context);
c3f00c70
PZ
7267 if (IS_ERR(event)) {
7268 err = PTR_ERR(event);
7269 goto err;
7270 }
d859e29f 7271
766d6c07
FW
7272 account_event(event);
7273
38a81da2 7274 ctx = find_get_context(event->pmu, task, cpu);
c6567f64
FW
7275 if (IS_ERR(ctx)) {
7276 err = PTR_ERR(ctx);
c3f00c70 7277 goto err_free;
d859e29f 7278 }
fb0459d7 7279
fb0459d7
AV
7280 WARN_ON_ONCE(ctx->parent_ctx);
7281 mutex_lock(&ctx->mutex);
7282 perf_install_in_context(ctx, event, cpu);
fe4b04fa 7283 perf_unpin_context(ctx);
fb0459d7
AV
7284 mutex_unlock(&ctx->mutex);
7285
fb0459d7
AV
7286 return event;
7287
c3f00c70
PZ
7288err_free:
7289 free_event(event);
7290err:
c6567f64 7291 return ERR_PTR(err);
9b51f66d 7292}
fb0459d7 7293EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
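/*
 * Illustrative sketch (not part of the original source): an in-kernel
 * user creates a per-cpu counter roughly like this, with
 * my_overflow_handler standing in for a real callback:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(struct perf_event_attr),
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */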
9b51f66d 7294
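/*
 * Move all events of @pmu from the @src_cpu context over to the
 * @dst_cpu context: detach and unaccount them under the source mutex,
 * wait for a grace period, then re-account and install them on
 * @dst_cpu.
 */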
0cda4c02
YZ
7295void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7296{
7297 struct perf_event_context *src_ctx;
7298 struct perf_event_context *dst_ctx;
7299 struct perf_event *event, *tmp;
7300 LIST_HEAD(events);
7301
7302 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
7303 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
7304
7305 mutex_lock(&src_ctx->mutex);
7306 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
7307 event_entry) {
7308 perf_remove_from_context(event);
9a545de0 7309 unaccount_event_cpu(event, src_cpu);
0cda4c02 7310 put_ctx(src_ctx);
9886167d 7311 list_add(&event->migrate_entry, &events);
0cda4c02
YZ
7312 }
7313 mutex_unlock(&src_ctx->mutex);
7314
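	/*
	 * Wait until all RCU readers of the source context's event list
	 * are done with the removed events before installing them in the
	 * destination context below.
	 */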
7315 synchronize_rcu();
7316
7317 mutex_lock(&dst_ctx->mutex);
9886167d
PZ
7318 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
7319 list_del(&event->migrate_entry);
0cda4c02
YZ
7320 if (event->state >= PERF_EVENT_STATE_OFF)
7321 event->state = PERF_EVENT_STATE_INACTIVE;
9a545de0 7322 account_event_cpu(event, dst_cpu);
0cda4c02
YZ
7323 perf_install_in_context(dst_ctx, event, dst_cpu);
7324 get_ctx(dst_ctx);
7325 }
7326 mutex_unlock(&dst_ctx->mutex);
7327}
7328EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
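/*
 * Usage note (illustrative, with hypothetical names): this export exists
 * for PMU drivers whose events must follow a CPU across hotplug, e.g. an
 * uncore driver's hotplug callback could hand its context over with:
 *
 *	perf_pmu_migrate_context(&my_uncore_pmu, dying_cpu, target_cpu);
 */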
7329
cdd6c482 7330static void sync_child_event(struct perf_event *child_event,
38b200d6 7331 struct task_struct *child)
d859e29f 7332{
cdd6c482 7333 struct perf_event *parent_event = child_event->parent;
8bc20959 7334 u64 child_val;
d859e29f 7335
cdd6c482
IM
7336 if (child_event->attr.inherit_stat)
7337 perf_event_read_event(child_event, child);
38b200d6 7338
b5e58793 7339 child_val = perf_event_count(child_event);
d859e29f
PM
7340
7341 /*
7342 * Add back the child's count to the parent's count:
7343 */
a6e6dea6 7344 atomic64_add(child_val, &parent_event->child_count);
cdd6c482
IM
7345 atomic64_add(child_event->total_time_enabled,
7346 &parent_event->child_total_time_enabled);
7347 atomic64_add(child_event->total_time_running,
7348 &parent_event->child_total_time_running);
d859e29f
PM
7349
7350 /*
cdd6c482 7351 * Remove this event from the parent's list
d859e29f 7352 */
cdd6c482
IM
7353 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7354 mutex_lock(&parent_event->child_mutex);
7355 list_del_init(&child_event->child_list);
7356 mutex_unlock(&parent_event->child_mutex);
d859e29f
PM
7357
7358 /*
cdd6c482 7359 * Release the parent event, if this was the last
d859e29f
PM
7360 * reference to it.
7361 */
a6fa941d 7362 put_event(parent_event);
d859e29f
PM
7363}
7364
9b51f66d 7365static void
cdd6c482
IM
7366__perf_event_exit_task(struct perf_event *child_event,
7367 struct perf_event_context *child_ctx,
38b200d6 7368 struct task_struct *child)
9b51f66d 7369{
38b435b1
PZ
7370 if (child_event->parent) {
7371 raw_spin_lock_irq(&child_ctx->lock);
7372 perf_group_detach(child_event);
7373 raw_spin_unlock_irq(&child_ctx->lock);
7374 }
9b51f66d 7375
fe4b04fa 7376 perf_remove_from_context(child_event);
0cc0c027 7377
9b51f66d 7378 /*
38b435b1 7379 * It can happen that the parent exits first, and has events
9b51f66d 7380 * that are still around due to the child reference. These
38b435b1 7381 * events need to be zapped.
9b51f66d 7382 */
38b435b1 7383 if (child_event->parent) {
cdd6c482
IM
7384 sync_child_event(child_event, child);
7385 free_event(child_event);
4bcf349a 7386 }
9b51f66d
IM
7387}
7388
8dc85d54 7389static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9b51f66d 7390{
cdd6c482
IM
7391 struct perf_event *child_event, *tmp;
7392 struct perf_event_context *child_ctx;
a63eaf34 7393 unsigned long flags;
9b51f66d 7394
8dc85d54 7395 if (likely(!child->perf_event_ctxp[ctxn])) {
cdd6c482 7396 perf_event_task(child, NULL, 0);
9b51f66d 7397 return;
9f498cc5 7398 }
9b51f66d 7399
a63eaf34 7400 local_irq_save(flags);
ad3a37de
PM
7401 /*
7402 * We can't reschedule here because interrupts are disabled,
 7403 * and either the child is current or it is a task that can't be
7404 * scheduled, so we are now safe from rescheduling changing
7405 * our context.
7406 */
806839b2 7407 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
c93f7669
PM
7408
7409 /*
7410 * Take the context lock here so that if find_get_context is
cdd6c482 7411 * reading child->perf_event_ctxp, we wait until it has
c93f7669
PM
7412 * incremented the context's refcount before we do put_ctx below.
7413 */
e625cce1 7414 raw_spin_lock(&child_ctx->lock);
04dc2dbb 7415 task_ctx_sched_out(child_ctx);
8dc85d54 7416 child->perf_event_ctxp[ctxn] = NULL;
71a851b4
PZ
7417 /*
 7418 * If this context is a clone, unclone it so it can't get
7419 * swapped to another process while we're removing all
cdd6c482 7420 * the events from it.
71a851b4
PZ
7421 */
7422 unclone_ctx(child_ctx);
5e942bb3 7423 update_context_time(child_ctx);
e625cce1 7424 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
9f498cc5
PZ
7425
7426 /*
cdd6c482
IM
7427 * Report the task dead after unscheduling the events so that we
7428 * won't get any samples after PERF_RECORD_EXIT. We can however still
7429 * get a few PERF_RECORD_READ events.
9f498cc5 7430 */
cdd6c482 7431 perf_event_task(child, child_ctx, 0);
a63eaf34 7432
66fff224
PZ
7433 /*
7434 * We can recurse on the same lock type through:
7435 *
cdd6c482
IM
7436 * __perf_event_exit_task()
7437 * sync_child_event()
a6fa941d
AV
7438 * put_event()
7439 * mutex_lock(&ctx->mutex)
66fff224
PZ
7440 *
 7441 * But since it's the parent context it won't be the same instance.
7442 */
a0507c84 7443 mutex_lock(&child_ctx->mutex);
a63eaf34 7444
8bc20959 7445again:
889ff015
FW
7446 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
7447 group_entry)
7448 __perf_event_exit_task(child_event, child_ctx, child);
7449
7450 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
65abc865 7451 group_entry)
cdd6c482 7452 __perf_event_exit_task(child_event, child_ctx, child);
8bc20959
PZ
7453
7454 /*
cdd6c482 7455 * If the last event was a group event, it will have appended all
8bc20959
PZ
 7456 * its siblings to the list, but we obtained 'tmp' before that, which
7457 * will still point to the list head terminating the iteration.
7458 */
889ff015
FW
7459 if (!list_empty(&child_ctx->pinned_groups) ||
7460 !list_empty(&child_ctx->flexible_groups))
8bc20959 7461 goto again;
a63eaf34
PM
7462
7463 mutex_unlock(&child_ctx->mutex);
7464
7465 put_ctx(child_ctx);
9b51f66d
IM
7466}
7467
8dc85d54
PZ
7468/*
7469 * When a child task exits, feed back event values to parent events.
7470 */
7471void perf_event_exit_task(struct task_struct *child)
7472{
8882135b 7473 struct perf_event *event, *tmp;
8dc85d54
PZ
7474 int ctxn;
7475
8882135b
PZ
7476 mutex_lock(&child->perf_event_mutex);
7477 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
7478 owner_entry) {
7479 list_del_init(&event->owner_entry);
7480
7481 /*
7482 * Ensure the list deletion is visible before we clear
 7483 * the owner; this closes a race against perf_release() where
7484 * we need to serialize on the owner->perf_event_mutex.
7485 */
7486 smp_wmb();
7487 event->owner = NULL;
7488 }
7489 mutex_unlock(&child->perf_event_mutex);
7490
8dc85d54
PZ
7491 for_each_task_context_nr(ctxn)
7492 perf_event_exit_task_context(child, ctxn);
7493}
7494
889ff015
FW
7495static void perf_free_event(struct perf_event *event,
7496 struct perf_event_context *ctx)
7497{
7498 struct perf_event *parent = event->parent;
7499
7500 if (WARN_ON_ONCE(!parent))
7501 return;
7502
7503 mutex_lock(&parent->child_mutex);
7504 list_del_init(&event->child_list);
7505 mutex_unlock(&parent->child_mutex);
7506
a6fa941d 7507 put_event(parent);
889ff015 7508
8a49542c 7509 perf_group_detach(event);
889ff015
FW
7510 list_del_event(event, ctx);
7511 free_event(event);
7512}
7513
bbbee908
PZ
7514/*
 7515 * free an unexposed, unused context created by inheritance in
8dc85d54 7516 * perf_event_init_task() below; used by fork() in case of failure.
bbbee908 7517 */
cdd6c482 7518void perf_event_free_task(struct task_struct *task)
bbbee908 7519{
8dc85d54 7520 struct perf_event_context *ctx;
cdd6c482 7521 struct perf_event *event, *tmp;
8dc85d54 7522 int ctxn;
bbbee908 7523
8dc85d54
PZ
7524 for_each_task_context_nr(ctxn) {
7525 ctx = task->perf_event_ctxp[ctxn];
7526 if (!ctx)
7527 continue;
bbbee908 7528
8dc85d54 7529 mutex_lock(&ctx->mutex);
bbbee908 7530again:
8dc85d54
PZ
7531 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
7532 group_entry)
7533 perf_free_event(event, ctx);
bbbee908 7534
8dc85d54
PZ
7535 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
7536 group_entry)
7537 perf_free_event(event, ctx);
bbbee908 7538
8dc85d54
PZ
7539 if (!list_empty(&ctx->pinned_groups) ||
7540 !list_empty(&ctx->flexible_groups))
7541 goto again;
bbbee908 7542
8dc85d54 7543 mutex_unlock(&ctx->mutex);
bbbee908 7544
8dc85d54
PZ
7545 put_ctx(ctx);
7546 }
889ff015
FW
7547}
7548
4e231c79
PZ
7549void perf_event_delayed_put(struct task_struct *task)
7550{
7551 int ctxn;
7552
7553 for_each_task_context_nr(ctxn)
7554 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
7555}
7556
97dee4f3
PZ
7557/*
 7558 * inherit an event from parent task to child task:
7559 */
7560static struct perf_event *
7561inherit_event(struct perf_event *parent_event,
7562 struct task_struct *parent,
7563 struct perf_event_context *parent_ctx,
7564 struct task_struct *child,
7565 struct perf_event *group_leader,
7566 struct perf_event_context *child_ctx)
7567{
7568 struct perf_event *child_event;
cee010ec 7569 unsigned long flags;
97dee4f3
PZ
7570
7571 /*
7572 * Instead of creating recursive hierarchies of events,
7573 * we link inherited events back to the original parent,
 7574 * which is guaranteed to have a filp, and which we use as the
 7575 * reference count:
7576 */
7577 if (parent_event->parent)
7578 parent_event = parent_event->parent;
7579
7580 child_event = perf_event_alloc(&parent_event->attr,
7581 parent_event->cpu,
d580ff86 7582 child,
97dee4f3 7583 group_leader, parent_event,
4dc0da86 7584 NULL, NULL);
97dee4f3
PZ
7585 if (IS_ERR(child_event))
7586 return child_event;
a6fa941d
AV
7587
7588 if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
7589 free_event(child_event);
7590 return NULL;
7591 }
7592
97dee4f3
PZ
7593 get_ctx(child_ctx);
7594
7595 /*
7596 * Make the child state follow the state of the parent event,
7597 * not its attr.disabled bit. We hold the parent's mutex,
7598 * so we won't race with perf_event_{en, dis}able_family.
7599 */
7600 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
7601 child_event->state = PERF_EVENT_STATE_INACTIVE;
7602 else
7603 child_event->state = PERF_EVENT_STATE_OFF;
7604
7605 if (parent_event->attr.freq) {
7606 u64 sample_period = parent_event->hw.sample_period;
7607 struct hw_perf_event *hwc = &child_event->hw;
7608
7609 hwc->sample_period = sample_period;
7610 hwc->last_period = sample_period;
7611
7612 local64_set(&hwc->period_left, sample_period);
7613 }
7614
7615 child_event->ctx = child_ctx;
7616 child_event->overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
7617 child_event->overflow_handler_context
7618 = parent_event->overflow_handler_context;
97dee4f3 7619
614b6780
TG
7620 /*
7621 * Precalculate sample_data sizes
7622 */
7623 perf_event__header_size(child_event);
6844c09d 7624 perf_event__id_header_size(child_event);
614b6780 7625
97dee4f3
PZ
7626 /*
7627 * Link it up in the child's context:
7628 */
cee010ec 7629 raw_spin_lock_irqsave(&child_ctx->lock, flags);
97dee4f3 7630 add_event_to_ctx(child_event, child_ctx);
cee010ec 7631 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
97dee4f3 7632
97dee4f3
PZ
7633 /*
7634 * Link this into the parent event's child list
7635 */
7636 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7637 mutex_lock(&parent_event->child_mutex);
7638 list_add_tail(&child_event->child_list, &parent_event->child_list);
7639 mutex_unlock(&parent_event->child_mutex);
7640
7641 return child_event;
7642}
7643
7644static int inherit_group(struct perf_event *parent_event,
7645 struct task_struct *parent,
7646 struct perf_event_context *parent_ctx,
7647 struct task_struct *child,
7648 struct perf_event_context *child_ctx)
7649{
7650 struct perf_event *leader;
7651 struct perf_event *sub;
7652 struct perf_event *child_ctr;
7653
7654 leader = inherit_event(parent_event, parent, parent_ctx,
7655 child, NULL, child_ctx);
7656 if (IS_ERR(leader))
7657 return PTR_ERR(leader);
7658 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
7659 child_ctr = inherit_event(sub, parent, parent_ctx,
7660 child, leader, child_ctx);
7661 if (IS_ERR(child_ctr))
7662 return PTR_ERR(child_ctr);
7663 }
7664 return 0;
889ff015
FW
7665}
7666
7667static int
7668inherit_task_group(struct perf_event *event, struct task_struct *parent,
7669 struct perf_event_context *parent_ctx,
8dc85d54 7670 struct task_struct *child, int ctxn,
889ff015
FW
7671 int *inherited_all)
7672{
7673 int ret;
8dc85d54 7674 struct perf_event_context *child_ctx;
889ff015
FW
7675
7676 if (!event->attr.inherit) {
7677 *inherited_all = 0;
7678 return 0;
bbbee908
PZ
7679 }
7680
fe4b04fa 7681 child_ctx = child->perf_event_ctxp[ctxn];
889ff015
FW
7682 if (!child_ctx) {
7683 /*
7684 * This is executed from the parent task context, so
7685 * inherit events that have been marked for cloning.
7686 * First allocate and initialize a context for the
7687 * child.
7688 */
bbbee908 7689
734df5ab 7690 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
889ff015
FW
7691 if (!child_ctx)
7692 return -ENOMEM;
bbbee908 7693
8dc85d54 7694 child->perf_event_ctxp[ctxn] = child_ctx;
889ff015
FW
7695 }
7696
7697 ret = inherit_group(event, parent, parent_ctx,
7698 child, child_ctx);
7699
7700 if (ret)
7701 *inherited_all = 0;
7702
7703 return ret;
bbbee908
PZ
7704}
7705
9b51f66d 7706/*
cdd6c482 7707 * Initialize the perf_event context in task_struct
9b51f66d 7708 */
8dc85d54 7709int perf_event_init_context(struct task_struct *child, int ctxn)
9b51f66d 7710{
889ff015 7711 struct perf_event_context *child_ctx, *parent_ctx;
cdd6c482
IM
7712 struct perf_event_context *cloned_ctx;
7713 struct perf_event *event;
9b51f66d 7714 struct task_struct *parent = current;
564c2b21 7715 int inherited_all = 1;
dddd3379 7716 unsigned long flags;
6ab423e0 7717 int ret = 0;
9b51f66d 7718
8dc85d54 7719 if (likely(!parent->perf_event_ctxp[ctxn]))
6ab423e0
PZ
7720 return 0;
7721
ad3a37de 7722 /*
25346b93
PM
7723 * If the parent's context is a clone, pin it so it won't get
7724 * swapped under us.
ad3a37de 7725 */
8dc85d54 7726 parent_ctx = perf_pin_task_context(parent, ctxn);
25346b93 7727
ad3a37de
PM
7728 /*
7729 * No need to check if parent_ctx != NULL here; since we saw
7730 * it non-NULL earlier, the only reason for it to become NULL
7731 * is if we exit, and since we're currently in the middle of
7732 * a fork we can't be exiting at the same time.
7733 */
ad3a37de 7734
9b51f66d
IM
7735 /*
7736 * Lock the parent list. No need to lock the child - not PID
7737 * hashed yet and not running, so nobody can access it.
7738 */
d859e29f 7739 mutex_lock(&parent_ctx->mutex);
9b51f66d
IM
7740
7741 /*
 7742 * We don't have to disable NMIs - we are only looking at
7743 * the list, not manipulating it:
7744 */
889ff015 7745 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
8dc85d54
PZ
7746 ret = inherit_task_group(event, parent, parent_ctx,
7747 child, ctxn, &inherited_all);
889ff015
FW
7748 if (ret)
7749 break;
7750 }
b93f7978 7751
dddd3379
TG
7752 /*
 7753 * We can't hold ctx->lock when iterating the ->flexible_groups list due
7754 * to allocations, but we need to prevent rotation because
7755 * rotate_ctx() will change the list from interrupt context.
7756 */
7757 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7758 parent_ctx->rotate_disable = 1;
7759 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7760
889ff015 7761 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
8dc85d54
PZ
7762 ret = inherit_task_group(event, parent, parent_ctx,
7763 child, ctxn, &inherited_all);
889ff015 7764 if (ret)
9b51f66d 7765 break;
564c2b21
PM
7766 }
7767
dddd3379
TG
7768 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7769 parent_ctx->rotate_disable = 0;
dddd3379 7770
8dc85d54 7771 child_ctx = child->perf_event_ctxp[ctxn];
889ff015 7772
05cbaa28 7773 if (child_ctx && inherited_all) {
564c2b21
PM
7774 /*
7775 * Mark the child context as a clone of the parent
7776 * context, or of whatever the parent is a clone of.
c5ed5145
PZ
7777 *
7778 * Note that if the parent is a clone, the holding of
 7779 * parent_ctx->lock prevents it from being uncloned.
564c2b21 7780 */
c5ed5145 7781 cloned_ctx = parent_ctx->parent_ctx;
ad3a37de
PM
7782 if (cloned_ctx) {
7783 child_ctx->parent_ctx = cloned_ctx;
25346b93 7784 child_ctx->parent_gen = parent_ctx->parent_gen;
564c2b21
PM
7785 } else {
7786 child_ctx->parent_ctx = parent_ctx;
7787 child_ctx->parent_gen = parent_ctx->generation;
7788 }
7789 get_ctx(child_ctx->parent_ctx);
9b51f66d
IM
7790 }
7791
c5ed5145 7792 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
d859e29f 7793 mutex_unlock(&parent_ctx->mutex);
6ab423e0 7794
25346b93 7795 perf_unpin_context(parent_ctx);
fe4b04fa 7796 put_ctx(parent_ctx);
ad3a37de 7797
6ab423e0 7798 return ret;
9b51f66d
IM
7799}
7800
8dc85d54
PZ
7801/*
7802 * Initialize the perf_event context in task_struct
7803 */
7804int perf_event_init_task(struct task_struct *child)
7805{
7806 int ctxn, ret;
7807
8550d7cb
ON
7808 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
7809 mutex_init(&child->perf_event_mutex);
7810 INIT_LIST_HEAD(&child->perf_event_list);
7811
8dc85d54
PZ
7812 for_each_task_context_nr(ctxn) {
7813 ret = perf_event_init_context(child, ctxn);
7814 if (ret)
7815 return ret;
7816 }
7817
7818 return 0;
7819}
7820
220b140b
PM
7821static void __init perf_event_init_all_cpus(void)
7822{
b28ab83c 7823 struct swevent_htable *swhash;
220b140b 7824 int cpu;
220b140b
PM
7825
7826 for_each_possible_cpu(cpu) {
b28ab83c
PZ
7827 swhash = &per_cpu(swevent_htable, cpu);
7828 mutex_init(&swhash->hlist_mutex);
e9d2b064 7829 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
220b140b
PM
7830 }
7831}
7832
0db0628d 7833static void perf_event_init_cpu(int cpu)
0793a61d 7834{
108b02cf 7835 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
0793a61d 7836
b28ab83c 7837 mutex_lock(&swhash->hlist_mutex);
4536e4d1 7838 if (swhash->hlist_refcount > 0) {
76e1d904
FW
7839 struct swevent_hlist *hlist;
7840
b28ab83c
PZ
7841 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
7842 WARN_ON(!hlist);
7843 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 7844 }
b28ab83c 7845 mutex_unlock(&swhash->hlist_mutex);
0793a61d
TG
7846}
7847
c277443c 7848#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
e9d2b064 7849static void perf_pmu_rotate_stop(struct pmu *pmu)
0793a61d 7850{
e9d2b064
PZ
7851 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
7852
7853 WARN_ON(!irqs_disabled());
7854
7855 list_del_init(&cpuctx->rotation_list);
7856}
7857
108b02cf 7858static void __perf_event_exit_context(void *__info)
0793a61d 7859{
108b02cf 7860 struct perf_event_context *ctx = __info;
e3703f8c 7861 struct perf_event *event;
0793a61d 7862
108b02cf 7863 perf_pmu_rotate_stop(ctx->pmu);
b5ab4cd5 7864
e3703f8c
PZ
7865 rcu_read_lock();
7866 list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
fe4b04fa 7867 __perf_remove_from_context(event);
e3703f8c 7868 rcu_read_unlock();
0793a61d 7869}
108b02cf
PZ
7870
7871static void perf_event_exit_cpu_context(int cpu)
7872{
7873 struct perf_event_context *ctx;
7874 struct pmu *pmu;
7875 int idx;
7876
7877 idx = srcu_read_lock(&pmus_srcu);
7878 list_for_each_entry_rcu(pmu, &pmus, entry) {
917bdd1c 7879 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
108b02cf
PZ
7880
7881 mutex_lock(&ctx->mutex);
7882 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
7883 mutex_unlock(&ctx->mutex);
7884 }
7885 srcu_read_unlock(&pmus_srcu, idx);
108b02cf
PZ
7886}
7887
cdd6c482 7888static void perf_event_exit_cpu(int cpu)
0793a61d 7889{
b28ab83c 7890 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
d859e29f 7891
e3703f8c
PZ
7892 perf_event_exit_cpu_context(cpu);
7893
b28ab83c
PZ
7894 mutex_lock(&swhash->hlist_mutex);
7895 swevent_hlist_release(swhash);
7896 mutex_unlock(&swhash->hlist_mutex);
0793a61d
TG
7897}
7898#else
cdd6c482 7899static inline void perf_event_exit_cpu(int cpu) { }
0793a61d
TG
7900#endif
7901
c277443c
PZ
7902static int
7903perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
7904{
7905 int cpu;
7906
7907 for_each_online_cpu(cpu)
7908 perf_event_exit_cpu(cpu);
7909
7910 return NOTIFY_OK;
7911}
7912
7913/*
7914 * Run the perf reboot notifier at the very last possible moment so that
7915 * the generic watchdog code runs as long as possible.
7916 */
7917static struct notifier_block perf_reboot_notifier = {
7918 .notifier_call = perf_reboot,
7919 .priority = INT_MIN,
7920};
7921
0db0628d 7922static int
0793a61d
TG
7923perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
7924{
7925 unsigned int cpu = (long)hcpu;
7926
4536e4d1 7927 switch (action & ~CPU_TASKS_FROZEN) {
0793a61d
TG
7928
7929 case CPU_UP_PREPARE:
5e11637e 7930 case CPU_DOWN_FAILED:
cdd6c482 7931 perf_event_init_cpu(cpu);
0793a61d
TG
7932 break;
7933
5e11637e 7934 case CPU_UP_CANCELED:
0793a61d 7935 case CPU_DOWN_PREPARE:
cdd6c482 7936 perf_event_exit_cpu(cpu);
0793a61d 7937 break;
0793a61d
TG
7938 default:
7939 break;
7940 }
7941
7942 return NOTIFY_OK;
7943}
7944
cdd6c482 7945void __init perf_event_init(void)
0793a61d 7946{
3c502e7a
JW
7947 int ret;
7948
2e80a82a
PZ
7949 idr_init(&pmu_idr);
7950
220b140b 7951 perf_event_init_all_cpus();
b0a873eb 7952 init_srcu_struct(&pmus_srcu);
2e80a82a
PZ
7953 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
7954 perf_pmu_register(&perf_cpu_clock, NULL, -1);
7955 perf_pmu_register(&perf_task_clock, NULL, -1);
b0a873eb
PZ
7956 perf_tp_register();
7957 perf_cpu_notifier(perf_cpu_notify);
c277443c 7958 register_reboot_notifier(&perf_reboot_notifier);
3c502e7a
JW
7959
7960 ret = init_hw_breakpoint();
7961 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
b2029520
GN
7962
7963 /* do not patch jump label more than once per second */
7964 jump_label_rate_limit(&perf_sched_events, HZ);
b01c3a00
JO
7965
7966 /*
 7967 * Build-time assertion that we keep the data_head at the intended
 7968 * location. IOW, validation that we got the __reserved[] size right.
7969 */
7970 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
7971 != 1024);
0793a61d 7972}
abe43400
PZ
7973
7974static int __init perf_event_sysfs_init(void)
7975{
7976 struct pmu *pmu;
7977 int ret;
7978
7979 mutex_lock(&pmus_lock);
7980
7981 ret = bus_register(&pmu_bus);
7982 if (ret)
7983 goto unlock;
7984
7985 list_for_each_entry(pmu, &pmus, entry) {
7986 if (!pmu->name || pmu->type < 0)
7987 continue;
7988
7989 ret = pmu_dev_alloc(pmu);
7990 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
7991 }
7992 pmu_bus_running = 1;
7993 ret = 0;
7994
7995unlock:
7996 mutex_unlock(&pmus_lock);
7997
7998 return ret;
7999}
8000device_initcall(perf_event_sysfs_init);
e5d1367f
SE
8001
8002#ifdef CONFIG_CGROUP_PERF
eb95419b
TH
8003static struct cgroup_subsys_state *
8004perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
e5d1367f
SE
8005{
8006 struct perf_cgroup *jc;
e5d1367f 8007
1b15d055 8008 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
e5d1367f
SE
8009 if (!jc)
8010 return ERR_PTR(-ENOMEM);
8011
e5d1367f
SE
8012 jc->info = alloc_percpu(struct perf_cgroup_info);
8013 if (!jc->info) {
8014 kfree(jc);
8015 return ERR_PTR(-ENOMEM);
8016 }
8017
e5d1367f
SE
8018 return &jc->css;
8019}
8020
eb95419b 8021static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
e5d1367f 8022{
eb95419b
TH
8023 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
8024
e5d1367f
SE
8025 free_percpu(jc->info);
8026 kfree(jc);
8027}
8028
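/*
 * Runs on the CPU the target task is executing on (invoked through
 * task_function_call()): schedule the task's cgroup events out and back
 * in so that they observe the task's new cgroup.
 */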
8029static int __perf_cgroup_move(void *info)
8030{
8031 struct task_struct *task = info;
8032 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
8033 return 0;
8034}
8035
eb95419b
TH
8036static void perf_cgroup_attach(struct cgroup_subsys_state *css,
8037 struct cgroup_taskset *tset)
e5d1367f 8038{
bb9d97b6
TH
8039 struct task_struct *task;
8040
924f0d9a 8041 cgroup_taskset_for_each(task, tset)
bb9d97b6 8042 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
8043}
8044
eb95419b
TH
8045static void perf_cgroup_exit(struct cgroup_subsys_state *css,
8046 struct cgroup_subsys_state *old_css,
761b3ef5 8047 struct task_struct *task)
e5d1367f
SE
8048{
8049 /*
8050 * cgroup_exit() is called in the copy_process() failure path.
 8051 * Ignore this case since the task hasn't run yet; this avoids
 8052 * trying to poke at half-freed task state from generic code.
8053 */
8054 if (!(task->flags & PF_EXITING))
8055 return;
8056
bb9d97b6 8057 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
8058}
8059
073219e9 8060struct cgroup_subsys perf_event_cgrp_subsys = {
92fb9748
TH
8061 .css_alloc = perf_cgroup_css_alloc,
8062 .css_free = perf_cgroup_css_free,
e7e7ee2e 8063 .exit = perf_cgroup_exit,
bb9d97b6 8064 .attach = perf_cgroup_attach,
e5d1367f
SE
8065};
8066#endif /* CONFIG_CGROUP_PERF */