switch simple cases of fget_light to fdget
[linux-2.6-block.git] / kernel / events / core.c
/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include "internal.h"

#include <asm/irq_regs.h>
struct remote_function_call {
        struct task_struct      *p;
        int                     (*func)(void *info);
        void                    *info;
        int                     ret;
};

static void remote_function(void *data)
{
        struct remote_function_call *tfc = data;
        struct task_struct *p = tfc->p;

        if (p) {
                tfc->ret = -EAGAIN;
                if (task_cpu(p) != smp_processor_id() || !task_curr(p))
                        return;
        }

        tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:          the task to evaluate
 * @func:       the function to be called
 * @info:       the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.
 *
 * returns: @func return value, or
 *          -ESRCH  - when the process isn't running
 *          -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
        struct remote_function_call data = {
                .p      = p,
                .func   = func,
                .info   = info,
                .ret    = -ESRCH, /* No such (running) process */
        };

        if (task_curr(p))
                smp_call_function_single(task_cpu(p), remote_function, &data, 1);

        return data.ret;
}
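/*
 * Usage sketch (illustrative only): callers hand task_function_call() an
 * IPI-safe callback and fall back to taking ctx->lock when the task moved
 * away or isn't running:
 *
 *      if (!task_function_call(task, __perf_event_disable, event))
 *              return;
 *      raw_spin_lock_irq(&ctx->lock);
 *      ... re-check state and either retry or finish locally ...
 *
 * This is the pattern used by perf_event_disable() and
 * perf_remove_from_context() further down in this file.
 */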
/**
 * cpu_function_call - call a function on the cpu
 * @func:       the function to be called
 * @info:       the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
        struct remote_function_call data = {
                .p      = NULL,
                .func   = func,
                .info   = info,
                .ret    = -ENXIO, /* No such CPU */
        };

        smp_call_function_single(cpu, remote_function, &data, 1);

        return data.ret;
}
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
                       PERF_FLAG_FD_OUTPUT  |\
                       PERF_FLAG_PID_CGROUP)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
        (PERF_SAMPLE_BRANCH_KERNEL |\
         PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
        EVENT_FLEXIBLE = 0x1,
        EVENT_PINNED = 0x2,
        EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
133
e5d1367f
SE
134/*
135 * perf_sched_events : >0 events exist
136 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
137 */
c5905afb 138struct static_key_deferred perf_sched_events __read_mostly;
e5d1367f 139static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
d010b332 140static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
e5d1367f 141
cdd6c482
IM
142static atomic_t nr_mmap_events __read_mostly;
143static atomic_t nr_comm_events __read_mostly;
144static atomic_t nr_task_events __read_mostly;
9ee318a7 145
108b02cf
PZ
146static LIST_HEAD(pmus);
147static DEFINE_MUTEX(pmus_lock);
148static struct srcu_struct pmus_srcu;
149
/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;
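/*
 * Example: at the default level 1 an unprivileged caller can still open
 * per-task events, but CPU-wide events (perf_event_open() with pid == -1)
 * are refused. The level is normally tuned through the
 * kernel.perf_event_paranoid sysctl (/proc/sys/kernel/perf_event_paranoid).
 */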
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE 100000
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly =
        DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);

int perf_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write)
                return ret;

        max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);

        return 0;
}
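/*
 * Worked example: with HZ == 1000 and the default rate of 100000 samples/s,
 * max_samples_per_tick = DIV_ROUND_UP(100000, 1000) = 100, i.e. at most 100
 * samples may be taken per timer tick. Writing a new rate through the sysctl
 * goes through perf_proc_update_handler() above and re-derives this limit.
 */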
1ccd1549 183
cdd6c482 184static atomic64_t perf_event_id;
a96bbc16 185
0b3fcf17
SE
186static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
187 enum event_type_t event_type);
188
189static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
190 enum event_type_t event_type,
191 struct task_struct *task);
192
193static void update_context_time(struct perf_event_context *ctx);
194static u64 perf_event_time(struct perf_event *event);
0b3fcf17 195
10c6db11
PZ
196static void ring_buffer_attach(struct perf_event *event,
197 struct ring_buffer *rb);
198
cdd6c482 199void __weak perf_event_print_debug(void) { }
0793a61d 200
84c79910 201extern __weak const char *perf_pmu_name(void)
0793a61d 202{
84c79910 203 return "pmu";
0793a61d
TG
204}
205
0b3fcf17
SE
206static inline u64 perf_clock(void)
207{
208 return local_clock();
209}
210
e5d1367f
SE
211static inline struct perf_cpu_context *
212__get_cpu_context(struct perf_event_context *ctx)
213{
214 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
215}
216
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
                          struct perf_event_context *ctx)
{
        raw_spin_lock(&cpuctx->ctx.lock);
        if (ctx)
                raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
                            struct perf_event_context *ctx)
{
        if (ctx)
                raw_spin_unlock(&ctx->lock);
        raw_spin_unlock(&cpuctx->ctx.lock);
}
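/*
 * Note the lock order these helpers establish: cpuctx->ctx.lock is taken
 * before the task context's ctx->lock and released after it. A typical
 * bracket looks like:
 *
 *      perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 *      perf_pmu_disable(cpuctx->ctx.pmu);
 *      ...
 *      perf_pmu_enable(cpuctx->ctx.pmu);
 *      perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 *
 * as done in perf_cgroup_switch() and __perf_install_in_context() below.
 */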
232
e5d1367f
SE
233#ifdef CONFIG_CGROUP_PERF
234
3f7cce3c
SE
235/*
236 * Must ensure cgroup is pinned (css_get) before calling
237 * this function. In other words, we cannot call this function
238 * if there is no cgroup event for the current CPU context.
239 */
e5d1367f
SE
240static inline struct perf_cgroup *
241perf_cgroup_from_task(struct task_struct *task)
242{
243 return container_of(task_subsys_state(task, perf_subsys_id),
244 struct perf_cgroup, css);
245}
246
247static inline bool
248perf_cgroup_match(struct perf_event *event)
249{
250 struct perf_event_context *ctx = event->ctx;
251 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
252
253 return !event->cgrp || event->cgrp == cpuctx->cgrp;
254}
255
9c5da09d 256static inline bool perf_tryget_cgroup(struct perf_event *event)
e5d1367f 257{
9c5da09d 258 return css_tryget(&event->cgrp->css);
e5d1367f
SE
259}
260
261static inline void perf_put_cgroup(struct perf_event *event)
262{
263 css_put(&event->cgrp->css);
264}
265
266static inline void perf_detach_cgroup(struct perf_event *event)
267{
268 perf_put_cgroup(event);
269 event->cgrp = NULL;
270}
271
272static inline int is_cgroup_event(struct perf_event *event)
273{
274 return event->cgrp != NULL;
275}
276
277static inline u64 perf_cgroup_event_time(struct perf_event *event)
278{
279 struct perf_cgroup_info *t;
280
281 t = per_cpu_ptr(event->cgrp->info, event->cpu);
282 return t->time;
283}
284
285static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
286{
287 struct perf_cgroup_info *info;
288 u64 now;
289
290 now = perf_clock();
291
292 info = this_cpu_ptr(cgrp->info);
293
294 info->time += now - info->timestamp;
295 info->timestamp = now;
296}
297
298static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
299{
300 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
301 if (cgrp_out)
302 __update_cgrp_time(cgrp_out);
303}
304
305static inline void update_cgrp_time_from_event(struct perf_event *event)
306{
3f7cce3c
SE
307 struct perf_cgroup *cgrp;
308
e5d1367f 309 /*
3f7cce3c
SE
310 * ensure we access cgroup data only when needed and
311 * when we know the cgroup is pinned (css_get)
e5d1367f 312 */
3f7cce3c 313 if (!is_cgroup_event(event))
e5d1367f
SE
314 return;
315
3f7cce3c
SE
316 cgrp = perf_cgroup_from_task(current);
317 /*
318 * Do not update time when cgroup is not active
319 */
320 if (cgrp == event->cgrp)
321 __update_cgrp_time(event->cgrp);
e5d1367f
SE
322}
323
324static inline void
3f7cce3c
SE
325perf_cgroup_set_timestamp(struct task_struct *task,
326 struct perf_event_context *ctx)
e5d1367f
SE
327{
328 struct perf_cgroup *cgrp;
329 struct perf_cgroup_info *info;
330
3f7cce3c
SE
331 /*
332 * ctx->lock held by caller
333 * ensure we do not access cgroup data
334 * unless we have the cgroup pinned (css_get)
335 */
336 if (!task || !ctx->nr_cgroups)
e5d1367f
SE
337 return;
338
339 cgrp = perf_cgroup_from_task(task);
340 info = this_cpu_ptr(cgrp->info);
3f7cce3c 341 info->timestamp = ctx->timestamp;
e5d1367f
SE
342}
343
344#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
345#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
346
347/*
348 * reschedule events based on the cgroup constraint of task.
349 *
350 * mode SWOUT : schedule out everything
351 * mode SWIN : schedule in based on cgroup for next
352 */
353void perf_cgroup_switch(struct task_struct *task, int mode)
354{
355 struct perf_cpu_context *cpuctx;
356 struct pmu *pmu;
357 unsigned long flags;
358
359 /*
 * disable interrupts to avoid getting nr_cgroup
361 * changes via __perf_event_disable(). Also
362 * avoids preemption.
363 */
364 local_irq_save(flags);
365
366 /*
367 * we reschedule only in the presence of cgroup
368 * constrained events.
369 */
370 rcu_read_lock();
371
372 list_for_each_entry_rcu(pmu, &pmus, entry) {
e5d1367f
SE
373 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
374
e5d1367f
SE
375 /*
376 * perf_cgroup_events says at least one
377 * context on this CPU has cgroup events.
378 *
379 * ctx->nr_cgroups reports the number of cgroup
380 * events for a context.
381 */
382 if (cpuctx->ctx.nr_cgroups > 0) {
facc4307
PZ
383 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
384 perf_pmu_disable(cpuctx->ctx.pmu);
e5d1367f
SE
385
386 if (mode & PERF_CGROUP_SWOUT) {
387 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
388 /*
389 * must not be done before ctxswout due
390 * to event_filter_match() in event_sched_out()
391 */
392 cpuctx->cgrp = NULL;
393 }
394
395 if (mode & PERF_CGROUP_SWIN) {
e566b76e 396 WARN_ON_ONCE(cpuctx->cgrp);
e5d1367f
SE
397 /* set cgrp before ctxsw in to
398 * allow event_filter_match() to not
399 * have to pass task around
400 */
401 cpuctx->cgrp = perf_cgroup_from_task(task);
402 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
403 }
facc4307
PZ
404 perf_pmu_enable(cpuctx->ctx.pmu);
405 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
e5d1367f 406 }
e5d1367f
SE
407 }
408
409 rcu_read_unlock();
410
411 local_irq_restore(flags);
412}
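/*
 * Illustration: a cgroup-aware context switch from @prev to @next amounts to
 *
 *      perf_cgroup_switch(prev, PERF_CGROUP_SWOUT);
 *      perf_cgroup_switch(next, PERF_CGROUP_SWIN);
 *
 * but the perf_cgroup_sched_out()/perf_cgroup_sched_in() wrappers below skip
 * this reschedule entirely when both tasks are in the same perf cgroup.
 */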
413
a8d757ef
SE
414static inline void perf_cgroup_sched_out(struct task_struct *task,
415 struct task_struct *next)
e5d1367f 416{
a8d757ef
SE
417 struct perf_cgroup *cgrp1;
418 struct perf_cgroup *cgrp2 = NULL;
419
420 /*
421 * we come here when we know perf_cgroup_events > 0
422 */
423 cgrp1 = perf_cgroup_from_task(task);
424
425 /*
426 * next is NULL when called from perf_event_enable_on_exec()
427 * that will systematically cause a cgroup_switch()
428 */
429 if (next)
430 cgrp2 = perf_cgroup_from_task(next);
431
432 /*
433 * only schedule out current cgroup events if we know
434 * that we are switching to a different cgroup. Otherwise,
 * do not touch the cgroup events.
436 */
437 if (cgrp1 != cgrp2)
438 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
e5d1367f
SE
439}
440
a8d757ef
SE
441static inline void perf_cgroup_sched_in(struct task_struct *prev,
442 struct task_struct *task)
e5d1367f 443{
a8d757ef
SE
444 struct perf_cgroup *cgrp1;
445 struct perf_cgroup *cgrp2 = NULL;
446
447 /*
448 * we come here when we know perf_cgroup_events > 0
449 */
450 cgrp1 = perf_cgroup_from_task(task);
451
452 /* prev can never be NULL */
453 cgrp2 = perf_cgroup_from_task(prev);
454
455 /*
456 * only need to schedule in cgroup events if we are changing
457 * cgroup during ctxsw. Cgroup events were not scheduled
 * out during ctxsw if that was not the case.
459 */
460 if (cgrp1 != cgrp2)
461 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
e5d1367f
SE
462}
463
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
                                      struct perf_event_attr *attr,
                                      struct perf_event *group_leader)
{
        struct perf_cgroup *cgrp;
        struct cgroup_subsys_state *css;
        struct fd f = fdget(fd);
        int ret = 0;

        if (!f.file)
                return -EBADF;

        css = cgroup_css_from_dir(f.file, perf_subsys_id);
        if (IS_ERR(css)) {
                ret = PTR_ERR(css);
                goto out;
        }

        cgrp = container_of(css, struct perf_cgroup, css);
        event->cgrp = cgrp;

        /* must be done before we fput() the file */
        if (!perf_tryget_cgroup(event)) {
                event->cgrp = NULL;
                ret = -ENOENT;
                goto out;
        }

        /*
         * all events in a group must monitor
         * the same cgroup because a task belongs
         * to only one perf cgroup at a time
         */
        if (group_leader && group_leader->cgrp != cgrp) {
                perf_detach_cgroup(event);
                ret = -EINVAL;
        }
out:
        fdput(f);
        return ret;
}
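/*
 * The fd handling above is the fdget()/fdput() pattern this change converts
 * to (previously fget_light()):
 *
 *      struct fd f = fdget(fd);
 *
 *      if (!f.file)
 *              return -EBADF;
 *      ... use f.file ...
 *      fdput(f);
 *
 * struct fd carries the "does this reference need fput()" state that
 * fget_light() used to report through a separate fput_needed argument.
 */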
505
506static inline void
507perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
508{
509 struct perf_cgroup_info *t;
510 t = per_cpu_ptr(event->cgrp->info, event->cpu);
511 event->shadow_ctx_time = now - t->timestamp;
512}
513
514static inline void
515perf_cgroup_defer_enabled(struct perf_event *event)
516{
517 /*
518 * when the current task's perf cgroup does not match
519 * the event's, we need to remember to call the
520 * perf_mark_enable() function the first time a task with
521 * a matching perf cgroup is scheduled in.
522 */
523 if (is_cgroup_event(event) && !perf_cgroup_match(event))
524 event->cgrp_defer_enabled = 1;
525}
526
527static inline void
528perf_cgroup_mark_enabled(struct perf_event *event,
529 struct perf_event_context *ctx)
530{
531 struct perf_event *sub;
532 u64 tstamp = perf_event_time(event);
533
534 if (!event->cgrp_defer_enabled)
535 return;
536
537 event->cgrp_defer_enabled = 0;
538
539 event->tstamp_enabled = tstamp - event->total_time_enabled;
540 list_for_each_entry(sub, &event->sibling_list, group_entry) {
541 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
542 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
543 sub->cgrp_defer_enabled = 0;
544 }
545 }
546}
547#else /* !CONFIG_CGROUP_PERF */
548
549static inline bool
550perf_cgroup_match(struct perf_event *event)
551{
552 return true;
553}
554
555static inline void perf_detach_cgroup(struct perf_event *event)
556{}
557
558static inline int is_cgroup_event(struct perf_event *event)
559{
560 return 0;
561}
562
563static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
564{
565 return 0;
566}
567
568static inline void update_cgrp_time_from_event(struct perf_event *event)
569{
570}
571
572static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
573{
574}
575
a8d757ef
SE
576static inline void perf_cgroup_sched_out(struct task_struct *task,
577 struct task_struct *next)
e5d1367f
SE
578{
579}
580
a8d757ef
SE
581static inline void perf_cgroup_sched_in(struct task_struct *prev,
582 struct task_struct *task)
e5d1367f
SE
583{
584}
585
586static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
587 struct perf_event_attr *attr,
588 struct perf_event *group_leader)
589{
590 return -EINVAL;
591}
592
593static inline void
3f7cce3c
SE
594perf_cgroup_set_timestamp(struct task_struct *task,
595 struct perf_event_context *ctx)
e5d1367f
SE
596{
597}
598
599void
600perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
601{
602}
603
604static inline void
605perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
606{
607}
608
609static inline u64 perf_cgroup_event_time(struct perf_event *event)
610{
611 return 0;
612}
613
614static inline void
615perf_cgroup_defer_enabled(struct perf_event *event)
616{
617}
618
619static inline void
620perf_cgroup_mark_enabled(struct perf_event *event,
621 struct perf_event_context *ctx)
622{
623}
624#endif
625
void perf_pmu_disable(struct pmu *pmu)
{
        int *count = this_cpu_ptr(pmu->pmu_disable_count);
        if (!(*count)++)
                pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
        int *count = this_cpu_ptr(pmu->pmu_disable_count);
        if (!--(*count))
                pmu->pmu_enable(pmu);
}
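/*
 * These calls nest: the pmu callbacks only run on the 0 -> 1 and 1 -> 0
 * transitions of the per-cpu disable count, so
 *
 *      perf_pmu_disable(pmu);
 *      perf_pmu_disable(pmu);
 *      perf_pmu_enable(pmu);      pmu stays disabled
 *      perf_pmu_enable(pmu);      pmu->pmu_enable() runs here
 *
 * which lets callers bracket arbitrary regions without tracking whether the
 * pmu was already disabled by an outer section.
 */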
9e35ad38 639
e9d2b064
PZ
640static DEFINE_PER_CPU(struct list_head, rotation_list);
641
642/*
643 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
644 * because they're strictly cpu affine and rotate_start is called with IRQs
645 * disabled, while rotate_context is called from IRQ context.
646 */
108b02cf 647static void perf_pmu_rotate_start(struct pmu *pmu)
9e35ad38 648{
108b02cf 649 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
e9d2b064 650 struct list_head *head = &__get_cpu_var(rotation_list);
b5ab4cd5 651
e9d2b064 652 WARN_ON(!irqs_disabled());
b5ab4cd5 653
e9d2b064
PZ
654 if (list_empty(&cpuctx->rotation_list))
655 list_add(&cpuctx->rotation_list, head);
9e35ad38 656}
9e35ad38 657
cdd6c482 658static void get_ctx(struct perf_event_context *ctx)
a63eaf34 659{
e5289d4a 660 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
a63eaf34
PM
661}
662
cdd6c482 663static void put_ctx(struct perf_event_context *ctx)
a63eaf34 664{
564c2b21
PM
665 if (atomic_dec_and_test(&ctx->refcount)) {
666 if (ctx->parent_ctx)
667 put_ctx(ctx->parent_ctx);
c93f7669
PM
668 if (ctx->task)
669 put_task_struct(ctx->task);
cb796ff3 670 kfree_rcu(ctx, rcu_head);
564c2b21 671 }
a63eaf34
PM
672}
673
cdd6c482 674static void unclone_ctx(struct perf_event_context *ctx)
71a851b4
PZ
675{
676 if (ctx->parent_ctx) {
677 put_ctx(ctx->parent_ctx);
678 ctx->parent_ctx = NULL;
679 }
680}
681
6844c09d
ACM
682static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
683{
684 /*
685 * only top level events have the pid namespace they were created in
686 */
687 if (event->parent)
688 event = event->parent;
689
690 return task_tgid_nr_ns(p, event->ns);
691}
692
693static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
694{
695 /*
696 * only top level events have the pid namespace they were created in
697 */
698 if (event->parent)
699 event = event->parent;
700
701 return task_pid_nr_ns(p, event->ns);
702}
703
7f453c24 704/*
cdd6c482 705 * If we inherit events we want to return the parent event id
7f453c24
PZ
706 * to userspace.
707 */
cdd6c482 708static u64 primary_event_id(struct perf_event *event)
7f453c24 709{
cdd6c482 710 u64 id = event->id;
7f453c24 711
cdd6c482
IM
712 if (event->parent)
713 id = event->parent->id;
7f453c24
PZ
714
715 return id;
716}
717
25346b93 718/*
cdd6c482 719 * Get the perf_event_context for a task and lock it.
25346b93
PM
 * This has to cope with the fact that until it is locked,
721 * the context could get moved to another task.
722 */
cdd6c482 723static struct perf_event_context *
8dc85d54 724perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
25346b93 725{
cdd6c482 726 struct perf_event_context *ctx;
25346b93
PM
727
728 rcu_read_lock();
9ed6060d 729retry:
8dc85d54 730 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
25346b93
PM
731 if (ctx) {
732 /*
733 * If this context is a clone of another, it might
734 * get swapped for another underneath us by
cdd6c482 735 * perf_event_task_sched_out, though the
25346b93
PM
736 * rcu_read_lock() protects us from any context
737 * getting freed. Lock the context and check if it
738 * got swapped before we could get the lock, and retry
739 * if so. If we locked the right context, then it
740 * can't get swapped on us any more.
741 */
e625cce1 742 raw_spin_lock_irqsave(&ctx->lock, *flags);
8dc85d54 743 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
e625cce1 744 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
25346b93
PM
745 goto retry;
746 }
b49a9e7e
PZ
747
748 if (!atomic_inc_not_zero(&ctx->refcount)) {
e625cce1 749 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
b49a9e7e
PZ
750 ctx = NULL;
751 }
25346b93
PM
752 }
753 rcu_read_unlock();
754 return ctx;
755}
756
757/*
758 * Get the context for a task and increment its pin_count so it
759 * can't get swapped to another task. This also increments its
760 * reference count so that the context can't get freed.
761 */
8dc85d54
PZ
762static struct perf_event_context *
763perf_pin_task_context(struct task_struct *task, int ctxn)
25346b93 764{
cdd6c482 765 struct perf_event_context *ctx;
25346b93
PM
766 unsigned long flags;
767
8dc85d54 768 ctx = perf_lock_task_context(task, ctxn, &flags);
25346b93
PM
769 if (ctx) {
770 ++ctx->pin_count;
e625cce1 771 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
772 }
773 return ctx;
774}
775
cdd6c482 776static void perf_unpin_context(struct perf_event_context *ctx)
25346b93
PM
777{
778 unsigned long flags;
779
e625cce1 780 raw_spin_lock_irqsave(&ctx->lock, flags);
25346b93 781 --ctx->pin_count;
e625cce1 782 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
783}
784
f67218c3
PZ
785/*
786 * Update the record of the current time in a context.
787 */
788static void update_context_time(struct perf_event_context *ctx)
789{
790 u64 now = perf_clock();
791
792 ctx->time += now - ctx->timestamp;
793 ctx->timestamp = now;
794}
795
4158755d
SE
796static u64 perf_event_time(struct perf_event *event)
797{
798 struct perf_event_context *ctx = event->ctx;
e5d1367f
SE
799
800 if (is_cgroup_event(event))
801 return perf_cgroup_event_time(event);
802
4158755d
SE
803 return ctx ? ctx->time : 0;
804}
805
f67218c3
PZ
806/*
 * Update the total_time_enabled and total_time_running fields for an event.
b7526f0c 808 * The caller of this function needs to hold the ctx->lock.
f67218c3
PZ
809 */
810static void update_event_times(struct perf_event *event)
811{
812 struct perf_event_context *ctx = event->ctx;
813 u64 run_end;
814
815 if (event->state < PERF_EVENT_STATE_INACTIVE ||
816 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
817 return;
e5d1367f
SE
818 /*
819 * in cgroup mode, time_enabled represents
820 * the time the event was enabled AND active
821 * tasks were in the monitored cgroup. This is
822 * independent of the activity of the context as
823 * there may be a mix of cgroup and non-cgroup events.
824 *
825 * That is why we treat cgroup events differently
826 * here.
827 */
828 if (is_cgroup_event(event))
46cd6a7f 829 run_end = perf_cgroup_event_time(event);
e5d1367f
SE
830 else if (ctx->is_active)
831 run_end = ctx->time;
acd1d7c1
PZ
832 else
833 run_end = event->tstamp_stopped;
834
835 event->total_time_enabled = run_end - event->tstamp_enabled;
f67218c3
PZ
836
837 if (event->state == PERF_EVENT_STATE_INACTIVE)
838 run_end = event->tstamp_stopped;
839 else
4158755d 840 run_end = perf_event_time(event);
f67218c3
PZ
841
842 event->total_time_running = run_end - event->tstamp_running;
e5d1367f 843
f67218c3
PZ
844}
845
96c21a46
PZ
846/*
847 * Update total_time_enabled and total_time_running for all events in a group.
848 */
849static void update_group_times(struct perf_event *leader)
850{
851 struct perf_event *event;
852
853 update_event_times(leader);
854 list_for_each_entry(event, &leader->sibling_list, group_entry)
855 update_event_times(event);
856}
857
889ff015
FW
858static struct list_head *
859ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
860{
861 if (event->attr.pinned)
862 return &ctx->pinned_groups;
863 else
864 return &ctx->flexible_groups;
865}
866
fccc714b 867/*
 * Add an event to the lists for its context.
fccc714b
PZ
869 * Must be called with ctx->mutex and ctx->lock held.
870 */
04289bb9 871static void
cdd6c482 872list_add_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 873{
8a49542c
PZ
874 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
875 event->attach_state |= PERF_ATTACH_CONTEXT;
04289bb9
IM
876
877 /*
8a49542c
PZ
 * If we're a standalone event or group leader, we go to the context
 * list; group events are kept attached to the group so that
 * perf_group_detach can, at all times, locate all siblings.
04289bb9 881 */
8a49542c 882 if (event->group_leader == event) {
889ff015
FW
883 struct list_head *list;
884
d6f962b5
FW
885 if (is_software_event(event))
886 event->group_flags |= PERF_GROUP_SOFTWARE;
887
889ff015
FW
888 list = ctx_group_list(event, ctx);
889 list_add_tail(&event->group_entry, list);
5c148194 890 }
592903cd 891
08309379 892 if (is_cgroup_event(event))
e5d1367f 893 ctx->nr_cgroups++;
e5d1367f 894
d010b332
SE
895 if (has_branch_stack(event))
896 ctx->nr_branch_stack++;
897
cdd6c482 898 list_add_rcu(&event->event_entry, &ctx->event_list);
b5ab4cd5 899 if (!ctx->nr_events)
108b02cf 900 perf_pmu_rotate_start(ctx->pmu);
cdd6c482
IM
901 ctx->nr_events++;
902 if (event->attr.inherit_stat)
bfbd3381 903 ctx->nr_stat++;
04289bb9
IM
904}
905
/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
        int entry = sizeof(u64); /* value */
        int size = 0;
        int nr = 1;

        if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                size += sizeof(u64);

        if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                size += sizeof(u64);

        if (event->attr.read_format & PERF_FORMAT_ID)
                entry += sizeof(u64);

        if (event->attr.read_format & PERF_FORMAT_GROUP) {
                nr += event->group_leader->nr_siblings;
                size += sizeof(u64);
        }

        size += entry * nr;
        event->read_size = size;
}
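/*
 * Worked example: a leader with two siblings and read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID | PERF_FORMAT_GROUP gives
 *
 *      entry = 8 (value) + 8 (id)         = 16
 *      size  = 8 (time_enabled) + 8 (nr)  = 16
 *      nr    = 1 + 2 siblings             =  3
 *      read_size = 16 + 16 * 3            = 64 bytes
 *
 * which is the size of the buffer userspace must pass to read().
 */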
933
934static void perf_event__header_size(struct perf_event *event)
935{
936 struct perf_sample_data *data;
937 u64 sample_type = event->attr.sample_type;
938 u16 size = 0;
939
940 perf_event__read_size(event);
941
942 if (sample_type & PERF_SAMPLE_IP)
943 size += sizeof(data->ip);
944
6844c09d
ACM
945 if (sample_type & PERF_SAMPLE_ADDR)
946 size += sizeof(data->addr);
947
948 if (sample_type & PERF_SAMPLE_PERIOD)
949 size += sizeof(data->period);
950
951 if (sample_type & PERF_SAMPLE_READ)
952 size += event->read_size;
953
954 event->header_size = size;
955}
956
957static void perf_event__id_header_size(struct perf_event *event)
958{
959 struct perf_sample_data *data;
960 u64 sample_type = event->attr.sample_type;
961 u16 size = 0;
962
c320c7b7
ACM
963 if (sample_type & PERF_SAMPLE_TID)
964 size += sizeof(data->tid_entry);
965
966 if (sample_type & PERF_SAMPLE_TIME)
967 size += sizeof(data->time);
968
c320c7b7
ACM
969 if (sample_type & PERF_SAMPLE_ID)
970 size += sizeof(data->id);
971
972 if (sample_type & PERF_SAMPLE_STREAM_ID)
973 size += sizeof(data->stream_id);
974
975 if (sample_type & PERF_SAMPLE_CPU)
976 size += sizeof(data->cpu_entry);
977
6844c09d 978 event->id_header_size = size;
c320c7b7
ACM
979}
980
8a49542c
PZ
981static void perf_group_attach(struct perf_event *event)
982{
c320c7b7 983 struct perf_event *group_leader = event->group_leader, *pos;
8a49542c 984
74c3337c
PZ
985 /*
986 * We can have double attach due to group movement in perf_event_open.
987 */
988 if (event->attach_state & PERF_ATTACH_GROUP)
989 return;
990
8a49542c
PZ
991 event->attach_state |= PERF_ATTACH_GROUP;
992
993 if (group_leader == event)
994 return;
995
996 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
997 !is_software_event(event))
998 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
999
1000 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1001 group_leader->nr_siblings++;
c320c7b7
ACM
1002
1003 perf_event__header_size(group_leader);
1004
1005 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1006 perf_event__header_size(pos);
8a49542c
PZ
1007}
1008
a63eaf34 1009/*
 * Remove an event from the lists for its context.
fccc714b 1011 * Must be called with ctx->mutex and ctx->lock held.
a63eaf34 1012 */
04289bb9 1013static void
cdd6c482 1014list_del_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1015{
68cacd29 1016 struct perf_cpu_context *cpuctx;
8a49542c
PZ
1017 /*
1018 * We can have double detach due to exit/hot-unplug + close.
1019 */
1020 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
a63eaf34 1021 return;
8a49542c
PZ
1022
1023 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1024
68cacd29 1025 if (is_cgroup_event(event)) {
e5d1367f 1026 ctx->nr_cgroups--;
68cacd29
SE
1027 cpuctx = __get_cpu_context(ctx);
1028 /*
1029 * if there are no more cgroup events
 * then clear cgrp to avoid a stale pointer
1031 * in update_cgrp_time_from_cpuctx()
1032 */
1033 if (!ctx->nr_cgroups)
1034 cpuctx->cgrp = NULL;
1035 }
e5d1367f 1036
d010b332
SE
1037 if (has_branch_stack(event))
1038 ctx->nr_branch_stack--;
1039
cdd6c482
IM
1040 ctx->nr_events--;
1041 if (event->attr.inherit_stat)
bfbd3381 1042 ctx->nr_stat--;
8bc20959 1043
cdd6c482 1044 list_del_rcu(&event->event_entry);
04289bb9 1045
8a49542c
PZ
1046 if (event->group_leader == event)
1047 list_del_init(&event->group_entry);
5c148194 1048
96c21a46 1049 update_group_times(event);
b2e74a26
SE
1050
1051 /*
1052 * If event was in error state, then keep it
1053 * that way, otherwise bogus counts will be
1054 * returned on read(). The only way to get out
1055 * of error state is by explicit re-enabling
1056 * of the event
1057 */
1058 if (event->state > PERF_EVENT_STATE_OFF)
1059 event->state = PERF_EVENT_STATE_OFF;
050735b0
PZ
1060}
1061
8a49542c 1062static void perf_group_detach(struct perf_event *event)
050735b0
PZ
1063{
1064 struct perf_event *sibling, *tmp;
8a49542c
PZ
1065 struct list_head *list = NULL;
1066
1067 /*
1068 * We can have double detach due to exit/hot-unplug + close.
1069 */
1070 if (!(event->attach_state & PERF_ATTACH_GROUP))
1071 return;
1072
1073 event->attach_state &= ~PERF_ATTACH_GROUP;
1074
1075 /*
1076 * If this is a sibling, remove it from its group.
1077 */
1078 if (event->group_leader != event) {
1079 list_del_init(&event->group_entry);
1080 event->group_leader->nr_siblings--;
c320c7b7 1081 goto out;
8a49542c
PZ
1082 }
1083
1084 if (!list_empty(&event->group_entry))
1085 list = &event->group_entry;
2e2af50b 1086
04289bb9 1087 /*
cdd6c482
IM
1088 * If this was a group event with sibling events then
1089 * upgrade the siblings to singleton events by adding them
8a49542c 1090 * to whatever list we are on.
04289bb9 1091 */
cdd6c482 1092 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
8a49542c
PZ
1093 if (list)
1094 list_move_tail(&sibling->group_entry, list);
04289bb9 1095 sibling->group_leader = sibling;
d6f962b5
FW
1096
1097 /* Inherit group flags from the previous leader */
1098 sibling->group_flags = event->group_flags;
04289bb9 1099 }
c320c7b7
ACM
1100
1101out:
1102 perf_event__header_size(event->group_leader);
1103
1104 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1105 perf_event__header_size(tmp);
04289bb9
IM
1106}
1107
fa66f07a
SE
1108static inline int
1109event_filter_match(struct perf_event *event)
1110{
e5d1367f
SE
1111 return (event->cpu == -1 || event->cpu == smp_processor_id())
1112 && perf_cgroup_match(event);
fa66f07a
SE
1113}
1114
9ffcfa6f
SE
1115static void
1116event_sched_out(struct perf_event *event,
3b6f9e5c 1117 struct perf_cpu_context *cpuctx,
cdd6c482 1118 struct perf_event_context *ctx)
3b6f9e5c 1119{
4158755d 1120 u64 tstamp = perf_event_time(event);
fa66f07a
SE
1121 u64 delta;
1122 /*
1123 * An event which could not be activated because of
1124 * filter mismatch still needs to have its timings
 * maintained, otherwise bogus information is returned
1126 * via read() for time_enabled, time_running:
1127 */
1128 if (event->state == PERF_EVENT_STATE_INACTIVE
1129 && !event_filter_match(event)) {
e5d1367f 1130 delta = tstamp - event->tstamp_stopped;
fa66f07a 1131 event->tstamp_running += delta;
4158755d 1132 event->tstamp_stopped = tstamp;
fa66f07a
SE
1133 }
1134
cdd6c482 1135 if (event->state != PERF_EVENT_STATE_ACTIVE)
9ffcfa6f 1136 return;
3b6f9e5c 1137
cdd6c482
IM
1138 event->state = PERF_EVENT_STATE_INACTIVE;
1139 if (event->pending_disable) {
1140 event->pending_disable = 0;
1141 event->state = PERF_EVENT_STATE_OFF;
970892a9 1142 }
4158755d 1143 event->tstamp_stopped = tstamp;
a4eaf7f1 1144 event->pmu->del(event, 0);
cdd6c482 1145 event->oncpu = -1;
3b6f9e5c 1146
cdd6c482 1147 if (!is_software_event(event))
3b6f9e5c
PM
1148 cpuctx->active_oncpu--;
1149 ctx->nr_active--;
0f5a2601
PZ
1150 if (event->attr.freq && event->attr.sample_freq)
1151 ctx->nr_freq--;
cdd6c482 1152 if (event->attr.exclusive || !cpuctx->active_oncpu)
3b6f9e5c
PM
1153 cpuctx->exclusive = 0;
1154}
1155
d859e29f 1156static void
cdd6c482 1157group_sched_out(struct perf_event *group_event,
d859e29f 1158 struct perf_cpu_context *cpuctx,
cdd6c482 1159 struct perf_event_context *ctx)
d859e29f 1160{
cdd6c482 1161 struct perf_event *event;
fa66f07a 1162 int state = group_event->state;
d859e29f 1163
cdd6c482 1164 event_sched_out(group_event, cpuctx, ctx);
d859e29f
PM
1165
1166 /*
1167 * Schedule out siblings (if any):
1168 */
cdd6c482
IM
1169 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1170 event_sched_out(event, cpuctx, ctx);
d859e29f 1171
fa66f07a 1172 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
d859e29f
PM
1173 cpuctx->exclusive = 0;
1174}
1175
0793a61d 1176/*
cdd6c482 1177 * Cross CPU call to remove a performance event
0793a61d 1178 *
cdd6c482 1179 * We disable the event on the hardware level first. After that we
0793a61d
TG
1180 * remove it from the context list.
1181 */
fe4b04fa 1182static int __perf_remove_from_context(void *info)
0793a61d 1183{
cdd6c482
IM
1184 struct perf_event *event = info;
1185 struct perf_event_context *ctx = event->ctx;
108b02cf 1186 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
0793a61d 1187
e625cce1 1188 raw_spin_lock(&ctx->lock);
cdd6c482 1189 event_sched_out(event, cpuctx, ctx);
cdd6c482 1190 list_del_event(event, ctx);
64ce3126
PZ
1191 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1192 ctx->is_active = 0;
1193 cpuctx->task_ctx = NULL;
1194 }
e625cce1 1195 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
1196
1197 return 0;
0793a61d
TG
1198}
1199
1200
1201/*
cdd6c482 1202 * Remove the event from a task's (or a CPU's) list of events.
0793a61d 1203 *
cdd6c482 1204 * CPU events are removed with a smp call. For task events we only
0793a61d 1205 * call when the task is on a CPU.
c93f7669 1206 *
cdd6c482
IM
1207 * If event->ctx is a cloned context, callers must make sure that
1208 * every task struct that event->ctx->task could possibly point to
c93f7669
PM
1209 * remains valid. This is OK when called from perf_release since
1210 * that only calls us on the top-level context, which can't be a clone.
cdd6c482 1211 * When called from perf_event_exit_task, it's OK because the
c93f7669 1212 * context has been detached from its task.
0793a61d 1213 */
fe4b04fa 1214static void perf_remove_from_context(struct perf_event *event)
0793a61d 1215{
cdd6c482 1216 struct perf_event_context *ctx = event->ctx;
0793a61d
TG
1217 struct task_struct *task = ctx->task;
1218
fe4b04fa
PZ
1219 lockdep_assert_held(&ctx->mutex);
1220
0793a61d
TG
1221 if (!task) {
1222 /*
cdd6c482 1223 * Per cpu events are removed via an smp call and
af901ca1 1224 * the removal is always successful.
0793a61d 1225 */
fe4b04fa 1226 cpu_function_call(event->cpu, __perf_remove_from_context, event);
0793a61d
TG
1227 return;
1228 }
1229
1230retry:
fe4b04fa
PZ
1231 if (!task_function_call(task, __perf_remove_from_context, event))
1232 return;
0793a61d 1233
e625cce1 1234 raw_spin_lock_irq(&ctx->lock);
0793a61d 1235 /*
fe4b04fa
PZ
1236 * If we failed to find a running task, but find the context active now
1237 * that we've acquired the ctx->lock, retry.
0793a61d 1238 */
fe4b04fa 1239 if (ctx->is_active) {
e625cce1 1240 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1241 goto retry;
1242 }
1243
1244 /*
fe4b04fa
PZ
 * Since the task isn't running, it's safe to remove the event; our
 * holding the ctx->lock ensures the task won't get scheduled in.
0793a61d 1247 */
fe4b04fa 1248 list_del_event(event, ctx);
e625cce1 1249 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1250}
1251
d859e29f 1252/*
cdd6c482 1253 * Cross CPU call to disable a performance event
d859e29f 1254 */
500ad2d8 1255int __perf_event_disable(void *info)
d859e29f 1256{
cdd6c482 1257 struct perf_event *event = info;
cdd6c482 1258 struct perf_event_context *ctx = event->ctx;
108b02cf 1259 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
d859e29f
PM
1260
1261 /*
cdd6c482
IM
1262 * If this is a per-task event, need to check whether this
1263 * event's task is the current task on this cpu.
fe4b04fa
PZ
1264 *
1265 * Can trigger due to concurrent perf_event_context_sched_out()
1266 * flipping contexts around.
d859e29f 1267 */
665c2142 1268 if (ctx->task && cpuctx->task_ctx != ctx)
fe4b04fa 1269 return -EINVAL;
d859e29f 1270
e625cce1 1271 raw_spin_lock(&ctx->lock);
d859e29f
PM
1272
1273 /*
cdd6c482 1274 * If the event is on, turn it off.
d859e29f
PM
1275 * If it is in error state, leave it in error state.
1276 */
cdd6c482 1277 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
4af4998b 1278 update_context_time(ctx);
e5d1367f 1279 update_cgrp_time_from_event(event);
cdd6c482
IM
1280 update_group_times(event);
1281 if (event == event->group_leader)
1282 group_sched_out(event, cpuctx, ctx);
d859e29f 1283 else
cdd6c482
IM
1284 event_sched_out(event, cpuctx, ctx);
1285 event->state = PERF_EVENT_STATE_OFF;
d859e29f
PM
1286 }
1287
e625cce1 1288 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
1289
1290 return 0;
d859e29f
PM
1291}
1292
1293/*
 * Disable an event.
c93f7669 1295 *
cdd6c482
IM
1296 * If event->ctx is a cloned context, callers must make sure that
1297 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
cdd6c482
IM
1299 * perf_event_for_each_child or perf_event_for_each because they
1300 * hold the top-level event's child_mutex, so any descendant that
1301 * goes to exit will block in sync_child_event.
1302 * When called from perf_pending_event it's OK because event->ctx
c93f7669 1303 * is the current context on this CPU and preemption is disabled,
cdd6c482 1304 * hence we can't get into perf_event_task_sched_out for this context.
d859e29f 1305 */
44234adc 1306void perf_event_disable(struct perf_event *event)
d859e29f 1307{
cdd6c482 1308 struct perf_event_context *ctx = event->ctx;
d859e29f
PM
1309 struct task_struct *task = ctx->task;
1310
1311 if (!task) {
1312 /*
cdd6c482 1313 * Disable the event on the cpu that it's on
d859e29f 1314 */
fe4b04fa 1315 cpu_function_call(event->cpu, __perf_event_disable, event);
d859e29f
PM
1316 return;
1317 }
1318
9ed6060d 1319retry:
fe4b04fa
PZ
1320 if (!task_function_call(task, __perf_event_disable, event))
1321 return;
d859e29f 1322
e625cce1 1323 raw_spin_lock_irq(&ctx->lock);
d859e29f 1324 /*
cdd6c482 1325 * If the event is still active, we need to retry the cross-call.
d859e29f 1326 */
cdd6c482 1327 if (event->state == PERF_EVENT_STATE_ACTIVE) {
e625cce1 1328 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa
PZ
1329 /*
1330 * Reload the task pointer, it might have been changed by
1331 * a concurrent perf_event_context_sched_out().
1332 */
1333 task = ctx->task;
d859e29f
PM
1334 goto retry;
1335 }
1336
1337 /*
1338 * Since we have the lock this context can't be scheduled
1339 * in, so we can change the state safely.
1340 */
cdd6c482
IM
1341 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1342 update_group_times(event);
1343 event->state = PERF_EVENT_STATE_OFF;
53cfbf59 1344 }
e625cce1 1345 raw_spin_unlock_irq(&ctx->lock);
d859e29f 1346}
dcfce4a0 1347EXPORT_SYMBOL_GPL(perf_event_disable);
d859e29f 1348
e5d1367f
SE
1349static void perf_set_shadow_time(struct perf_event *event,
1350 struct perf_event_context *ctx,
1351 u64 tstamp)
1352{
1353 /*
1354 * use the correct time source for the time snapshot
1355 *
1356 * We could get by without this by leveraging the
1357 * fact that to get to this function, the caller
1358 * has most likely already called update_context_time()
 * and update_cgrp_time_xx() and thus both timestamps
 * are identical (or very close). Given that tstamp is
1361 * already adjusted for cgroup, we could say that:
1362 * tstamp - ctx->timestamp
1363 * is equivalent to
1364 * tstamp - cgrp->timestamp.
1365 *
1366 * Then, in perf_output_read(), the calculation would
1367 * work with no changes because:
1368 * - event is guaranteed scheduled in
1369 * - no scheduled out in between
1370 * - thus the timestamp would be the same
1371 *
1372 * But this is a bit hairy.
1373 *
1374 * So instead, we have an explicit cgroup call to remain
 * within the same time source all along. We believe it
1376 * is cleaner and simpler to understand.
1377 */
1378 if (is_cgroup_event(event))
1379 perf_cgroup_set_shadow_time(event, tstamp);
1380 else
1381 event->shadow_ctx_time = tstamp - ctx->timestamp;
1382}
1383
4fe757dd
PZ
1384#define MAX_INTERRUPTS (~0ULL)
1385
1386static void perf_log_throttle(struct perf_event *event, int enable);
1387
235c7fc7 1388static int
9ffcfa6f 1389event_sched_in(struct perf_event *event,
235c7fc7 1390 struct perf_cpu_context *cpuctx,
6e37738a 1391 struct perf_event_context *ctx)
235c7fc7 1392{
4158755d
SE
1393 u64 tstamp = perf_event_time(event);
1394
cdd6c482 1395 if (event->state <= PERF_EVENT_STATE_OFF)
235c7fc7
IM
1396 return 0;
1397
cdd6c482 1398 event->state = PERF_EVENT_STATE_ACTIVE;
6e37738a 1399 event->oncpu = smp_processor_id();
4fe757dd
PZ
1400
1401 /*
1402 * Unthrottle events, since we scheduled we might have missed several
1403 * ticks already, also for a heavily scheduling task there is little
1404 * guarantee it'll get a tick in a timely manner.
1405 */
1406 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1407 perf_log_throttle(event, 1);
1408 event->hw.interrupts = 0;
1409 }
1410
235c7fc7
IM
1411 /*
1412 * The new state must be visible before we turn it on in the hardware:
1413 */
1414 smp_wmb();
1415
a4eaf7f1 1416 if (event->pmu->add(event, PERF_EF_START)) {
cdd6c482
IM
1417 event->state = PERF_EVENT_STATE_INACTIVE;
1418 event->oncpu = -1;
235c7fc7
IM
1419 return -EAGAIN;
1420 }
1421
4158755d 1422 event->tstamp_running += tstamp - event->tstamp_stopped;
9ffcfa6f 1423
e5d1367f 1424 perf_set_shadow_time(event, ctx, tstamp);
eed01528 1425
cdd6c482 1426 if (!is_software_event(event))
3b6f9e5c 1427 cpuctx->active_oncpu++;
235c7fc7 1428 ctx->nr_active++;
0f5a2601
PZ
1429 if (event->attr.freq && event->attr.sample_freq)
1430 ctx->nr_freq++;
235c7fc7 1431
cdd6c482 1432 if (event->attr.exclusive)
3b6f9e5c
PM
1433 cpuctx->exclusive = 1;
1434
235c7fc7
IM
1435 return 0;
1436}
1437
6751b71e 1438static int
cdd6c482 1439group_sched_in(struct perf_event *group_event,
6751b71e 1440 struct perf_cpu_context *cpuctx,
6e37738a 1441 struct perf_event_context *ctx)
6751b71e 1442{
6bde9b6c 1443 struct perf_event *event, *partial_group = NULL;
51b0fe39 1444 struct pmu *pmu = group_event->pmu;
d7842da4
SE
1445 u64 now = ctx->time;
1446 bool simulate = false;
6751b71e 1447
cdd6c482 1448 if (group_event->state == PERF_EVENT_STATE_OFF)
6751b71e
PM
1449 return 0;
1450
ad5133b7 1451 pmu->start_txn(pmu);
6bde9b6c 1452
9ffcfa6f 1453 if (event_sched_in(group_event, cpuctx, ctx)) {
ad5133b7 1454 pmu->cancel_txn(pmu);
6751b71e 1455 return -EAGAIN;
90151c35 1456 }
6751b71e
PM
1457
1458 /*
1459 * Schedule in siblings as one group (if any):
1460 */
cdd6c482 1461 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
9ffcfa6f 1462 if (event_sched_in(event, cpuctx, ctx)) {
cdd6c482 1463 partial_group = event;
6751b71e
PM
1464 goto group_error;
1465 }
1466 }
1467
9ffcfa6f 1468 if (!pmu->commit_txn(pmu))
6e85158c 1469 return 0;
9ffcfa6f 1470
6751b71e
PM
1471group_error:
1472 /*
1473 * Groups can be scheduled in as one unit only, so undo any
1474 * partial group before returning:
d7842da4
SE
1475 * The events up to the failed event are scheduled out normally,
1476 * tstamp_stopped will be updated.
1477 *
1478 * The failed events and the remaining siblings need to have
 * their timings updated as if they had gone through event_sched_in()
1480 * and event_sched_out(). This is required to get consistent timings
1481 * across the group. This also takes care of the case where the group
1482 * could never be scheduled by ensuring tstamp_stopped is set to mark
1483 * the time the event was actually stopped, such that time delta
1484 * calculation in update_event_times() is correct.
6751b71e 1485 */
cdd6c482
IM
1486 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1487 if (event == partial_group)
d7842da4
SE
1488 simulate = true;
1489
1490 if (simulate) {
1491 event->tstamp_running += now - event->tstamp_stopped;
1492 event->tstamp_stopped = now;
1493 } else {
1494 event_sched_out(event, cpuctx, ctx);
1495 }
6751b71e 1496 }
9ffcfa6f 1497 event_sched_out(group_event, cpuctx, ctx);
6751b71e 1498
ad5133b7 1499 pmu->cancel_txn(pmu);
90151c35 1500
6751b71e
PM
1501 return -EAGAIN;
1502}
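/*
 * The scheme above is the pmu transaction API: a group either counts as a
 * whole or not at all,
 *
 *      pmu->start_txn(pmu);
 *      ... event_sched_in() the leader and every sibling ...
 *      if (!pmu->commit_txn(pmu))
 *              return 0;               (whole group is now active)
 *      ... otherwise unwind the partial group ...
 *      pmu->cancel_txn(pmu);
 *      return -EAGAIN;
 */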
1503
3b6f9e5c 1504/*
cdd6c482 1505 * Work out whether we can put this event group on the CPU now.
3b6f9e5c 1506 */
cdd6c482 1507static int group_can_go_on(struct perf_event *event,
3b6f9e5c
PM
1508 struct perf_cpu_context *cpuctx,
1509 int can_add_hw)
1510{
1511 /*
cdd6c482 1512 * Groups consisting entirely of software events can always go on.
3b6f9e5c 1513 */
d6f962b5 1514 if (event->group_flags & PERF_GROUP_SOFTWARE)
3b6f9e5c
PM
1515 return 1;
1516 /*
1517 * If an exclusive group is already on, no other hardware
cdd6c482 1518 * events can go on.
3b6f9e5c
PM
1519 */
1520 if (cpuctx->exclusive)
1521 return 0;
1522 /*
1523 * If this group is exclusive and there are already
cdd6c482 1524 * events on the CPU, it can't go on.
3b6f9e5c 1525 */
cdd6c482 1526 if (event->attr.exclusive && cpuctx->active_oncpu)
3b6f9e5c
PM
1527 return 0;
1528 /*
1529 * Otherwise, try to add it if all previous groups were able
1530 * to go on.
1531 */
1532 return can_add_hw;
1533}
1534
cdd6c482
IM
1535static void add_event_to_ctx(struct perf_event *event,
1536 struct perf_event_context *ctx)
53cfbf59 1537{
4158755d
SE
1538 u64 tstamp = perf_event_time(event);
1539
cdd6c482 1540 list_add_event(event, ctx);
8a49542c 1541 perf_group_attach(event);
4158755d
SE
1542 event->tstamp_enabled = tstamp;
1543 event->tstamp_running = tstamp;
1544 event->tstamp_stopped = tstamp;
53cfbf59
PM
1545}
1546
2c29ef0f
PZ
1547static void task_ctx_sched_out(struct perf_event_context *ctx);
1548static void
1549ctx_sched_in(struct perf_event_context *ctx,
1550 struct perf_cpu_context *cpuctx,
1551 enum event_type_t event_type,
1552 struct task_struct *task);
fe4b04fa 1553
dce5855b
PZ
1554static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1555 struct perf_event_context *ctx,
1556 struct task_struct *task)
1557{
1558 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1559 if (ctx)
1560 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1561 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1562 if (ctx)
1563 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1564}
1565
0793a61d 1566/*
cdd6c482 1567 * Cross CPU call to install and enable a performance event
682076ae
PZ
1568 *
1569 * Must be called with ctx->mutex held
0793a61d 1570 */
fe4b04fa 1571static int __perf_install_in_context(void *info)
0793a61d 1572{
cdd6c482
IM
1573 struct perf_event *event = info;
1574 struct perf_event_context *ctx = event->ctx;
108b02cf 1575 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2c29ef0f
PZ
1576 struct perf_event_context *task_ctx = cpuctx->task_ctx;
1577 struct task_struct *task = current;
1578
b58f6b0d 1579 perf_ctx_lock(cpuctx, task_ctx);
2c29ef0f 1580 perf_pmu_disable(cpuctx->ctx.pmu);
0793a61d
TG
1581
1582 /*
2c29ef0f 1583 * If there was an active task_ctx schedule it out.
0793a61d 1584 */
b58f6b0d 1585 if (task_ctx)
2c29ef0f 1586 task_ctx_sched_out(task_ctx);
b58f6b0d
PZ
1587
1588 /*
1589 * If the context we're installing events in is not the
1590 * active task_ctx, flip them.
1591 */
1592 if (ctx->task && task_ctx != ctx) {
1593 if (task_ctx)
1594 raw_spin_unlock(&task_ctx->lock);
1595 raw_spin_lock(&ctx->lock);
1596 task_ctx = ctx;
1597 }
1598
1599 if (task_ctx) {
1600 cpuctx->task_ctx = task_ctx;
2c29ef0f
PZ
1601 task = task_ctx->task;
1602 }
b58f6b0d 1603
2c29ef0f 1604 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
0793a61d 1605
4af4998b 1606 update_context_time(ctx);
e5d1367f
SE
1607 /*
1608 * update cgrp time only if current cgrp
1609 * matches event->cgrp. Must be done before
1610 * calling add_event_to_ctx()
1611 */
1612 update_cgrp_time_from_event(event);
0793a61d 1613
cdd6c482 1614 add_event_to_ctx(event, ctx);
0793a61d 1615
d859e29f 1616 /*
2c29ef0f 1617 * Schedule everything back in
d859e29f 1618 */
dce5855b 1619 perf_event_sched_in(cpuctx, task_ctx, task);
2c29ef0f
PZ
1620
1621 perf_pmu_enable(cpuctx->ctx.pmu);
1622 perf_ctx_unlock(cpuctx, task_ctx);
fe4b04fa
PZ
1623
1624 return 0;
0793a61d
TG
1625}
1626
1627/*
cdd6c482 1628 * Attach a performance event to a context
0793a61d 1629 *
cdd6c482
IM
1630 * First we add the event to the list with the hardware enable bit
1631 * in event->hw_config cleared.
0793a61d 1632 *
cdd6c482 1633 * If the event is attached to a task which is on a CPU we use a smp
0793a61d
TG
1634 * call to enable it in the task context. The task might have been
1635 * scheduled away, but we check this in the smp call again.
1636 */
1637static void
cdd6c482
IM
1638perf_install_in_context(struct perf_event_context *ctx,
1639 struct perf_event *event,
0793a61d
TG
1640 int cpu)
1641{
1642 struct task_struct *task = ctx->task;
1643
fe4b04fa
PZ
1644 lockdep_assert_held(&ctx->mutex);
1645
c3f00c70 1646 event->ctx = ctx;
0cda4c02
YZ
1647 if (event->cpu != -1)
1648 event->cpu = cpu;
c3f00c70 1649
0793a61d
TG
1650 if (!task) {
1651 /*
cdd6c482 1652 * Per cpu events are installed via an smp call and
af901ca1 1653 * the install is always successful.
0793a61d 1654 */
fe4b04fa 1655 cpu_function_call(cpu, __perf_install_in_context, event);
0793a61d
TG
1656 return;
1657 }
1658
0793a61d 1659retry:
fe4b04fa
PZ
1660 if (!task_function_call(task, __perf_install_in_context, event))
1661 return;
0793a61d 1662
e625cce1 1663 raw_spin_lock_irq(&ctx->lock);
0793a61d 1664 /*
fe4b04fa
PZ
1665 * If we failed to find a running task, but find the context active now
1666 * that we've acquired the ctx->lock, retry.
0793a61d 1667 */
fe4b04fa 1668 if (ctx->is_active) {
e625cce1 1669 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1670 goto retry;
1671 }
1672
1673 /*
fe4b04fa
PZ
 * Since the task isn't running, it's safe to add the event; our holding
 * the ctx->lock ensures the task won't get scheduled in.
0793a61d 1676 */
fe4b04fa 1677 add_event_to_ctx(event, ctx);
e625cce1 1678 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1679}
1680
fa289bec 1681/*
 * Put an event into inactive state and update time fields.
fa289bec
PM
1683 * Enabling the leader of a group effectively enables all
1684 * the group members that aren't explicitly disabled, so we
1685 * have to update their ->tstamp_enabled also.
1686 * Note: this works for group members as well as group leaders
1687 * since the non-leader members' sibling_lists will be empty.
1688 */
1d9b482e 1689static void __perf_event_mark_enabled(struct perf_event *event)
fa289bec 1690{
cdd6c482 1691 struct perf_event *sub;
4158755d 1692 u64 tstamp = perf_event_time(event);
fa289bec 1693
cdd6c482 1694 event->state = PERF_EVENT_STATE_INACTIVE;
4158755d 1695 event->tstamp_enabled = tstamp - event->total_time_enabled;
9ed6060d 1696 list_for_each_entry(sub, &event->sibling_list, group_entry) {
4158755d
SE
1697 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1698 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
9ed6060d 1699 }
fa289bec
PM
1700}
1701
d859e29f 1702/*
cdd6c482 1703 * Cross CPU call to enable a performance event
d859e29f 1704 */
fe4b04fa 1705static int __perf_event_enable(void *info)
04289bb9 1706{
cdd6c482 1707 struct perf_event *event = info;
cdd6c482
IM
1708 struct perf_event_context *ctx = event->ctx;
1709 struct perf_event *leader = event->group_leader;
108b02cf 1710 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
d859e29f 1711 int err;
04289bb9 1712
fe4b04fa
PZ
1713 if (WARN_ON_ONCE(!ctx->is_active))
1714 return -EINVAL;
3cbed429 1715
e625cce1 1716 raw_spin_lock(&ctx->lock);
4af4998b 1717 update_context_time(ctx);
d859e29f 1718
cdd6c482 1719 if (event->state >= PERF_EVENT_STATE_INACTIVE)
d859e29f 1720 goto unlock;
e5d1367f
SE
1721
1722 /*
1723 * set current task's cgroup time reference point
1724 */
3f7cce3c 1725 perf_cgroup_set_timestamp(current, ctx);
e5d1367f 1726
1d9b482e 1727 __perf_event_mark_enabled(event);
04289bb9 1728
e5d1367f
SE
1729 if (!event_filter_match(event)) {
1730 if (is_cgroup_event(event))
1731 perf_cgroup_defer_enabled(event);
f4c4176f 1732 goto unlock;
e5d1367f 1733 }
f4c4176f 1734
04289bb9 1735 /*
cdd6c482 1736 * If the event is in a group and isn't the group leader,
d859e29f 1737 * then don't put it on unless the group is on.
04289bb9 1738 */
cdd6c482 1739 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
d859e29f 1740 goto unlock;
3b6f9e5c 1741
cdd6c482 1742 if (!group_can_go_on(event, cpuctx, 1)) {
d859e29f 1743 err = -EEXIST;
e758a33d 1744 } else {
cdd6c482 1745 if (event == leader)
6e37738a 1746 err = group_sched_in(event, cpuctx, ctx);
e758a33d 1747 else
6e37738a 1748 err = event_sched_in(event, cpuctx, ctx);
e758a33d 1749 }
d859e29f
PM
1750
1751 if (err) {
1752 /*
cdd6c482 1753 * If this event can't go on and it's part of a
d859e29f
PM
1754 * group, then the whole group has to come off.
1755 */
cdd6c482 1756 if (leader != event)
d859e29f 1757 group_sched_out(leader, cpuctx, ctx);
0d48696f 1758 if (leader->attr.pinned) {
53cfbf59 1759 update_group_times(leader);
cdd6c482 1760 leader->state = PERF_EVENT_STATE_ERROR;
53cfbf59 1761 }
d859e29f
PM
1762 }
1763
9ed6060d 1764unlock:
e625cce1 1765 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
1766
1767 return 0;
d859e29f
PM
1768}
1769
1770/*
 * Enable an event.
c93f7669 1772 *
cdd6c482
IM
1773 * If event->ctx is a cloned context, callers must make sure that
1774 * every task struct that event->ctx->task could possibly point to
c93f7669 1775 * remains valid. This condition is satisfied when called through
cdd6c482
IM
1776 * perf_event_for_each_child or perf_event_for_each as described
1777 * for perf_event_disable.
d859e29f 1778 */
44234adc 1779void perf_event_enable(struct perf_event *event)
d859e29f 1780{
cdd6c482 1781 struct perf_event_context *ctx = event->ctx;
d859e29f
PM
1782 struct task_struct *task = ctx->task;
1783
1784 if (!task) {
1785 /*
cdd6c482 1786 * Enable the event on the cpu that it's on
d859e29f 1787 */
fe4b04fa 1788 cpu_function_call(event->cpu, __perf_event_enable, event);
d859e29f
PM
1789 return;
1790 }
1791
e625cce1 1792 raw_spin_lock_irq(&ctx->lock);
cdd6c482 1793 if (event->state >= PERF_EVENT_STATE_INACTIVE)
d859e29f
PM
1794 goto out;
1795
1796 /*
cdd6c482
IM
1797 * If the event is in error state, clear that first.
1798 * That way, if we see the event in error state below, we
d859e29f
PM
1799 * know that it has gone back into error state, as distinct
1800 * from the task having been scheduled away before the
1801 * cross-call arrived.
1802 */
cdd6c482
IM
1803 if (event->state == PERF_EVENT_STATE_ERROR)
1804 event->state = PERF_EVENT_STATE_OFF;
d859e29f 1805
9ed6060d 1806retry:
fe4b04fa 1807 if (!ctx->is_active) {
1d9b482e 1808 __perf_event_mark_enabled(event);
fe4b04fa
PZ
1809 goto out;
1810 }
1811
e625cce1 1812 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa
PZ
1813
1814 if (!task_function_call(task, __perf_event_enable, event))
1815 return;
d859e29f 1816
e625cce1 1817 raw_spin_lock_irq(&ctx->lock);
d859e29f
PM
1818
1819 /*
cdd6c482 1820 * If the context is active and the event is still off,
d859e29f
PM
1821 * we need to retry the cross-call.
1822 */
fe4b04fa
PZ
1823 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
1824 /*
1825 * task could have been flipped by a concurrent
1826 * perf_event_context_sched_out()
1827 */
1828 task = ctx->task;
d859e29f 1829 goto retry;
fe4b04fa 1830 }
fa289bec 1831
9ed6060d 1832out:
e625cce1 1833 raw_spin_unlock_irq(&ctx->lock);
d859e29f 1834}
dcfce4a0 1835EXPORT_SYMBOL_GPL(perf_event_enable);
d859e29f 1836
26ca5c11 1837int perf_event_refresh(struct perf_event *event, int refresh)
79f14641 1838{
2023b359 1839 /*
cdd6c482 1840 * not supported on inherited events
2023b359 1841 */
2e939d1d 1842 if (event->attr.inherit || !is_sampling_event(event))
2023b359
PZ
1843 return -EINVAL;
1844
cdd6c482
IM
1845 atomic_add(refresh, &event->event_limit);
1846 perf_event_enable(event);
2023b359
PZ
1847
1848 return 0;
79f14641 1849}
26ca5c11 1850EXPORT_SYMBOL_GPL(perf_event_refresh);
79f14641 1851
5b0311e1
FW
1852static void ctx_sched_out(struct perf_event_context *ctx,
1853 struct perf_cpu_context *cpuctx,
1854 enum event_type_t event_type)
235c7fc7 1855{
cdd6c482 1856 struct perf_event *event;
db24d33e 1857 int is_active = ctx->is_active;
235c7fc7 1858
db24d33e 1859 ctx->is_active &= ~event_type;
cdd6c482 1860 if (likely(!ctx->nr_events))
facc4307
PZ
1861 return;
1862
4af4998b 1863 update_context_time(ctx);
e5d1367f 1864 update_cgrp_time_from_cpuctx(cpuctx);
5b0311e1 1865 if (!ctx->nr_active)
facc4307 1866 return;
5b0311e1 1867
075e0b00 1868 perf_pmu_disable(ctx->pmu);
db24d33e 1869 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
889ff015
FW
1870 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1871 group_sched_out(event, cpuctx, ctx);
9ed6060d 1872 }
889ff015 1873
db24d33e 1874 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
889ff015 1875 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
8c9ed8e1 1876 group_sched_out(event, cpuctx, ctx);
9ed6060d 1877 }
1b9a644f 1878 perf_pmu_enable(ctx->pmu);
235c7fc7
IM
1879}
1880
564c2b21
PM
1881/*
1882 * Test whether two contexts are equivalent, i.e. whether they
1883 * have both been cloned from the same version of the same context
cdd6c482
IM
1884 * and they both have the same number of enabled events.
1885 * If the number of enabled events is the same, then the set
1886 * of enabled events should be the same, because these are both
1887 * inherited contexts, therefore we can't access individual events
564c2b21 1888 * in them directly with an fd; we can only enable/disable all
cdd6c482 1889 * events via prctl, or enable/disable all events in a family
564c2b21
PM
1890 * via ioctl, which will have the same effect on both contexts.
1891 */
cdd6c482
IM
1892static int context_equiv(struct perf_event_context *ctx1,
1893 struct perf_event_context *ctx2)
564c2b21
PM
1894{
1895 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
ad3a37de 1896 && ctx1->parent_gen == ctx2->parent_gen
25346b93 1897 && !ctx1->pin_count && !ctx2->pin_count;
564c2b21
PM
1898}
1899
cdd6c482
IM
1900static void __perf_event_sync_stat(struct perf_event *event,
1901 struct perf_event *next_event)
bfbd3381
PZ
1902{
1903 u64 value;
1904
cdd6c482 1905 if (!event->attr.inherit_stat)
bfbd3381
PZ
1906 return;
1907
1908 /*
cdd6c482 1909 * Update the event value, we cannot use perf_event_read()
bfbd3381
PZ
1910 * because we're in the middle of a context switch and have IRQs
1911 * disabled, which upsets smp_call_function_single(), however
cdd6c482 1912 * we know the event must be on the current CPU, therefore we
bfbd3381
PZ
1913 * don't need to use it.
1914 */
cdd6c482
IM
1915 switch (event->state) {
1916 case PERF_EVENT_STATE_ACTIVE:
3dbebf15
PZ
1917 event->pmu->read(event);
1918 /* fall-through */
bfbd3381 1919
cdd6c482
IM
1920 case PERF_EVENT_STATE_INACTIVE:
1921 update_event_times(event);
bfbd3381
PZ
1922 break;
1923
1924 default:
1925 break;
1926 }
1927
1928 /*
cdd6c482 1929 * In order to keep per-task stats reliable we need to flip the event
bfbd3381
PZ
1930 * values when we flip the contexts.
1931 */
e7850595
PZ
1932 value = local64_read(&next_event->count);
1933 value = local64_xchg(&event->count, value);
1934 local64_set(&next_event->count, value);
bfbd3381 1935
cdd6c482
IM
1936 swap(event->total_time_enabled, next_event->total_time_enabled);
1937 swap(event->total_time_running, next_event->total_time_running);
19d2e755 1938
bfbd3381 1939 /*
19d2e755 1940 * Since we swizzled the values, update the user visible data too.
bfbd3381 1941 */
cdd6c482
IM
1942 perf_event_update_userpage(event);
1943 perf_event_update_userpage(next_event);
bfbd3381
PZ
1944}
1945
1946#define list_next_entry(pos, member) \
1947 list_entry(pos->member.next, typeof(*pos), member)
1948
cdd6c482
IM
1949static void perf_event_sync_stat(struct perf_event_context *ctx,
1950 struct perf_event_context *next_ctx)
bfbd3381 1951{
cdd6c482 1952 struct perf_event *event, *next_event;
bfbd3381
PZ
1953
1954 if (!ctx->nr_stat)
1955 return;
1956
02ffdbc8
PZ
1957 update_context_time(ctx);
1958
cdd6c482
IM
1959 event = list_first_entry(&ctx->event_list,
1960 struct perf_event, event_entry);
bfbd3381 1961
cdd6c482
IM
1962 next_event = list_first_entry(&next_ctx->event_list,
1963 struct perf_event, event_entry);
bfbd3381 1964
cdd6c482
IM
1965 while (&event->event_entry != &ctx->event_list &&
1966 &next_event->event_entry != &next_ctx->event_list) {
bfbd3381 1967
cdd6c482 1968 __perf_event_sync_stat(event, next_event);
bfbd3381 1969
cdd6c482
IM
1970 event = list_next_entry(event, event_entry);
1971 next_event = list_next_entry(next_event, event_entry);
bfbd3381
PZ
1972 }
1973}
1974
fe4b04fa
PZ
1975static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1976 struct task_struct *next)
0793a61d 1977{
8dc85d54 1978 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482
IM
1979 struct perf_event_context *next_ctx;
1980 struct perf_event_context *parent;
108b02cf 1981 struct perf_cpu_context *cpuctx;
c93f7669 1982 int do_switch = 1;
0793a61d 1983
108b02cf
PZ
1984 if (likely(!ctx))
1985 return;
10989fb2 1986
108b02cf
PZ
1987 cpuctx = __get_cpu_context(ctx);
1988 if (!cpuctx->task_ctx)
0793a61d
TG
1989 return;
1990
c93f7669
PM
1991 rcu_read_lock();
1992 parent = rcu_dereference(ctx->parent_ctx);
8dc85d54 1993 next_ctx = next->perf_event_ctxp[ctxn];
c93f7669
PM
1994 if (parent && next_ctx &&
1995 rcu_dereference(next_ctx->parent_ctx) == parent) {
1996 /*
1997 * Looks like the two contexts are clones, so we might be
1998 * able to optimize the context switch. We lock both
1999 * contexts and check that they are clones under the
2000 * lock (including re-checking that neither has been
2001 * uncloned in the meantime). It doesn't matter which
2002 * order we take the locks because no other cpu could
2003 * be trying to lock both of these tasks.
2004 */
e625cce1
TG
2005 raw_spin_lock(&ctx->lock);
2006 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f7669 2007 if (context_equiv(ctx, next_ctx)) {
665c2142
PZ
2008 /*
2009 * XXX do we need a memory barrier of sorts
cdd6c482 2010 * wrt to rcu_dereference() of perf_event_ctxp
665c2142 2011 */
8dc85d54
PZ
2012 task->perf_event_ctxp[ctxn] = next_ctx;
2013 next->perf_event_ctxp[ctxn] = ctx;
c93f7669
PM
2014 ctx->task = next;
2015 next_ctx->task = task;
2016 do_switch = 0;
bfbd3381 2017
cdd6c482 2018 perf_event_sync_stat(ctx, next_ctx);
c93f7669 2019 }
e625cce1
TG
2020 raw_spin_unlock(&next_ctx->lock);
2021 raw_spin_unlock(&ctx->lock);
564c2b21 2022 }
c93f7669 2023 rcu_read_unlock();
564c2b21 2024
c93f7669 2025 if (do_switch) {
facc4307 2026 raw_spin_lock(&ctx->lock);
5b0311e1 2027 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
c93f7669 2028 cpuctx->task_ctx = NULL;
facc4307 2029 raw_spin_unlock(&ctx->lock);
c93f7669 2030 }
0793a61d
TG
2031}
2032
8dc85d54
PZ
2033#define for_each_task_context_nr(ctxn) \
2034 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2035
2036/*
2037 * Called from scheduler to remove the events of the current task,
2038 * with interrupts disabled.
2039 *
2040 * We stop each event and update the event value in event->count.
2041 *
2042 * This does not protect us against NMI, but disable()
2043 * sets the disabled bit in the control field of event _before_
 2044 * accessing the event control register. If an NMI hits, then it will
2045 * not restart the event.
2046 */
ab0cce56
JO
2047void __perf_event_task_sched_out(struct task_struct *task,
2048 struct task_struct *next)
8dc85d54
PZ
2049{
2050 int ctxn;
2051
8dc85d54
PZ
2052 for_each_task_context_nr(ctxn)
2053 perf_event_context_sched_out(task, ctxn, next);
e5d1367f
SE
2054
2055 /*
2056 * if cgroup events exist on this CPU, then we need
2057 * to check if we have to switch out PMU state.
 2058 * cgroup events run in system-wide mode only
2059 */
2060 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
a8d757ef 2061 perf_cgroup_sched_out(task, next);
8dc85d54
PZ
2062}
2063
04dc2dbb 2064static void task_ctx_sched_out(struct perf_event_context *ctx)
a08b159f 2065{
108b02cf 2066 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
a08b159f 2067
a63eaf34
PM
2068 if (!cpuctx->task_ctx)
2069 return;
012b84da
IM
2070
2071 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2072 return;
2073
04dc2dbb 2074 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
a08b159f
PM
2075 cpuctx->task_ctx = NULL;
2076}
2077
5b0311e1
FW
2078/*
2079 * Called with IRQs disabled
2080 */
2081static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2082 enum event_type_t event_type)
2083{
2084 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb9
IM
2085}
2086
235c7fc7 2087static void
5b0311e1 2088ctx_pinned_sched_in(struct perf_event_context *ctx,
6e37738a 2089 struct perf_cpu_context *cpuctx)
0793a61d 2090{
cdd6c482 2091 struct perf_event *event;
0793a61d 2092
889ff015
FW
2093 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2094 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2095 continue;
5632ab12 2096 if (!event_filter_match(event))
3b6f9e5c
PM
2097 continue;
2098
e5d1367f
SE
2099 /* may need to reset tstamp_enabled */
2100 if (is_cgroup_event(event))
2101 perf_cgroup_mark_enabled(event, ctx);
2102
8c9ed8e1 2103 if (group_can_go_on(event, cpuctx, 1))
6e37738a 2104 group_sched_in(event, cpuctx, ctx);
3b6f9e5c
PM
2105
2106 /*
2107 * If this pinned group hasn't been scheduled,
2108 * put it in error state.
2109 */
cdd6c482
IM
2110 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2111 update_group_times(event);
2112 event->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2113 }
3b6f9e5c 2114 }
5b0311e1
FW
2115}
2116
2117static void
2118ctx_flexible_sched_in(struct perf_event_context *ctx,
6e37738a 2119 struct perf_cpu_context *cpuctx)
5b0311e1
FW
2120{
2121 struct perf_event *event;
2122 int can_add_hw = 1;
3b6f9e5c 2123
889ff015
FW
2124 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2125 /* Ignore events in OFF or ERROR state */
2126 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2127 continue;
04289bb9
IM
2128 /*
2129 * Listen to the 'cpu' scheduling filter constraint
cdd6c482 2130 * of events:
04289bb9 2131 */
5632ab12 2132 if (!event_filter_match(event))
0793a61d
TG
2133 continue;
2134
e5d1367f
SE
2135 /* may need to reset tstamp_enabled */
2136 if (is_cgroup_event(event))
2137 perf_cgroup_mark_enabled(event, ctx);
2138
9ed6060d 2139 if (group_can_go_on(event, cpuctx, can_add_hw)) {
6e37738a 2140 if (group_sched_in(event, cpuctx, ctx))
dd0e6ba2 2141 can_add_hw = 0;
9ed6060d 2142 }
0793a61d 2143 }
5b0311e1
FW
2144}
2145
2146static void
2147ctx_sched_in(struct perf_event_context *ctx,
2148 struct perf_cpu_context *cpuctx,
e5d1367f
SE
2149 enum event_type_t event_type,
2150 struct task_struct *task)
5b0311e1 2151{
e5d1367f 2152 u64 now;
db24d33e 2153 int is_active = ctx->is_active;
e5d1367f 2154
db24d33e 2155 ctx->is_active |= event_type;
5b0311e1 2156 if (likely(!ctx->nr_events))
facc4307 2157 return;
5b0311e1 2158
e5d1367f
SE
2159 now = perf_clock();
2160 ctx->timestamp = now;
3f7cce3c 2161 perf_cgroup_set_timestamp(task, ctx);
5b0311e1
FW
2162 /*
2163 * First go through the list and put on any pinned groups
2164 * in order to give them the best chance of going on.
2165 */
db24d33e 2166 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
6e37738a 2167 ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1
FW
2168
2169 /* Then walk through the lower prio flexible groups */
db24d33e 2170 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
6e37738a 2171 ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7
IM
2172}
2173
329c0e01 2174static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
2175 enum event_type_t event_type,
2176 struct task_struct *task)
329c0e01
FW
2177{
2178 struct perf_event_context *ctx = &cpuctx->ctx;
2179
e5d1367f 2180 ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e01
FW
2181}
2182
e5d1367f
SE
2183static void perf_event_context_sched_in(struct perf_event_context *ctx,
2184 struct task_struct *task)
235c7fc7 2185{
108b02cf 2186 struct perf_cpu_context *cpuctx;
235c7fc7 2187
108b02cf 2188 cpuctx = __get_cpu_context(ctx);
329c0e01
FW
2189 if (cpuctx->task_ctx == ctx)
2190 return;
2191
facc4307 2192 perf_ctx_lock(cpuctx, ctx);
1b9a644f 2193 perf_pmu_disable(ctx->pmu);
329c0e01
FW
2194 /*
2195 * We want to keep the following priority order:
2196 * cpu pinned (that don't need to move), task pinned,
2197 * cpu flexible, task flexible.
2198 */
2199 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2200
1d5f003f
GN
2201 if (ctx->nr_events)
2202 cpuctx->task_ctx = ctx;
9b33fa6b 2203
86b47c25
GN
2204 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2205
facc4307
PZ
2206 perf_pmu_enable(ctx->pmu);
2207 perf_ctx_unlock(cpuctx, ctx);
2208
b5ab4cd5
PZ
2209 /*
2210 * Since these rotations are per-cpu, we need to ensure the
2211 * cpu-context we got scheduled on is actually rotating.
2212 */
108b02cf 2213 perf_pmu_rotate_start(ctx->pmu);
235c7fc7
IM
2214}
2215
d010b332
SE
2216/*
 2217 * When sampling the branch stack in system-wide mode, it may be necessary
2218 * to flush the stack on context switch. This happens when the branch
2219 * stack does not tag its entries with the pid of the current task.
2220 * Otherwise it becomes impossible to associate a branch entry with a
2221 * task. This ambiguity is more likely to appear when the branch stack
2222 * supports priv level filtering and the user sets it to monitor only
2223 * at the user level (which could be a useful measurement in system-wide
2224 * mode). In that case, the risk is high of having a branch stack with
 2225 * branches from multiple tasks. Flushing may mean dropping the existing
2226 * entries or stashing them somewhere in the PMU specific code layer.
2227 *
2228 * This function provides the context switch callback to the lower code
2229 * layer. It is invoked ONLY when there is at least one system-wide context
2230 * with at least one active event using taken branch sampling.
2231 */
2232static void perf_branch_stack_sched_in(struct task_struct *prev,
2233 struct task_struct *task)
2234{
2235 struct perf_cpu_context *cpuctx;
2236 struct pmu *pmu;
2237 unsigned long flags;
2238
2239 /* no need to flush branch stack if not changing task */
2240 if (prev == task)
2241 return;
2242
2243 local_irq_save(flags);
2244
2245 rcu_read_lock();
2246
2247 list_for_each_entry_rcu(pmu, &pmus, entry) {
2248 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2249
2250 /*
2251 * check if the context has at least one
2252 * event using PERF_SAMPLE_BRANCH_STACK
2253 */
2254 if (cpuctx->ctx.nr_branch_stack > 0
2255 && pmu->flush_branch_stack) {
2256
2257 pmu = cpuctx->ctx.pmu;
2258
2259 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2260
2261 perf_pmu_disable(pmu);
2262
2263 pmu->flush_branch_stack();
2264
2265 perf_pmu_enable(pmu);
2266
2267 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2268 }
2269 }
2270
2271 rcu_read_unlock();
2272
2273 local_irq_restore(flags);
2274}
2275
8dc85d54
PZ
2276/*
2277 * Called from scheduler to add the events of the current task
2278 * with interrupts disabled.
2279 *
2280 * We restore the event value and then enable it.
2281 *
2282 * This does not protect us against NMI, but enable()
2283 * sets the enabled bit in the control field of event _before_
 2284 * accessing the event control register. If an NMI hits, then it will
2285 * keep the event running.
2286 */
ab0cce56
JO
2287void __perf_event_task_sched_in(struct task_struct *prev,
2288 struct task_struct *task)
8dc85d54
PZ
2289{
2290 struct perf_event_context *ctx;
2291 int ctxn;
2292
2293 for_each_task_context_nr(ctxn) {
2294 ctx = task->perf_event_ctxp[ctxn];
2295 if (likely(!ctx))
2296 continue;
2297
e5d1367f 2298 perf_event_context_sched_in(ctx, task);
8dc85d54 2299 }
e5d1367f
SE
2300 /*
2301 * if cgroup events exist on this CPU, then we need
2302 * to check if we have to switch in PMU state.
 2303 * cgroup events run in system-wide mode only
2304 */
2305 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
a8d757ef 2306 perf_cgroup_sched_in(prev, task);
d010b332
SE
2307
2308 /* check for system-wide branch_stack events */
2309 if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
2310 perf_branch_stack_sched_in(prev, task);
235c7fc7
IM
2311}
2312
abd50713
PZ
2313static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2314{
2315 u64 frequency = event->attr.sample_freq;
2316 u64 sec = NSEC_PER_SEC;
2317 u64 divisor, dividend;
2318
2319 int count_fls, nsec_fls, frequency_fls, sec_fls;
2320
2321 count_fls = fls64(count);
2322 nsec_fls = fls64(nsec);
2323 frequency_fls = fls64(frequency);
2324 sec_fls = 30;
2325
2326 /*
2327 * We got @count in @nsec, with a target of sample_freq HZ
2328 * the target period becomes:
2329 *
2330 * @count * 10^9
2331 * period = -------------------
2332 * @nsec * sample_freq
2333 *
2334 */
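 /*
 * A quick worked example with made-up numbers: if the event counted
 * 2,000,000 events over the last 10,000,000 ns (10 ms) and
 * attr.sample_freq is 1000 Hz, the target period becomes
 * 2e6 * 1e9 / (1e7 * 1e3) = 200,000 events per sample, i.e. roughly
 * 1000 samples per second at the observed event rate.
 */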
2335
2336 /*
2337 * Reduce accuracy by one bit such that @a and @b converge
2338 * to a similar magnitude.
2339 */
fe4b04fa 2340#define REDUCE_FLS(a, b) \
abd50713
PZ
2341do { \
2342 if (a##_fls > b##_fls) { \
2343 a >>= 1; \
2344 a##_fls--; \
2345 } else { \
2346 b >>= 1; \
2347 b##_fls--; \
2348 } \
2349} while (0)
2350
2351 /*
2352 * Reduce accuracy until either term fits in a u64, then proceed with
2353 * the other, so that finally we can do a u64/u64 division.
2354 */
2355 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2356 REDUCE_FLS(nsec, frequency);
2357 REDUCE_FLS(sec, count);
2358 }
2359
2360 if (count_fls + sec_fls > 64) {
2361 divisor = nsec * frequency;
2362
2363 while (count_fls + sec_fls > 64) {
2364 REDUCE_FLS(count, sec);
2365 divisor >>= 1;
2366 }
2367
2368 dividend = count * sec;
2369 } else {
2370 dividend = count * sec;
2371
2372 while (nsec_fls + frequency_fls > 64) {
2373 REDUCE_FLS(nsec, frequency);
2374 dividend >>= 1;
2375 }
2376
2377 divisor = nsec * frequency;
2378 }
2379
f6ab91ad
PZ
2380 if (!divisor)
2381 return dividend;
2382
abd50713
PZ
2383 return div64_u64(dividend, divisor);
2384}
2385
e050e3f0
SE
2386static DEFINE_PER_CPU(int, perf_throttled_count);
2387static DEFINE_PER_CPU(u64, perf_throttled_seq);
2388
f39d47ff 2389static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
bd2b5b12 2390{
cdd6c482 2391 struct hw_perf_event *hwc = &event->hw;
f6ab91ad 2392 s64 period, sample_period;
bd2b5b12
PZ
2393 s64 delta;
2394
abd50713 2395 period = perf_calculate_period(event, nsec, count);
bd2b5b12
PZ
2396
2397 delta = (s64)(period - hwc->sample_period);
2398 delta = (delta + 7) / 8; /* low pass filter */
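 /*
 * Example with hypothetical values: if hwc->sample_period is 100000 and
 * the freshly computed target period is 180000, delta is 80000 and the
 * filtered step is 10000, so the period only moves an eighth of the way
 * towards the target per adjustment, smoothing out noisy estimates.
 */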
2399
2400 sample_period = hwc->sample_period + delta;
2401
2402 if (!sample_period)
2403 sample_period = 1;
2404
bd2b5b12 2405 hwc->sample_period = sample_period;
abd50713 2406
e7850595 2407 if (local64_read(&hwc->period_left) > 8*sample_period) {
f39d47ff
SE
2408 if (disable)
2409 event->pmu->stop(event, PERF_EF_UPDATE);
2410
e7850595 2411 local64_set(&hwc->period_left, 0);
f39d47ff
SE
2412
2413 if (disable)
2414 event->pmu->start(event, PERF_EF_RELOAD);
abd50713 2415 }
bd2b5b12
PZ
2416}
2417
e050e3f0
SE
2418/*
2419 * combine freq adjustment with unthrottling to avoid two passes over the
 2420 * events. At the same time, make sure that having freq events does not change
2421 * the rate of unthrottling as that would introduce bias.
2422 */
2423static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2424 int needs_unthr)
60db5e09 2425{
cdd6c482
IM
2426 struct perf_event *event;
2427 struct hw_perf_event *hwc;
e050e3f0 2428 u64 now, period = TICK_NSEC;
abd50713 2429 s64 delta;
60db5e09 2430
e050e3f0
SE
2431 /*
2432 * only need to iterate over all events iff:
 2433 * - the context has events in frequency mode (needs freq adjust)
2434 * - there are events to unthrottle on this cpu
2435 */
2436 if (!(ctx->nr_freq || needs_unthr))
0f5a2601
PZ
2437 return;
2438
e050e3f0 2439 raw_spin_lock(&ctx->lock);
f39d47ff 2440 perf_pmu_disable(ctx->pmu);
e050e3f0 2441
03541f8b 2442 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482 2443 if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09
PZ
2444 continue;
2445
5632ab12 2446 if (!event_filter_match(event))
5d27c23d
PZ
2447 continue;
2448
cdd6c482 2449 hwc = &event->hw;
6a24ed6c 2450
e050e3f0
SE
2451 if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
2452 hwc->interrupts = 0;
cdd6c482 2453 perf_log_throttle(event, 1);
a4eaf7f1 2454 event->pmu->start(event, 0);
a78ac325
PZ
2455 }
2456
cdd6c482 2457 if (!event->attr.freq || !event->attr.sample_freq)
60db5e09
PZ
2458 continue;
2459
e050e3f0
SE
2460 /*
2461 * stop the event and update event->count
2462 */
2463 event->pmu->stop(event, PERF_EF_UPDATE);
2464
e7850595 2465 now = local64_read(&event->count);
abd50713
PZ
2466 delta = now - hwc->freq_count_stamp;
2467 hwc->freq_count_stamp = now;
60db5e09 2468
e050e3f0
SE
2469 /*
2470 * restart the event
2471 * reload only if value has changed
f39d47ff
SE
2472 * we have stopped the event so tell that
2473 * to perf_adjust_period() to avoid stopping it
2474 * twice.
e050e3f0 2475 */
abd50713 2476 if (delta > 0)
f39d47ff 2477 perf_adjust_period(event, period, delta, false);
e050e3f0
SE
2478
2479 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
60db5e09 2480 }
e050e3f0 2481
f39d47ff 2482 perf_pmu_enable(ctx->pmu);
e050e3f0 2483 raw_spin_unlock(&ctx->lock);
60db5e09
PZ
2484}
2485
235c7fc7 2486/*
cdd6c482 2487 * Round-robin a context's events:
235c7fc7 2488 */
cdd6c482 2489static void rotate_ctx(struct perf_event_context *ctx)
0793a61d 2490{
dddd3379
TG
2491 /*
2492 * Rotate the first entry last of non-pinned groups. Rotation might be
2493 * disabled by the inheritance code.
2494 */
2495 if (!ctx->rotate_disable)
2496 list_rotate_left(&ctx->flexible_groups);
235c7fc7
IM
2497}
2498
b5ab4cd5 2499/*
e9d2b064
PZ
2500 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2501 * because they're strictly cpu affine and rotate_start is called with IRQs
2502 * disabled, while rotate_context is called from IRQ context.
b5ab4cd5 2503 */
e9d2b064 2504static void perf_rotate_context(struct perf_cpu_context *cpuctx)
235c7fc7 2505{
8dc85d54 2506 struct perf_event_context *ctx = NULL;
e050e3f0 2507 int rotate = 0, remove = 1;
7fc23a53 2508
b5ab4cd5 2509 if (cpuctx->ctx.nr_events) {
e9d2b064 2510 remove = 0;
b5ab4cd5
PZ
2511 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2512 rotate = 1;
2513 }
235c7fc7 2514
8dc85d54 2515 ctx = cpuctx->task_ctx;
b5ab4cd5 2516 if (ctx && ctx->nr_events) {
e9d2b064 2517 remove = 0;
b5ab4cd5
PZ
2518 if (ctx->nr_events != ctx->nr_active)
2519 rotate = 1;
2520 }
9717e6cd 2521
e050e3f0 2522 if (!rotate)
0f5a2601
PZ
2523 goto done;
2524
facc4307 2525 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644f 2526 perf_pmu_disable(cpuctx->ctx.pmu);
60db5e09 2527
e050e3f0
SE
2528 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2529 if (ctx)
2530 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
0793a61d 2531
e050e3f0
SE
2532 rotate_ctx(&cpuctx->ctx);
2533 if (ctx)
2534 rotate_ctx(ctx);
235c7fc7 2535
e050e3f0 2536 perf_event_sched_in(cpuctx, ctx, current);
235c7fc7 2537
0f5a2601
PZ
2538 perf_pmu_enable(cpuctx->ctx.pmu);
2539 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
b5ab4cd5 2540done:
e9d2b064
PZ
2541 if (remove)
2542 list_del_init(&cpuctx->rotation_list);
e9d2b064
PZ
2543}
2544
2545void perf_event_task_tick(void)
2546{
2547 struct list_head *head = &__get_cpu_var(rotation_list);
2548 struct perf_cpu_context *cpuctx, *tmp;
e050e3f0
SE
2549 struct perf_event_context *ctx;
2550 int throttled;
b5ab4cd5 2551
e9d2b064
PZ
2552 WARN_ON(!irqs_disabled());
2553
e050e3f0
SE
2554 __this_cpu_inc(perf_throttled_seq);
2555 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2556
e9d2b064 2557 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
e050e3f0
SE
2558 ctx = &cpuctx->ctx;
2559 perf_adjust_freq_unthr_context(ctx, throttled);
2560
2561 ctx = cpuctx->task_ctx;
2562 if (ctx)
2563 perf_adjust_freq_unthr_context(ctx, throttled);
2564
e9d2b064
PZ
2565 if (cpuctx->jiffies_interval == 1 ||
2566 !(jiffies % cpuctx->jiffies_interval))
2567 perf_rotate_context(cpuctx);
2568 }
0793a61d
TG
2569}
2570
889ff015
FW
2571static int event_enable_on_exec(struct perf_event *event,
2572 struct perf_event_context *ctx)
2573{
2574 if (!event->attr.enable_on_exec)
2575 return 0;
2576
2577 event->attr.enable_on_exec = 0;
2578 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2579 return 0;
2580
1d9b482e 2581 __perf_event_mark_enabled(event);
889ff015
FW
2582
2583 return 1;
2584}
2585
57e7986e 2586/*
cdd6c482 2587 * Enable all of a task's events that have been marked enable-on-exec.
57e7986e
PM
2588 * This expects task == current.
2589 */
8dc85d54 2590static void perf_event_enable_on_exec(struct perf_event_context *ctx)
57e7986e 2591{
cdd6c482 2592 struct perf_event *event;
57e7986e
PM
2593 unsigned long flags;
2594 int enabled = 0;
889ff015 2595 int ret;
57e7986e
PM
2596
2597 local_irq_save(flags);
cdd6c482 2598 if (!ctx || !ctx->nr_events)
57e7986e
PM
2599 goto out;
2600
e566b76e
SE
2601 /*
2602 * We must ctxsw out cgroup events to avoid conflict
2603 * when invoking perf_task_event_sched_in() later on
2604 * in this function. Otherwise we end up trying to
2605 * ctxswin cgroup events which are already scheduled
2606 * in.
2607 */
a8d757ef 2608 perf_cgroup_sched_out(current, NULL);
57e7986e 2609
e625cce1 2610 raw_spin_lock(&ctx->lock);
04dc2dbb 2611 task_ctx_sched_out(ctx);
57e7986e 2612
b79387ef 2613 list_for_each_entry(event, &ctx->event_list, event_entry) {
889ff015
FW
2614 ret = event_enable_on_exec(event, ctx);
2615 if (ret)
2616 enabled = 1;
57e7986e
PM
2617 }
2618
2619 /*
cdd6c482 2620 * Unclone this context if we enabled any event.
57e7986e 2621 */
71a851b4
PZ
2622 if (enabled)
2623 unclone_ctx(ctx);
57e7986e 2624
e625cce1 2625 raw_spin_unlock(&ctx->lock);
57e7986e 2626
e566b76e
SE
2627 /*
2628 * Also calls ctxswin for cgroup events, if any:
2629 */
e5d1367f 2630 perf_event_context_sched_in(ctx, ctx->task);
9ed6060d 2631out:
57e7986e
PM
2632 local_irq_restore(flags);
2633}
2634
0793a61d 2635/*
cdd6c482 2636 * Cross CPU call to read the hardware event
0793a61d 2637 */
cdd6c482 2638static void __perf_event_read(void *info)
0793a61d 2639{
cdd6c482
IM
2640 struct perf_event *event = info;
2641 struct perf_event_context *ctx = event->ctx;
108b02cf 2642 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
621a01ea 2643
e1ac3614
PM
2644 /*
2645 * If this is a task context, we need to check whether it is
 2646 * the current task context of this cpu. If not, it has been
2647 * scheduled out before the smp call arrived. In that case
cdd6c482
IM
2648 * event->count would have been updated to a recent sample
2649 * when the event was scheduled out.
e1ac3614
PM
2650 */
2651 if (ctx->task && cpuctx->task_ctx != ctx)
2652 return;
2653
e625cce1 2654 raw_spin_lock(&ctx->lock);
e5d1367f 2655 if (ctx->is_active) {
542e72fc 2656 update_context_time(ctx);
e5d1367f
SE
2657 update_cgrp_time_from_event(event);
2658 }
cdd6c482 2659 update_event_times(event);
542e72fc
PZ
2660 if (event->state == PERF_EVENT_STATE_ACTIVE)
2661 event->pmu->read(event);
e625cce1 2662 raw_spin_unlock(&ctx->lock);
0793a61d
TG
2663}
2664
b5e58793
PZ
2665static inline u64 perf_event_count(struct perf_event *event)
2666{
e7850595 2667 return local64_read(&event->count) + atomic64_read(&event->child_count);
b5e58793
PZ
2668}
2669
cdd6c482 2670static u64 perf_event_read(struct perf_event *event)
0793a61d
TG
2671{
2672 /*
cdd6c482
IM
2673 * If event is enabled and currently active on a CPU, update the
2674 * value in the event structure:
0793a61d 2675 */
cdd6c482
IM
2676 if (event->state == PERF_EVENT_STATE_ACTIVE) {
2677 smp_call_function_single(event->oncpu,
2678 __perf_event_read, event, 1);
2679 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9
PZ
2680 struct perf_event_context *ctx = event->ctx;
2681 unsigned long flags;
2682
e625cce1 2683 raw_spin_lock_irqsave(&ctx->lock, flags);
c530ccd9
SE
2684 /*
2685 * may read while context is not active
2686 * (e.g., thread is blocked), in that case
2687 * we cannot update context time
2688 */
e5d1367f 2689 if (ctx->is_active) {
c530ccd9 2690 update_context_time(ctx);
e5d1367f
SE
2691 update_cgrp_time_from_event(event);
2692 }
cdd6c482 2693 update_event_times(event);
e625cce1 2694 raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d
TG
2695 }
2696
b5e58793 2697 return perf_event_count(event);
0793a61d
TG
2698}
2699
a63eaf34 2700/*
cdd6c482 2701 * Initialize the perf_event context in a task_struct:
a63eaf34 2702 */
eb184479 2703static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34 2704{
e625cce1 2705 raw_spin_lock_init(&ctx->lock);
a63eaf34 2706 mutex_init(&ctx->mutex);
889ff015
FW
2707 INIT_LIST_HEAD(&ctx->pinned_groups);
2708 INIT_LIST_HEAD(&ctx->flexible_groups);
a63eaf34
PM
2709 INIT_LIST_HEAD(&ctx->event_list);
2710 atomic_set(&ctx->refcount, 1);
eb184479
PZ
2711}
2712
2713static struct perf_event_context *
2714alloc_perf_context(struct pmu *pmu, struct task_struct *task)
2715{
2716 struct perf_event_context *ctx;
2717
2718 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2719 if (!ctx)
2720 return NULL;
2721
2722 __perf_event_init_context(ctx);
2723 if (task) {
2724 ctx->task = task;
2725 get_task_struct(task);
0793a61d 2726 }
eb184479
PZ
2727 ctx->pmu = pmu;
2728
2729 return ctx;
a63eaf34
PM
2730}
2731
2ebd4ffb
MH
2732static struct task_struct *
2733find_lively_task_by_vpid(pid_t vpid)
2734{
2735 struct task_struct *task;
2736 int err;
0793a61d
TG
2737
2738 rcu_read_lock();
2ebd4ffb 2739 if (!vpid)
0793a61d
TG
2740 task = current;
2741 else
2ebd4ffb 2742 task = find_task_by_vpid(vpid);
0793a61d
TG
2743 if (task)
2744 get_task_struct(task);
2745 rcu_read_unlock();
2746
2747 if (!task)
2748 return ERR_PTR(-ESRCH);
2749
0793a61d 2750 /* Reuse ptrace permission checks for now. */
c93f7669
PM
2751 err = -EACCES;
2752 if (!ptrace_may_access(task, PTRACE_MODE_READ))
2753 goto errout;
2754
2ebd4ffb
MH
2755 return task;
2756errout:
2757 put_task_struct(task);
2758 return ERR_PTR(err);
2759
2760}
2761
fe4b04fa
PZ
2762/*
2763 * Returns a matching context with refcount and pincount.
2764 */
108b02cf 2765static struct perf_event_context *
38a81da2 2766find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
0793a61d 2767{
cdd6c482 2768 struct perf_event_context *ctx;
22a4f650 2769 struct perf_cpu_context *cpuctx;
25346b93 2770 unsigned long flags;
8dc85d54 2771 int ctxn, err;
0793a61d 2772
22a4ec72 2773 if (!task) {
cdd6c482 2774 /* Must be root to operate on a CPU event: */
0764771d 2775 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
0793a61d
TG
2776 return ERR_PTR(-EACCES);
2777
0793a61d 2778 /*
cdd6c482 2779 * We could be clever and allow attaching an event to an
0793a61d
TG
2780 * offline CPU and activate it when the CPU comes up, but
2781 * that's for later.
2782 */
f6325e30 2783 if (!cpu_online(cpu))
0793a61d
TG
2784 return ERR_PTR(-ENODEV);
2785
108b02cf 2786 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d 2787 ctx = &cpuctx->ctx;
c93f7669 2788 get_ctx(ctx);
fe4b04fa 2789 ++ctx->pin_count;
0793a61d 2790
0793a61d
TG
2791 return ctx;
2792 }
2793
8dc85d54
PZ
2794 err = -EINVAL;
2795 ctxn = pmu->task_ctx_nr;
2796 if (ctxn < 0)
2797 goto errout;
2798
9ed6060d 2799retry:
8dc85d54 2800 ctx = perf_lock_task_context(task, ctxn, &flags);
c93f7669 2801 if (ctx) {
71a851b4 2802 unclone_ctx(ctx);
fe4b04fa 2803 ++ctx->pin_count;
e625cce1 2804 raw_spin_unlock_irqrestore(&ctx->lock, flags);
9137fb28 2805 } else {
eb184479 2806 ctx = alloc_perf_context(pmu, task);
c93f7669
PM
2807 err = -ENOMEM;
2808 if (!ctx)
2809 goto errout;
eb184479 2810
dbe08d82
ON
2811 err = 0;
2812 mutex_lock(&task->perf_event_mutex);
2813 /*
 2814 * If it has already passed perf_event_exit_task(),
2815 * we must see PF_EXITING, it takes this mutex too.
2816 */
2817 if (task->flags & PF_EXITING)
2818 err = -ESRCH;
2819 else if (task->perf_event_ctxp[ctxn])
2820 err = -EAGAIN;
fe4b04fa 2821 else {
9137fb28 2822 get_ctx(ctx);
fe4b04fa 2823 ++ctx->pin_count;
dbe08d82 2824 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa 2825 }
dbe08d82
ON
2826 mutex_unlock(&task->perf_event_mutex);
2827
2828 if (unlikely(err)) {
9137fb28 2829 put_ctx(ctx);
dbe08d82
ON
2830
2831 if (err == -EAGAIN)
2832 goto retry;
2833 goto errout;
a63eaf34
PM
2834 }
2835 }
2836
0793a61d 2837 return ctx;
c93f7669 2838
9ed6060d 2839errout:
c93f7669 2840 return ERR_PTR(err);
0793a61d
TG
2841}
2842
6fb2915d
LZ
2843static void perf_event_free_filter(struct perf_event *event);
2844
cdd6c482 2845static void free_event_rcu(struct rcu_head *head)
592903cd 2846{
cdd6c482 2847 struct perf_event *event;
592903cd 2848
cdd6c482
IM
2849 event = container_of(head, struct perf_event, rcu_head);
2850 if (event->ns)
2851 put_pid_ns(event->ns);
6fb2915d 2852 perf_event_free_filter(event);
cdd6c482 2853 kfree(event);
592903cd
PZ
2854}
2855
76369139 2856static void ring_buffer_put(struct ring_buffer *rb);
925d519a 2857
cdd6c482 2858static void free_event(struct perf_event *event)
f1600952 2859{
e360adbe 2860 irq_work_sync(&event->pending);
925d519a 2861
cdd6c482 2862 if (!event->parent) {
82cd6def 2863 if (event->attach_state & PERF_ATTACH_TASK)
c5905afb 2864 static_key_slow_dec_deferred(&perf_sched_events);
3af9e859 2865 if (event->attr.mmap || event->attr.mmap_data)
cdd6c482
IM
2866 atomic_dec(&nr_mmap_events);
2867 if (event->attr.comm)
2868 atomic_dec(&nr_comm_events);
2869 if (event->attr.task)
2870 atomic_dec(&nr_task_events);
927c7a9e
FW
2871 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2872 put_callchain_buffers();
08309379
PZ
2873 if (is_cgroup_event(event)) {
2874 atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
c5905afb 2875 static_key_slow_dec_deferred(&perf_sched_events);
08309379 2876 }
d010b332
SE
2877
2878 if (has_branch_stack(event)) {
2879 static_key_slow_dec_deferred(&perf_sched_events);
2880 /* is system-wide event */
2881 if (!(event->attach_state & PERF_ATTACH_TASK))
2882 atomic_dec(&per_cpu(perf_branch_stack_events,
2883 event->cpu));
2884 }
f344011c 2885 }
9ee318a7 2886
76369139
FW
2887 if (event->rb) {
2888 ring_buffer_put(event->rb);
2889 event->rb = NULL;
a4be7c27
PZ
2890 }
2891
e5d1367f
SE
2892 if (is_cgroup_event(event))
2893 perf_detach_cgroup(event);
2894
cdd6c482
IM
2895 if (event->destroy)
2896 event->destroy(event);
e077df4f 2897
0c67b408
PZ
2898 if (event->ctx)
2899 put_ctx(event->ctx);
2900
cdd6c482 2901 call_rcu(&event->rcu_head, free_event_rcu);
f1600952
PZ
2902}
2903
a66a3052 2904int perf_event_release_kernel(struct perf_event *event)
0793a61d 2905{
cdd6c482 2906 struct perf_event_context *ctx = event->ctx;
0793a61d 2907
ad3a37de 2908 WARN_ON_ONCE(ctx->parent_ctx);
a0507c84
PZ
2909 /*
2910 * There are two ways this annotation is useful:
2911 *
2912 * 1) there is a lock recursion from perf_event_exit_task
2913 * see the comment there.
2914 *
2915 * 2) there is a lock-inversion with mmap_sem through
2916 * perf_event_read_group(), which takes faults while
2917 * holding ctx->mutex, however this is called after
2918 * the last filedesc died, so there is no possibility
2919 * to trigger the AB-BA case.
2920 */
2921 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
050735b0 2922 raw_spin_lock_irq(&ctx->lock);
8a49542c 2923 perf_group_detach(event);
050735b0 2924 raw_spin_unlock_irq(&ctx->lock);
e03a9a55 2925 perf_remove_from_context(event);
d859e29f 2926 mutex_unlock(&ctx->mutex);
0793a61d 2927
cdd6c482 2928 free_event(event);
0793a61d
TG
2929
2930 return 0;
2931}
a66a3052 2932EXPORT_SYMBOL_GPL(perf_event_release_kernel);
0793a61d 2933
a66a3052
PZ
2934/*
2935 * Called when the last reference to the file is gone.
2936 */
a6fa941d 2937static void put_event(struct perf_event *event)
fb0459d7 2938{
8882135b 2939 struct task_struct *owner;
fb0459d7 2940
a6fa941d
AV
2941 if (!atomic_long_dec_and_test(&event->refcount))
2942 return;
fb0459d7 2943
8882135b
PZ
2944 rcu_read_lock();
2945 owner = ACCESS_ONCE(event->owner);
2946 /*
2947 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
2948 * !owner it means the list deletion is complete and we can indeed
2949 * free this event, otherwise we need to serialize on
2950 * owner->perf_event_mutex.
2951 */
2952 smp_read_barrier_depends();
2953 if (owner) {
2954 /*
2955 * Since delayed_put_task_struct() also drops the last
2956 * task reference we can safely take a new reference
2957 * while holding the rcu_read_lock().
2958 */
2959 get_task_struct(owner);
2960 }
2961 rcu_read_unlock();
2962
2963 if (owner) {
2964 mutex_lock(&owner->perf_event_mutex);
2965 /*
2966 * We have to re-check the event->owner field, if it is cleared
2967 * we raced with perf_event_exit_task(), acquiring the mutex
2968 * ensured they're done, and we can proceed with freeing the
2969 * event.
2970 */
2971 if (event->owner)
2972 list_del_init(&event->owner_entry);
2973 mutex_unlock(&owner->perf_event_mutex);
2974 put_task_struct(owner);
2975 }
2976
a6fa941d
AV
2977 perf_event_release_kernel(event);
2978}
2979
2980static int perf_release(struct inode *inode, struct file *file)
2981{
2982 put_event(file->private_data);
2983 return 0;
fb0459d7 2984}
fb0459d7 2985
59ed446f 2986u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c0994 2987{
cdd6c482 2988 struct perf_event *child;
e53c0994
PZ
2989 u64 total = 0;
2990
59ed446f
PZ
2991 *enabled = 0;
2992 *running = 0;
2993
6f10581a 2994 mutex_lock(&event->child_mutex);
cdd6c482 2995 total += perf_event_read(event);
59ed446f
PZ
2996 *enabled += event->total_time_enabled +
2997 atomic64_read(&event->child_total_time_enabled);
2998 *running += event->total_time_running +
2999 atomic64_read(&event->child_total_time_running);
3000
3001 list_for_each_entry(child, &event->child_list, child_list) {
cdd6c482 3002 total += perf_event_read(child);
59ed446f
PZ
3003 *enabled += child->total_time_enabled;
3004 *running += child->total_time_running;
3005 }
6f10581a 3006 mutex_unlock(&event->child_mutex);
e53c0994
PZ
3007
3008 return total;
3009}
fb0459d7 3010EXPORT_SYMBOL_GPL(perf_event_read_value);
e53c0994 3011
cdd6c482 3012static int perf_event_read_group(struct perf_event *event,
3dab77fb
PZ
3013 u64 read_format, char __user *buf)
3014{
cdd6c482 3015 struct perf_event *leader = event->group_leader, *sub;
6f10581a
PZ
3016 int n = 0, size = 0, ret = -EFAULT;
3017 struct perf_event_context *ctx = leader->ctx;
abf4868b 3018 u64 values[5];
59ed446f 3019 u64 count, enabled, running;
abf4868b 3020
6f10581a 3021 mutex_lock(&ctx->mutex);
59ed446f 3022 count = perf_event_read_value(leader, &enabled, &running);
3dab77fb
PZ
3023
3024 values[n++] = 1 + leader->nr_siblings;
59ed446f
PZ
3025 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3026 values[n++] = enabled;
3027 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3028 values[n++] = running;
abf4868b
PZ
3029 values[n++] = count;
3030 if (read_format & PERF_FORMAT_ID)
3031 values[n++] = primary_event_id(leader);
3dab77fb
PZ
3032
3033 size = n * sizeof(u64);
3034
3035 if (copy_to_user(buf, values, size))
6f10581a 3036 goto unlock;
3dab77fb 3037
6f10581a 3038 ret = size;
3dab77fb 3039
65abc865 3040 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
abf4868b 3041 n = 0;
3dab77fb 3042
59ed446f 3043 values[n++] = perf_event_read_value(sub, &enabled, &running);
abf4868b
PZ
3044 if (read_format & PERF_FORMAT_ID)
3045 values[n++] = primary_event_id(sub);
3046
3047 size = n * sizeof(u64);
3048
184d3da8 3049 if (copy_to_user(buf + ret, values, size)) {
6f10581a
PZ
3050 ret = -EFAULT;
3051 goto unlock;
3052 }
abf4868b
PZ
3053
3054 ret += size;
3dab77fb 3055 }
6f10581a
PZ
3056unlock:
3057 mutex_unlock(&ctx->mutex);
3dab77fb 3058
abf4868b 3059 return ret;
3dab77fb
PZ
3060}
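/*
 * Sketch of the u64 buffer layout the group read above produces, assuming
 * (hypothetically) a leader with two siblings and a read_format containing
 * both TOTAL_TIME flags plus PERF_FORMAT_ID:
 *
 *	{ nr = 3, time_enabled, time_running, leader_value, leader_id,
 *	  sibling1_value, sibling1_id, sibling2_value, sibling2_id }
 *
 * perf_event_read_one() below emits the single-event variant:
 *
 *	{ value, time_enabled, time_running, id }
 */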
3061
cdd6c482 3062static int perf_event_read_one(struct perf_event *event,
3dab77fb
PZ
3063 u64 read_format, char __user *buf)
3064{
59ed446f 3065 u64 enabled, running;
3dab77fb
PZ
3066 u64 values[4];
3067 int n = 0;
3068
59ed446f
PZ
3069 values[n++] = perf_event_read_value(event, &enabled, &running);
3070 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3071 values[n++] = enabled;
3072 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3073 values[n++] = running;
3dab77fb 3074 if (read_format & PERF_FORMAT_ID)
cdd6c482 3075 values[n++] = primary_event_id(event);
3dab77fb
PZ
3076
3077 if (copy_to_user(buf, values, n * sizeof(u64)))
3078 return -EFAULT;
3079
3080 return n * sizeof(u64);
3081}
3082
0793a61d 3083/*
cdd6c482 3084 * Read the performance event - simple non-blocking version for now
0793a61d
TG
3085 */
3086static ssize_t
cdd6c482 3087perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
0793a61d 3088{
cdd6c482 3089 u64 read_format = event->attr.read_format;
3dab77fb 3090 int ret;
0793a61d 3091
3b6f9e5c 3092 /*
cdd6c482 3093 * Return end-of-file for a read on an event that is in
3b6f9e5c
PM
3094 * error state (i.e. because it was pinned but it couldn't be
3095 * scheduled on to the CPU at some point).
3096 */
cdd6c482 3097 if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5c
PM
3098 return 0;
3099
c320c7b7 3100 if (count < event->read_size)
3dab77fb
PZ
3101 return -ENOSPC;
3102
cdd6c482 3103 WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb 3104 if (read_format & PERF_FORMAT_GROUP)
cdd6c482 3105 ret = perf_event_read_group(event, read_format, buf);
3dab77fb 3106 else
cdd6c482 3107 ret = perf_event_read_one(event, read_format, buf);
0793a61d 3108
3dab77fb 3109 return ret;
0793a61d
TG
3110}
3111
0793a61d
TG
3112static ssize_t
3113perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3114{
cdd6c482 3115 struct perf_event *event = file->private_data;
0793a61d 3116
cdd6c482 3117 return perf_read_hw(event, buf, count);
0793a61d
TG
3118}
3119
3120static unsigned int perf_poll(struct file *file, poll_table *wait)
3121{
cdd6c482 3122 struct perf_event *event = file->private_data;
76369139 3123 struct ring_buffer *rb;
c33a0bc4 3124 unsigned int events = POLL_HUP;
c7138f37 3125
10c6db11
PZ
3126 /*
3127 * Race between perf_event_set_output() and perf_poll(): perf_poll()
3128 * grabs the rb reference but perf_event_set_output() overrides it.
3129 * Here is the timeline for two threads T1, T2:
3130 * t0: T1, rb = rcu_dereference(event->rb)
3131 * t1: T2, old_rb = event->rb
3132 * t2: T2, event->rb = new rb
3133 * t3: T2, ring_buffer_detach(old_rb)
3134 * t4: T1, ring_buffer_attach(rb1)
3135 * t5: T1, poll_wait(event->waitq)
3136 *
3137 * To avoid this problem, we grab mmap_mutex in perf_poll()
3138 * thereby ensuring that the assignment of the new ring buffer
3139 * and the detachment of the old buffer appear atomic to perf_poll()
3140 */
3141 mutex_lock(&event->mmap_mutex);
3142
c7138f37 3143 rcu_read_lock();
76369139 3144 rb = rcu_dereference(event->rb);
10c6db11
PZ
3145 if (rb) {
3146 ring_buffer_attach(event, rb);
76369139 3147 events = atomic_xchg(&rb->poll, 0);
10c6db11 3148 }
c7138f37 3149 rcu_read_unlock();
0793a61d 3150
10c6db11
PZ
3151 mutex_unlock(&event->mmap_mutex);
3152
cdd6c482 3153 poll_wait(file, &event->waitq, wait);
0793a61d 3154
0793a61d
TG
3155 return events;
3156}
3157
cdd6c482 3158static void perf_event_reset(struct perf_event *event)
6de6a7b9 3159{
cdd6c482 3160 (void)perf_event_read(event);
e7850595 3161 local64_set(&event->count, 0);
cdd6c482 3162 perf_event_update_userpage(event);
3df5edad
PZ
3163}
3164
c93f7669 3165/*
cdd6c482
IM
3166 * Holding the top-level event's child_mutex means that any
3167 * descendant process that has inherited this event will block
3168 * in sync_child_event if it goes to exit, thus satisfying the
3169 * task existence requirements of perf_event_enable/disable.
c93f7669 3170 */
cdd6c482
IM
3171static void perf_event_for_each_child(struct perf_event *event,
3172 void (*func)(struct perf_event *))
3df5edad 3173{
cdd6c482 3174 struct perf_event *child;
3df5edad 3175
cdd6c482
IM
3176 WARN_ON_ONCE(event->ctx->parent_ctx);
3177 mutex_lock(&event->child_mutex);
3178 func(event);
3179 list_for_each_entry(child, &event->child_list, child_list)
3df5edad 3180 func(child);
cdd6c482 3181 mutex_unlock(&event->child_mutex);
3df5edad
PZ
3182}
3183
cdd6c482
IM
3184static void perf_event_for_each(struct perf_event *event,
3185 void (*func)(struct perf_event *))
3df5edad 3186{
cdd6c482
IM
3187 struct perf_event_context *ctx = event->ctx;
3188 struct perf_event *sibling;
3df5edad 3189
75f937f2
PZ
3190 WARN_ON_ONCE(ctx->parent_ctx);
3191 mutex_lock(&ctx->mutex);
cdd6c482 3192 event = event->group_leader;
75f937f2 3193
cdd6c482 3194 perf_event_for_each_child(event, func);
cdd6c482 3195 list_for_each_entry(sibling, &event->sibling_list, group_entry)
724b6daa 3196 perf_event_for_each_child(sibling, func);
75f937f2 3197 mutex_unlock(&ctx->mutex);
6de6a7b9
PZ
3198}
3199
cdd6c482 3200static int perf_event_period(struct perf_event *event, u64 __user *arg)
08247e31 3201{
cdd6c482 3202 struct perf_event_context *ctx = event->ctx;
08247e31
PZ
3203 int ret = 0;
3204 u64 value;
3205
6c7e550f 3206 if (!is_sampling_event(event))
08247e31
PZ
3207 return -EINVAL;
3208
ad0cf347 3209 if (copy_from_user(&value, arg, sizeof(value)))
08247e31
PZ
3210 return -EFAULT;
3211
3212 if (!value)
3213 return -EINVAL;
3214
e625cce1 3215 raw_spin_lock_irq(&ctx->lock);
cdd6c482
IM
3216 if (event->attr.freq) {
3217 if (value > sysctl_perf_event_sample_rate) {
08247e31
PZ
3218 ret = -EINVAL;
3219 goto unlock;
3220 }
3221
cdd6c482 3222 event->attr.sample_freq = value;
08247e31 3223 } else {
cdd6c482
IM
3224 event->attr.sample_period = value;
3225 event->hw.sample_period = value;
08247e31
PZ
3226 }
3227unlock:
e625cce1 3228 raw_spin_unlock_irq(&ctx->lock);
08247e31
PZ
3229
3230 return ret;
3231}
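/*
 * User space reaches the function above through the PERF_EVENT_IOC_PERIOD
 * ioctl; a minimal sketch (error handling omitted, perf_fd assumed to be
 * an already-open perf event fd):
 *
 *	__u64 period = 200000;
 *	ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);
 *
 * which updates attr.sample_period, or attr.sample_freq for a
 * frequency-based event.
 */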
3232
ac9721f3
PZ
3233static const struct file_operations perf_fops;
3234
2903ff01 3235static inline int perf_fget_light(int fd, struct fd *p)
ac9721f3 3236{
2903ff01
AV
3237 struct fd f = fdget(fd);
3238 if (!f.file)
3239 return -EBADF;
ac9721f3 3240
2903ff01
AV
3241 if (f.file->f_op != &perf_fops) {
3242 fdput(f);
3243 return -EBADF;
ac9721f3 3244 }
2903ff01
AV
3245 *p = f;
3246 return 0;
ac9721f3
PZ
3247}
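/*
 * On success the caller owns the struct fd and must release it with
 * fdput() once it is done with f.file, as the PERF_EVENT_IOC_SET_OUTPUT
 * handler below does after resolving output.file->private_data.
 */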
3248
3249static int perf_event_set_output(struct perf_event *event,
3250 struct perf_event *output_event);
6fb2915d 3251static int perf_event_set_filter(struct perf_event *event, void __user *arg);
a4be7c27 3252
d859e29f
PM
3253static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3254{
cdd6c482
IM
3255 struct perf_event *event = file->private_data;
3256 void (*func)(struct perf_event *);
3df5edad 3257 u32 flags = arg;
d859e29f
PM
3258
3259 switch (cmd) {
cdd6c482
IM
3260 case PERF_EVENT_IOC_ENABLE:
3261 func = perf_event_enable;
d859e29f 3262 break;
cdd6c482
IM
3263 case PERF_EVENT_IOC_DISABLE:
3264 func = perf_event_disable;
79f14641 3265 break;
cdd6c482
IM
3266 case PERF_EVENT_IOC_RESET:
3267 func = perf_event_reset;
6de6a7b9 3268 break;
3df5edad 3269
cdd6c482
IM
3270 case PERF_EVENT_IOC_REFRESH:
3271 return perf_event_refresh(event, arg);
08247e31 3272
cdd6c482
IM
3273 case PERF_EVENT_IOC_PERIOD:
3274 return perf_event_period(event, (u64 __user *)arg);
08247e31 3275
cdd6c482 3276 case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3 3277 {
ac9721f3 3278 int ret;
ac9721f3 3279 if (arg != -1) {
2903ff01
AV
3280 struct perf_event *output_event;
3281 struct fd output;
3282 ret = perf_fget_light(arg, &output);
3283 if (ret)
3284 return ret;
3285 output_event = output.file->private_data;
3286 ret = perf_event_set_output(event, output_event);
3287 fdput(output);
3288 } else {
3289 ret = perf_event_set_output(event, NULL);
ac9721f3 3290 }
ac9721f3
PZ
3291 return ret;
3292 }
a4be7c27 3293
6fb2915d
LZ
3294 case PERF_EVENT_IOC_SET_FILTER:
3295 return perf_event_set_filter(event, (void __user *)arg);
3296
d859e29f 3297 default:
3df5edad 3298 return -ENOTTY;
d859e29f 3299 }
3df5edad
PZ
3300
3301 if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482 3302 perf_event_for_each(event, func);
3df5edad 3303 else
cdd6c482 3304 perf_event_for_each_child(event, func);
3df5edad
PZ
3305
3306 return 0;
d859e29f
PM
3307}
3308
cdd6c482 3309int perf_event_task_enable(void)
771d7cde 3310{
cdd6c482 3311 struct perf_event *event;
771d7cde 3312
cdd6c482
IM
3313 mutex_lock(&current->perf_event_mutex);
3314 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3315 perf_event_for_each_child(event, perf_event_enable);
3316 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
3317
3318 return 0;
3319}
3320
cdd6c482 3321int perf_event_task_disable(void)
771d7cde 3322{
cdd6c482 3323 struct perf_event *event;
771d7cde 3324
cdd6c482
IM
3325 mutex_lock(&current->perf_event_mutex);
3326 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3327 perf_event_for_each_child(event, perf_event_disable);
3328 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
3329
3330 return 0;
3331}
3332
cdd6c482 3333static int perf_event_index(struct perf_event *event)
194002b2 3334{
a4eaf7f1
PZ
3335 if (event->hw.state & PERF_HES_STOPPED)
3336 return 0;
3337
cdd6c482 3338 if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b2
PZ
3339 return 0;
3340
35edc2a5 3341 return event->pmu->event_idx(event);
194002b2
PZ
3342}
3343
c4794295 3344static void calc_timer_values(struct perf_event *event,
e3f3541c 3345 u64 *now,
7f310a5d
EM
3346 u64 *enabled,
3347 u64 *running)
c4794295 3348{
e3f3541c 3349 u64 ctx_time;
c4794295 3350
e3f3541c
PZ
3351 *now = perf_clock();
3352 ctx_time = event->shadow_ctx_time + *now;
c4794295
EM
3353 *enabled = ctx_time - event->tstamp_enabled;
3354 *running = ctx_time - event->tstamp_running;
3355}
3356
c7206205 3357void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
e3f3541c
PZ
3358{
3359}
3360
38ff667b
PZ
3361/*
3362 * Callers need to ensure there can be no nesting of this function, otherwise
 3363 * the seqlock logic goes bad. We cannot serialize this because the arch
3364 * code calls this from NMI context.
3365 */
cdd6c482 3366void perf_event_update_userpage(struct perf_event *event)
37d81828 3367{
cdd6c482 3368 struct perf_event_mmap_page *userpg;
76369139 3369 struct ring_buffer *rb;
e3f3541c 3370 u64 enabled, running, now;
38ff667b
PZ
3371
3372 rcu_read_lock();
0d641208
EM
3373 /*
3374 * compute total_time_enabled, total_time_running
3375 * based on snapshot values taken when the event
3376 * was last scheduled in.
3377 *
 3378 * we cannot simply call update_context_time()
 3379 * because of locking issues as we can be called in
3380 * NMI context
3381 */
e3f3541c 3382 calc_timer_values(event, &now, &enabled, &running);
76369139
FW
3383 rb = rcu_dereference(event->rb);
3384 if (!rb)
38ff667b
PZ
3385 goto unlock;
3386
76369139 3387 userpg = rb->user_page;
37d81828 3388
7b732a75
PZ
3389 /*
3390 * Disable preemption so as to not let the corresponding user-space
3391 * spin too long if we get preempted.
3392 */
3393 preempt_disable();
37d81828 3394 ++userpg->lock;
92f22a38 3395 barrier();
cdd6c482 3396 userpg->index = perf_event_index(event);
b5e58793 3397 userpg->offset = perf_event_count(event);
365a4038 3398 if (userpg->index)
e7850595 3399 userpg->offset -= local64_read(&event->hw.prev_count);
7b732a75 3400
0d641208 3401 userpg->time_enabled = enabled +
cdd6c482 3402 atomic64_read(&event->child_total_time_enabled);
7f8b4e4e 3403
0d641208 3404 userpg->time_running = running +
cdd6c482 3405 atomic64_read(&event->child_total_time_running);
7f8b4e4e 3406
c7206205 3407 arch_perf_update_userpage(userpg, now);
e3f3541c 3408
92f22a38 3409 barrier();
37d81828 3410 ++userpg->lock;
7b732a75 3411 preempt_enable();
38ff667b 3412unlock:
7b732a75 3413 rcu_read_unlock();
37d81828
PM
3414}
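/*
 * A user-space reader of the mmap'ed control page is expected to pair with
 * the lock/barrier sequence above roughly like this (a sketch, not code
 * from this file; pc points at the mapped perf_event_mmap_page):
 *
 *	u32 seq, idx;
 *	u64 offset;
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		offset = pc->offset;
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * i.e. the reads are retried whenever the sequence count changed while
 * they were in flight.
 */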
3415
906010b2
PZ
3416static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3417{
3418 struct perf_event *event = vma->vm_file->private_data;
76369139 3419 struct ring_buffer *rb;
906010b2
PZ
3420 int ret = VM_FAULT_SIGBUS;
3421
3422 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3423 if (vmf->pgoff == 0)
3424 ret = 0;
3425 return ret;
3426 }
3427
3428 rcu_read_lock();
76369139
FW
3429 rb = rcu_dereference(event->rb);
3430 if (!rb)
906010b2
PZ
3431 goto unlock;
3432
3433 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3434 goto unlock;
3435
76369139 3436 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b2
PZ
3437 if (!vmf->page)
3438 goto unlock;
3439
3440 get_page(vmf->page);
3441 vmf->page->mapping = vma->vm_file->f_mapping;
3442 vmf->page->index = vmf->pgoff;
3443
3444 ret = 0;
3445unlock:
3446 rcu_read_unlock();
3447
3448 return ret;
3449}
3450
10c6db11
PZ
3451static void ring_buffer_attach(struct perf_event *event,
3452 struct ring_buffer *rb)
3453{
3454 unsigned long flags;
3455
3456 if (!list_empty(&event->rb_entry))
3457 return;
3458
3459 spin_lock_irqsave(&rb->event_lock, flags);
3460 if (!list_empty(&event->rb_entry))
3461 goto unlock;
3462
3463 list_add(&event->rb_entry, &rb->event_list);
3464unlock:
3465 spin_unlock_irqrestore(&rb->event_lock, flags);
3466}
3467
3468static void ring_buffer_detach(struct perf_event *event,
3469 struct ring_buffer *rb)
3470{
3471 unsigned long flags;
3472
3473 if (list_empty(&event->rb_entry))
3474 return;
3475
3476 spin_lock_irqsave(&rb->event_lock, flags);
3477 list_del_init(&event->rb_entry);
3478 wake_up_all(&event->waitq);
3479 spin_unlock_irqrestore(&rb->event_lock, flags);
3480}
3481
3482static void ring_buffer_wakeup(struct perf_event *event)
3483{
3484 struct ring_buffer *rb;
3485
3486 rcu_read_lock();
3487 rb = rcu_dereference(event->rb);
44b7f4b9
WD
3488 if (!rb)
3489 goto unlock;
3490
3491 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
10c6db11 3492 wake_up_all(&event->waitq);
44b7f4b9
WD
3493
3494unlock:
10c6db11
PZ
3495 rcu_read_unlock();
3496}
3497
76369139 3498static void rb_free_rcu(struct rcu_head *rcu_head)
906010b2 3499{
76369139 3500 struct ring_buffer *rb;
906010b2 3501
76369139
FW
3502 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3503 rb_free(rb);
7b732a75
PZ
3504}
3505
76369139 3506static struct ring_buffer *ring_buffer_get(struct perf_event *event)
7b732a75 3507{
76369139 3508 struct ring_buffer *rb;
7b732a75 3509
ac9721f3 3510 rcu_read_lock();
76369139
FW
3511 rb = rcu_dereference(event->rb);
3512 if (rb) {
3513 if (!atomic_inc_not_zero(&rb->refcount))
3514 rb = NULL;
ac9721f3
PZ
3515 }
3516 rcu_read_unlock();
3517
76369139 3518 return rb;
ac9721f3
PZ
3519}
3520
76369139 3521static void ring_buffer_put(struct ring_buffer *rb)
ac9721f3 3522{
10c6db11
PZ
3523 struct perf_event *event, *n;
3524 unsigned long flags;
3525
76369139 3526 if (!atomic_dec_and_test(&rb->refcount))
ac9721f3 3527 return;
7b732a75 3528
10c6db11
PZ
3529 spin_lock_irqsave(&rb->event_lock, flags);
3530 list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
3531 list_del_init(&event->rb_entry);
3532 wake_up_all(&event->waitq);
3533 }
3534 spin_unlock_irqrestore(&rb->event_lock, flags);
3535
76369139 3536 call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a75
PZ
3537}
3538
3539static void perf_mmap_open(struct vm_area_struct *vma)
3540{
cdd6c482 3541 struct perf_event *event = vma->vm_file->private_data;
7b732a75 3542
cdd6c482 3543 atomic_inc(&event->mmap_count);
7b732a75
PZ
3544}
3545
3546static void perf_mmap_close(struct vm_area_struct *vma)
3547{
cdd6c482 3548 struct perf_event *event = vma->vm_file->private_data;
7b732a75 3549
cdd6c482 3550 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
76369139 3551 unsigned long size = perf_data_size(event->rb);
ac9721f3 3552 struct user_struct *user = event->mmap_user;
76369139 3553 struct ring_buffer *rb = event->rb;
789f90fc 3554
906010b2 3555 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
bc3e53f6 3556 vma->vm_mm->pinned_vm -= event->mmap_locked;
76369139 3557 rcu_assign_pointer(event->rb, NULL);
10c6db11 3558 ring_buffer_detach(event, rb);
cdd6c482 3559 mutex_unlock(&event->mmap_mutex);
ac9721f3 3560
76369139 3561 ring_buffer_put(rb);
ac9721f3 3562 free_uid(user);
7b732a75 3563 }
37d81828
PM
3564}
3565
f0f37e2f 3566static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea8
PZ
3567 .open = perf_mmap_open,
3568 .close = perf_mmap_close,
3569 .fault = perf_mmap_fault,
3570 .page_mkwrite = perf_mmap_fault,
37d81828
PM
3571};
3572
3573static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3574{
cdd6c482 3575 struct perf_event *event = file->private_data;
22a4f650 3576 unsigned long user_locked, user_lock_limit;
789f90fc 3577 struct user_struct *user = current_user();
22a4f650 3578 unsigned long locked, lock_limit;
76369139 3579 struct ring_buffer *rb;
7b732a75
PZ
3580 unsigned long vma_size;
3581 unsigned long nr_pages;
789f90fc 3582 long user_extra, extra;
d57e34fd 3583 int ret = 0, flags = 0;
37d81828 3584
c7920614
PZ
3585 /*
3586 * Don't allow mmap() of inherited per-task counters. This would
3587 * create a performance issue due to all children writing to the
76369139 3588 * same rb.
c7920614
PZ
3589 */
3590 if (event->cpu == -1 && event->attr.inherit)
3591 return -EINVAL;
3592
43a21ea8 3593 if (!(vma->vm_flags & VM_SHARED))
37d81828 3594 return -EINVAL;
7b732a75
PZ
3595
3596 vma_size = vma->vm_end - vma->vm_start;
3597 nr_pages = (vma_size / PAGE_SIZE) - 1;
3598
7730d865 3599 /*
76369139 3600 * If we have rb pages, ensure they're a power of two so we
7730d865
PZ
3601 * can do bitmasks instead of modulo.
3602 */
3603 if (nr_pages != 0 && !is_power_of_2(nr_pages))
37d81828
PM
3604 return -EINVAL;
3605
7b732a75 3606 if (vma_size != PAGE_SIZE * (1 + nr_pages))
37d81828
PM
3607 return -EINVAL;
3608
7b732a75
PZ
3609 if (vma->vm_pgoff != 0)
3610 return -EINVAL;
37d81828 3611
cdd6c482
IM
3612 WARN_ON_ONCE(event->ctx->parent_ctx);
3613 mutex_lock(&event->mmap_mutex);
76369139
FW
3614 if (event->rb) {
3615 if (event->rb->nr_pages == nr_pages)
3616 atomic_inc(&event->rb->refcount);
ac9721f3 3617 else
ebb3c4c4
PZ
3618 ret = -EINVAL;
3619 goto unlock;
3620 }
3621
789f90fc 3622 user_extra = nr_pages + 1;
cdd6c482 3623 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
a3862d3f
IM
3624
3625 /*
3626 * Increase the limit linearly with more CPUs:
3627 */
3628 user_lock_limit *= num_online_cpus();
3629
789f90fc 3630 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
c5078f78 3631
789f90fc
PZ
3632 extra = 0;
3633 if (user_locked > user_lock_limit)
3634 extra = user_locked - user_lock_limit;
7b732a75 3635
78d7d407 3636 lock_limit = rlimit(RLIMIT_MEMLOCK);
7b732a75 3637 lock_limit >>= PAGE_SHIFT;
bc3e53f6 3638 locked = vma->vm_mm->pinned_vm + extra;
7b732a75 3639
459ec28a
IM
3640 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3641 !capable(CAP_IPC_LOCK)) {
ebb3c4c4
PZ
3642 ret = -EPERM;
3643 goto unlock;
3644 }
7b732a75 3645
76369139 3646 WARN_ON(event->rb);
906010b2 3647
d57e34fd 3648 if (vma->vm_flags & VM_WRITE)
76369139 3649 flags |= RING_BUFFER_WRITABLE;
d57e34fd 3650
4ec8363d
VW
3651 rb = rb_alloc(nr_pages,
3652 event->attr.watermark ? event->attr.wakeup_watermark : 0,
3653 event->cpu, flags);
3654
76369139 3655 if (!rb) {
ac9721f3 3656 ret = -ENOMEM;
ebb3c4c4 3657 goto unlock;
ac9721f3 3658 }
76369139 3659 rcu_assign_pointer(event->rb, rb);
43a21ea8 3660
ac9721f3
PZ
3661 atomic_long_add(user_extra, &user->locked_vm);
3662 event->mmap_locked = extra;
3663 event->mmap_user = get_current_user();
bc3e53f6 3664 vma->vm_mm->pinned_vm += event->mmap_locked;
ac9721f3 3665
9a0f05cb
PZ
3666 perf_event_update_userpage(event);
3667
ebb3c4c4 3668unlock:
ac9721f3
PZ
3669 if (!ret)
3670 atomic_inc(&event->mmap_count);
cdd6c482 3671 mutex_unlock(&event->mmap_mutex);
37d81828 3672
37d81828
PM
3673 vma->vm_flags |= VM_RESERVED;
3674 vma->vm_ops = &perf_mmap_vmops;
7b732a75
PZ
3675
3676 return ret;
37d81828
PM
3677}
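
perf_mmap() only accepts a shared mapping at page offset 0 whose size is one metadata page plus a power-of-two number of data pages. The following is a minimal userspace sketch (not part of the kernel source) of how a profiler would satisfy those checks; it assumes a Linux toolchain with <linux/perf_event.h> available and keeps error handling to a minimum:

/* Map 1 metadata page + 2^4 data pages of a perf event fd. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
        attr.sample_period = 1;
        attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME;

        /* measure the current task on any CPU */
        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        long page_size = sysconf(_SC_PAGESIZE);
        size_t nr_data_pages = 16;              /* must be a power of two */
        size_t len = (1 + nr_data_pages) * page_size;

        /* MAP_SHARED and pgoff == 0 are required, matching the checks above */
        void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (base == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        struct perf_event_mmap_page *meta = base;
        printf("data_head starts at %llu\n",
               (unsigned long long)meta->data_head);

        munmap(base, len);
        close(fd);
        return 0;
}
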
3678
3c446b3d
PZ
3679static int perf_fasync(int fd, struct file *filp, int on)
3680{
3c446b3d 3681 struct inode *inode = filp->f_path.dentry->d_inode;
cdd6c482 3682 struct perf_event *event = filp->private_data;
3c446b3d
PZ
3683 int retval;
3684
3685 mutex_lock(&inode->i_mutex);
cdd6c482 3686 retval = fasync_helper(fd, filp, on, &event->fasync);
3c446b3d
PZ
3687 mutex_unlock(&inode->i_mutex);
3688
3689 if (retval < 0)
3690 return retval;
3691
3692 return 0;
3693}
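
perf_fasync() above is what services a userspace O_ASYNC request on the event fd, and kill_fasync() in perf_event_wakeup() below is what delivers the resulting SIGIO. A small userspace sketch of wiring that up (illustrative only; the handler body is left as a comment):

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void on_sigio(int sig)
{
        (void)sig;              /* samples are ready; wake the reader loop */
}

static int enable_sigio(int perf_fd)
{
        int flags;

        if (signal(SIGIO, on_sigio) == SIG_ERR)
                return -1;
        if (fcntl(perf_fd, F_SETOWN, getpid()) < 0)
                return -1;
        flags = fcntl(perf_fd, F_GETFL);
        if (flags < 0)
                return -1;
        return fcntl(perf_fd, F_SETFL, flags | O_ASYNC);
}
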
3694
0793a61d 3695static const struct file_operations perf_fops = {
3326c1ce 3696 .llseek = no_llseek,
0793a61d
TG
3697 .release = perf_release,
3698 .read = perf_read,
3699 .poll = perf_poll,
d859e29f
PM
3700 .unlocked_ioctl = perf_ioctl,
3701 .compat_ioctl = perf_ioctl,
37d81828 3702 .mmap = perf_mmap,
3c446b3d 3703 .fasync = perf_fasync,
0793a61d
TG
3704};
3705
925d519a 3706/*
cdd6c482 3707 * Perf event wakeup
925d519a
PZ
3708 *
3709 * If there's data, ensure we set the poll() state and publish everything
3710 * to user-space before waking everybody up.
3711 */
3712
cdd6c482 3713void perf_event_wakeup(struct perf_event *event)
925d519a 3714{
10c6db11 3715 ring_buffer_wakeup(event);
4c9e2542 3716
cdd6c482
IM
3717 if (event->pending_kill) {
3718 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3719 event->pending_kill = 0;
4c9e2542 3720 }
925d519a
PZ
3721}
3722
e360adbe 3723static void perf_pending_event(struct irq_work *entry)
79f14641 3724{
cdd6c482
IM
3725 struct perf_event *event = container_of(entry,
3726 struct perf_event, pending);
79f14641 3727
cdd6c482
IM
3728 if (event->pending_disable) {
3729 event->pending_disable = 0;
3730 __perf_event_disable(event);
79f14641
PZ
3731 }
3732
cdd6c482
IM
3733 if (event->pending_wakeup) {
3734 event->pending_wakeup = 0;
3735 perf_event_wakeup(event);
79f14641
PZ
3736 }
3737}
3738
39447b38
ZY
3739/*
3740 * We assume there is only KVM supporting the callbacks.
3741 * Later on, we might change it to a list if there is
3742 * another virtualization implementation supporting the callbacks.
3743 */
3744struct perf_guest_info_callbacks *perf_guest_cbs;
3745
3746int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3747{
3748 perf_guest_cbs = cbs;
3749 return 0;
3750}
3751EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3752
3753int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3754{
3755 perf_guest_cbs = NULL;
3756 return 0;
3757}
3758EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
3759
c980d109
ACM
3760static void __perf_event_header__init_id(struct perf_event_header *header,
3761 struct perf_sample_data *data,
3762 struct perf_event *event)
6844c09d
ACM
3763{
3764 u64 sample_type = event->attr.sample_type;
3765
3766 data->type = sample_type;
3767 header->size += event->id_header_size;
3768
3769 if (sample_type & PERF_SAMPLE_TID) {
3770 /* namespace issues */
3771 data->tid_entry.pid = perf_event_pid(event, current);
3772 data->tid_entry.tid = perf_event_tid(event, current);
3773 }
3774
3775 if (sample_type & PERF_SAMPLE_TIME)
3776 data->time = perf_clock();
3777
3778 if (sample_type & PERF_SAMPLE_ID)
3779 data->id = primary_event_id(event);
3780
3781 if (sample_type & PERF_SAMPLE_STREAM_ID)
3782 data->stream_id = event->id;
3783
3784 if (sample_type & PERF_SAMPLE_CPU) {
3785 data->cpu_entry.cpu = raw_smp_processor_id();
3786 data->cpu_entry.reserved = 0;
3787 }
3788}
3789
76369139
FW
3790void perf_event_header__init_id(struct perf_event_header *header,
3791 struct perf_sample_data *data,
3792 struct perf_event *event)
c980d109
ACM
3793{
3794 if (event->attr.sample_id_all)
3795 __perf_event_header__init_id(header, data, event);
3796}
3797
3798static void __perf_event__output_id_sample(struct perf_output_handle *handle,
3799 struct perf_sample_data *data)
3800{
3801 u64 sample_type = data->type;
3802
3803 if (sample_type & PERF_SAMPLE_TID)
3804 perf_output_put(handle, data->tid_entry);
3805
3806 if (sample_type & PERF_SAMPLE_TIME)
3807 perf_output_put(handle, data->time);
3808
3809 if (sample_type & PERF_SAMPLE_ID)
3810 perf_output_put(handle, data->id);
3811
3812 if (sample_type & PERF_SAMPLE_STREAM_ID)
3813 perf_output_put(handle, data->stream_id);
3814
3815 if (sample_type & PERF_SAMPLE_CPU)
3816 perf_output_put(handle, data->cpu_entry);
3817}
3818
76369139
FW
3819void perf_event__output_id_sample(struct perf_event *event,
3820 struct perf_output_handle *handle,
3821 struct perf_sample_data *sample)
c980d109
ACM
3822{
3823 if (event->attr.sample_id_all)
3824 __perf_event__output_id_sample(handle, sample);
3825}
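
__perf_event__output_id_sample() appends the optional identification fields to non-sample records in a fixed order when attr.sample_id_all is set. A userspace sketch of the corresponding trailer layout, assuming every one of the listed PERF_SAMPLE_* bits is enabled (struct sample_id_trailer is an illustrative name; each field is present only if its bit is set):

#include <stdint.h>

struct sample_id_trailer {
        uint32_t pid, tid;      /* PERF_SAMPLE_TID */
        uint64_t time;          /* PERF_SAMPLE_TIME */
        uint64_t id;            /* PERF_SAMPLE_ID */
        uint64_t stream_id;     /* PERF_SAMPLE_STREAM_ID */
        uint32_t cpu, res;      /* PERF_SAMPLE_CPU */
};
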
3826
3dab77fb 3827static void perf_output_read_one(struct perf_output_handle *handle,
eed01528
SE
3828 struct perf_event *event,
3829 u64 enabled, u64 running)
3dab77fb 3830{
cdd6c482 3831 u64 read_format = event->attr.read_format;
3dab77fb
PZ
3832 u64 values[4];
3833 int n = 0;
3834
b5e58793 3835 values[n++] = perf_event_count(event);
3dab77fb 3836 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
eed01528 3837 values[n++] = enabled +
cdd6c482 3838 atomic64_read(&event->child_total_time_enabled);
3dab77fb
PZ
3839 }
3840 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
eed01528 3841 values[n++] = running +
cdd6c482 3842 atomic64_read(&event->child_total_time_running);
3dab77fb
PZ
3843 }
3844 if (read_format & PERF_FORMAT_ID)
cdd6c482 3845 values[n++] = primary_event_id(event);
3dab77fb 3846
76369139 3847 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
3848}
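
The values[] array built above for PERF_SAMPLE_READ uses the same layout a profiler sees when read()ing a single (non-group) event fd. A userspace sketch of that layout, assuming all three optional read_format bits are requested (struct read_format_one is an illustrative name):

#include <stdint.h>
#include <unistd.h>

struct read_format_one {
        uint64_t value;         /* the counter value */
        uint64_t time_enabled;  /* PERF_FORMAT_TOTAL_TIME_ENABLED */
        uint64_t time_running;  /* PERF_FORMAT_TOTAL_TIME_RUNNING */
        uint64_t id;            /* PERF_FORMAT_ID */
};

/* usage: struct read_format_one rf; read(perf_fd, &rf, sizeof(rf)); */
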
3849
3850/*
cdd6c482 3851 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3dab77fb
PZ
3852 */
3853static void perf_output_read_group(struct perf_output_handle *handle,
eed01528
SE
3854 struct perf_event *event,
3855 u64 enabled, u64 running)
3dab77fb 3856{
cdd6c482
IM
3857 struct perf_event *leader = event->group_leader, *sub;
3858 u64 read_format = event->attr.read_format;
3dab77fb
PZ
3859 u64 values[5];
3860 int n = 0;
3861
3862 values[n++] = 1 + leader->nr_siblings;
3863
3864 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
eed01528 3865 values[n++] = enabled;
3dab77fb
PZ
3866
3867 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
eed01528 3868 values[n++] = running;
3dab77fb 3869
cdd6c482 3870 if (leader != event)
3dab77fb
PZ
3871 leader->pmu->read(leader);
3872
b5e58793 3873 values[n++] = perf_event_count(leader);
3dab77fb 3874 if (read_format & PERF_FORMAT_ID)
cdd6c482 3875 values[n++] = primary_event_id(leader);
3dab77fb 3876
76369139 3877 __output_copy(handle, values, n * sizeof(u64));
3dab77fb 3878
65abc865 3879 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3dab77fb
PZ
3880 n = 0;
3881
cdd6c482 3882 if (sub != event)
3dab77fb
PZ
3883 sub->pmu->read(sub);
3884
b5e58793 3885 values[n++] = perf_event_count(sub);
3dab77fb 3886 if (read_format & PERF_FORMAT_ID)
cdd6c482 3887 values[n++] = primary_event_id(sub);
3dab77fb 3888
76369139 3889 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
3890 }
3891}
3892
eed01528
SE
3893#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3894 PERF_FORMAT_TOTAL_TIME_RUNNING)
3895
3dab77fb 3896static void perf_output_read(struct perf_output_handle *handle,
cdd6c482 3897 struct perf_event *event)
3dab77fb 3898{
e3f3541c 3899 u64 enabled = 0, running = 0, now;
eed01528
SE
3900 u64 read_format = event->attr.read_format;
3901
3902 /*
3903 * compute total_time_enabled, total_time_running
3904 * based on snapshot values taken when the event
3905 * was last scheduled in.
3906 *
 3907 * we cannot simply call update_context_time()
 3908 * because of locking issues, as we are called in
3909 * NMI context
3910 */
c4794295 3911 if (read_format & PERF_FORMAT_TOTAL_TIMES)
e3f3541c 3912 calc_timer_values(event, &now, &enabled, &running);
eed01528 3913
cdd6c482 3914 if (event->attr.read_format & PERF_FORMAT_GROUP)
eed01528 3915 perf_output_read_group(handle, event, enabled, running);
3dab77fb 3916 else
eed01528 3917 perf_output_read_one(handle, event, enabled, running);
3dab77fb
PZ
3918}
3919
5622f295
MM
3920void perf_output_sample(struct perf_output_handle *handle,
3921 struct perf_event_header *header,
3922 struct perf_sample_data *data,
cdd6c482 3923 struct perf_event *event)
5622f295
MM
3924{
3925 u64 sample_type = data->type;
3926
3927 perf_output_put(handle, *header);
3928
3929 if (sample_type & PERF_SAMPLE_IP)
3930 perf_output_put(handle, data->ip);
3931
3932 if (sample_type & PERF_SAMPLE_TID)
3933 perf_output_put(handle, data->tid_entry);
3934
3935 if (sample_type & PERF_SAMPLE_TIME)
3936 perf_output_put(handle, data->time);
3937
3938 if (sample_type & PERF_SAMPLE_ADDR)
3939 perf_output_put(handle, data->addr);
3940
3941 if (sample_type & PERF_SAMPLE_ID)
3942 perf_output_put(handle, data->id);
3943
3944 if (sample_type & PERF_SAMPLE_STREAM_ID)
3945 perf_output_put(handle, data->stream_id);
3946
3947 if (sample_type & PERF_SAMPLE_CPU)
3948 perf_output_put(handle, data->cpu_entry);
3949
3950 if (sample_type & PERF_SAMPLE_PERIOD)
3951 perf_output_put(handle, data->period);
3952
3953 if (sample_type & PERF_SAMPLE_READ)
cdd6c482 3954 perf_output_read(handle, event);
5622f295
MM
3955
3956 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3957 if (data->callchain) {
3958 int size = 1;
3959
3960 if (data->callchain)
3961 size += data->callchain->nr;
3962
3963 size *= sizeof(u64);
3964
76369139 3965 __output_copy(handle, data->callchain, size);
5622f295
MM
3966 } else {
3967 u64 nr = 0;
3968 perf_output_put(handle, nr);
3969 }
3970 }
3971
3972 if (sample_type & PERF_SAMPLE_RAW) {
3973 if (data->raw) {
3974 perf_output_put(handle, data->raw->size);
76369139
FW
3975 __output_copy(handle, data->raw->data,
3976 data->raw->size);
5622f295
MM
3977 } else {
3978 struct {
3979 u32 size;
3980 u32 data;
3981 } raw = {
3982 .size = sizeof(u32),
3983 .data = 0,
3984 };
3985 perf_output_put(handle, raw);
3986 }
3987 }
a7ac67ea
PZ
3988
3989 if (!event->attr.watermark) {
3990 int wakeup_events = event->attr.wakeup_events;
3991
3992 if (wakeup_events) {
3993 struct ring_buffer *rb = handle->rb;
3994 int events = local_inc_return(&rb->events);
3995
3996 if (events >= wakeup_events) {
3997 local_sub(wakeup_events, &rb->events);
3998 local_inc(&rb->wakeup);
3999 }
4000 }
4001 }
bce38cd5
SE
4002
4003 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4004 if (data->br_stack) {
4005 size_t size;
4006
4007 size = data->br_stack->nr
4008 * sizeof(struct perf_branch_entry);
4009
4010 perf_output_put(handle, data->br_stack->nr);
4011 perf_output_copy(handle, data->br_stack->entries, size);
4012 } else {
4013 /*
4014 * we always store at least the value of nr
4015 */
4016 u64 nr = 0;
4017 perf_output_put(handle, nr);
4018 }
4019 }
5622f295
MM
4020}
4021
4022void perf_prepare_sample(struct perf_event_header *header,
4023 struct perf_sample_data *data,
cdd6c482 4024 struct perf_event *event,
5622f295 4025 struct pt_regs *regs)
7b732a75 4026{
cdd6c482 4027 u64 sample_type = event->attr.sample_type;
7b732a75 4028
cdd6c482 4029 header->type = PERF_RECORD_SAMPLE;
c320c7b7 4030 header->size = sizeof(*header) + event->header_size;
5622f295
MM
4031
4032 header->misc = 0;
4033 header->misc |= perf_misc_flags(regs);
6fab0192 4034
c980d109 4035 __perf_event_header__init_id(header, data, event);
6844c09d 4036
c320c7b7 4037 if (sample_type & PERF_SAMPLE_IP)
5622f295
MM
4038 data->ip = perf_instruction_pointer(regs);
4039
b23f3325 4040 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5622f295 4041 int size = 1;
394ee076 4042
e6dab5ff 4043 data->callchain = perf_callchain(event, regs);
5622f295
MM
4044
4045 if (data->callchain)
4046 size += data->callchain->nr;
4047
4048 header->size += size * sizeof(u64);
394ee076
PZ
4049 }
4050
3a43ce68 4051 if (sample_type & PERF_SAMPLE_RAW) {
a044560c
PZ
4052 int size = sizeof(u32);
4053
4054 if (data->raw)
4055 size += data->raw->size;
4056 else
4057 size += sizeof(u32);
4058
4059 WARN_ON_ONCE(size & (sizeof(u64)-1));
5622f295 4060 header->size += size;
7f453c24 4061 }
bce38cd5
SE
4062
4063 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4064 int size = sizeof(u64); /* nr */
4065 if (data->br_stack) {
4066 size += data->br_stack->nr
4067 * sizeof(struct perf_branch_entry);
4068 }
4069 header->size += size;
4070 }
5622f295 4071}
7f453c24 4072
a8b0ca17 4073static void perf_event_output(struct perf_event *event,
5622f295
MM
4074 struct perf_sample_data *data,
4075 struct pt_regs *regs)
4076{
4077 struct perf_output_handle handle;
4078 struct perf_event_header header;
689802b2 4079
927c7a9e
FW
4080 /* protect the callchain buffers */
4081 rcu_read_lock();
4082
cdd6c482 4083 perf_prepare_sample(&header, data, event, regs);
5c148194 4084
a7ac67ea 4085 if (perf_output_begin(&handle, event, header.size))
927c7a9e 4086 goto exit;
0322cd6e 4087
cdd6c482 4088 perf_output_sample(&handle, &header, data, event);
f413cdb8 4089
8a057d84 4090 perf_output_end(&handle);
927c7a9e
FW
4091
4092exit:
4093 rcu_read_unlock();
0322cd6e
PZ
4094}
4095
38b200d6 4096/*
cdd6c482 4097 * read event_id
38b200d6
PZ
4098 */
4099
4100struct perf_read_event {
4101 struct perf_event_header header;
4102
4103 u32 pid;
4104 u32 tid;
38b200d6
PZ
4105};
4106
4107static void
cdd6c482 4108perf_event_read_event(struct perf_event *event,
38b200d6
PZ
4109 struct task_struct *task)
4110{
4111 struct perf_output_handle handle;
c980d109 4112 struct perf_sample_data sample;
dfc65094 4113 struct perf_read_event read_event = {
38b200d6 4114 .header = {
cdd6c482 4115 .type = PERF_RECORD_READ,
38b200d6 4116 .misc = 0,
c320c7b7 4117 .size = sizeof(read_event) + event->read_size,
38b200d6 4118 },
cdd6c482
IM
4119 .pid = perf_event_pid(event, task),
4120 .tid = perf_event_tid(event, task),
38b200d6 4121 };
3dab77fb 4122 int ret;
38b200d6 4123
c980d109 4124 perf_event_header__init_id(&read_event.header, &sample, event);
a7ac67ea 4125 ret = perf_output_begin(&handle, event, read_event.header.size);
38b200d6
PZ
4126 if (ret)
4127 return;
4128
dfc65094 4129 perf_output_put(&handle, read_event);
cdd6c482 4130 perf_output_read(&handle, event);
c980d109 4131 perf_event__output_id_sample(event, &handle, &sample);
3dab77fb 4132
38b200d6
PZ
4133 perf_output_end(&handle);
4134}
4135
60313ebe 4136/*
9f498cc5
PZ
4137 * task tracking -- fork/exit
4138 *
3af9e859 4139 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
60313ebe
PZ
4140 */
4141
9f498cc5 4142struct perf_task_event {
3a80b4a3 4143 struct task_struct *task;
cdd6c482 4144 struct perf_event_context *task_ctx;
60313ebe
PZ
4145
4146 struct {
4147 struct perf_event_header header;
4148
4149 u32 pid;
4150 u32 ppid;
9f498cc5
PZ
4151 u32 tid;
4152 u32 ptid;
393b2ad8 4153 u64 time;
cdd6c482 4154 } event_id;
60313ebe
PZ
4155};
4156
cdd6c482 4157static void perf_event_task_output(struct perf_event *event,
9f498cc5 4158 struct perf_task_event *task_event)
60313ebe
PZ
4159{
4160 struct perf_output_handle handle;
c980d109 4161 struct perf_sample_data sample;
9f498cc5 4162 struct task_struct *task = task_event->task;
c980d109 4163 int ret, size = task_event->event_id.header.size;
8bb39f9a 4164
c980d109 4165 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
60313ebe 4166
c980d109 4167 ret = perf_output_begin(&handle, event,
a7ac67ea 4168 task_event->event_id.header.size);
ef60777c 4169 if (ret)
c980d109 4170 goto out;
60313ebe 4171
cdd6c482
IM
4172 task_event->event_id.pid = perf_event_pid(event, task);
4173 task_event->event_id.ppid = perf_event_pid(event, current);
60313ebe 4174
cdd6c482
IM
4175 task_event->event_id.tid = perf_event_tid(event, task);
4176 task_event->event_id.ptid = perf_event_tid(event, current);
9f498cc5 4177
cdd6c482 4178 perf_output_put(&handle, task_event->event_id);
393b2ad8 4179
c980d109
ACM
4180 perf_event__output_id_sample(event, &handle, &sample);
4181
60313ebe 4182 perf_output_end(&handle);
c980d109
ACM
4183out:
4184 task_event->event_id.header.size = size;
60313ebe
PZ
4185}
4186
cdd6c482 4187static int perf_event_task_match(struct perf_event *event)
60313ebe 4188{
6f93d0a7 4189 if (event->state < PERF_EVENT_STATE_INACTIVE)
22e19085
PZ
4190 return 0;
4191
5632ab12 4192 if (!event_filter_match(event))
5d27c23d
PZ
4193 return 0;
4194
3af9e859
EM
4195 if (event->attr.comm || event->attr.mmap ||
4196 event->attr.mmap_data || event->attr.task)
60313ebe
PZ
4197 return 1;
4198
4199 return 0;
4200}
4201
cdd6c482 4202static void perf_event_task_ctx(struct perf_event_context *ctx,
9f498cc5 4203 struct perf_task_event *task_event)
60313ebe 4204{
cdd6c482 4205 struct perf_event *event;
60313ebe 4206
cdd6c482
IM
4207 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4208 if (perf_event_task_match(event))
4209 perf_event_task_output(event, task_event);
60313ebe 4210 }
60313ebe
PZ
4211}
4212
cdd6c482 4213static void perf_event_task_event(struct perf_task_event *task_event)
60313ebe
PZ
4214{
4215 struct perf_cpu_context *cpuctx;
8dc85d54 4216 struct perf_event_context *ctx;
108b02cf 4217 struct pmu *pmu;
8dc85d54 4218 int ctxn;
60313ebe 4219
d6ff86cf 4220 rcu_read_lock();
108b02cf 4221 list_for_each_entry_rcu(pmu, &pmus, entry) {
41945f6c 4222 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
51676957
PZ
4223 if (cpuctx->active_pmu != pmu)
4224 goto next;
108b02cf 4225 perf_event_task_ctx(&cpuctx->ctx, task_event);
8dc85d54
PZ
4226
4227 ctx = task_event->task_ctx;
4228 if (!ctx) {
4229 ctxn = pmu->task_ctx_nr;
4230 if (ctxn < 0)
41945f6c 4231 goto next;
8dc85d54
PZ
4232 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4233 }
4234 if (ctx)
4235 perf_event_task_ctx(ctx, task_event);
41945f6c
PZ
4236next:
4237 put_cpu_ptr(pmu->pmu_cpu_context);
108b02cf 4238 }
60313ebe
PZ
4239 rcu_read_unlock();
4240}
4241
cdd6c482
IM
4242static void perf_event_task(struct task_struct *task,
4243 struct perf_event_context *task_ctx,
3a80b4a3 4244 int new)
60313ebe 4245{
9f498cc5 4246 struct perf_task_event task_event;
60313ebe 4247
cdd6c482
IM
4248 if (!atomic_read(&nr_comm_events) &&
4249 !atomic_read(&nr_mmap_events) &&
4250 !atomic_read(&nr_task_events))
60313ebe
PZ
4251 return;
4252
9f498cc5 4253 task_event = (struct perf_task_event){
3a80b4a3
PZ
4254 .task = task,
4255 .task_ctx = task_ctx,
cdd6c482 4256 .event_id = {
60313ebe 4257 .header = {
cdd6c482 4258 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
573402db 4259 .misc = 0,
cdd6c482 4260 .size = sizeof(task_event.event_id),
60313ebe 4261 },
573402db
PZ
4262 /* .pid */
4263 /* .ppid */
9f498cc5
PZ
4264 /* .tid */
4265 /* .ptid */
6f93d0a7 4266 .time = perf_clock(),
60313ebe
PZ
4267 },
4268 };
4269
cdd6c482 4270 perf_event_task_event(&task_event);
9f498cc5
PZ
4271}
4272
cdd6c482 4273void perf_event_fork(struct task_struct *task)
9f498cc5 4274{
cdd6c482 4275 perf_event_task(task, NULL, 1);
60313ebe
PZ
4276}
4277
8d1b2d93
PZ
4278/*
4279 * comm tracking
4280 */
4281
4282struct perf_comm_event {
22a4f650
IM
4283 struct task_struct *task;
4284 char *comm;
8d1b2d93
PZ
4285 int comm_size;
4286
4287 struct {
4288 struct perf_event_header header;
4289
4290 u32 pid;
4291 u32 tid;
cdd6c482 4292 } event_id;
8d1b2d93
PZ
4293};
4294
cdd6c482 4295static void perf_event_comm_output(struct perf_event *event,
8d1b2d93
PZ
4296 struct perf_comm_event *comm_event)
4297{
4298 struct perf_output_handle handle;
c980d109 4299 struct perf_sample_data sample;
cdd6c482 4300 int size = comm_event->event_id.header.size;
c980d109
ACM
4301 int ret;
4302
4303 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4304 ret = perf_output_begin(&handle, event,
a7ac67ea 4305 comm_event->event_id.header.size);
8d1b2d93
PZ
4306
4307 if (ret)
c980d109 4308 goto out;
8d1b2d93 4309
cdd6c482
IM
4310 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4311 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
709e50cf 4312
cdd6c482 4313 perf_output_put(&handle, comm_event->event_id);
76369139 4314 __output_copy(&handle, comm_event->comm,
8d1b2d93 4315 comm_event->comm_size);
c980d109
ACM
4316
4317 perf_event__output_id_sample(event, &handle, &sample);
4318
8d1b2d93 4319 perf_output_end(&handle);
c980d109
ACM
4320out:
4321 comm_event->event_id.header.size = size;
8d1b2d93
PZ
4322}
4323
cdd6c482 4324static int perf_event_comm_match(struct perf_event *event)
8d1b2d93 4325{
6f93d0a7 4326 if (event->state < PERF_EVENT_STATE_INACTIVE)
22e19085
PZ
4327 return 0;
4328
5632ab12 4329 if (!event_filter_match(event))
5d27c23d
PZ
4330 return 0;
4331
cdd6c482 4332 if (event->attr.comm)
8d1b2d93
PZ
4333 return 1;
4334
4335 return 0;
4336}
4337
cdd6c482 4338static void perf_event_comm_ctx(struct perf_event_context *ctx,
8d1b2d93
PZ
4339 struct perf_comm_event *comm_event)
4340{
cdd6c482 4341 struct perf_event *event;
8d1b2d93 4342
cdd6c482
IM
4343 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4344 if (perf_event_comm_match(event))
4345 perf_event_comm_output(event, comm_event);
8d1b2d93 4346 }
8d1b2d93
PZ
4347}
4348
cdd6c482 4349static void perf_event_comm_event(struct perf_comm_event *comm_event)
8d1b2d93
PZ
4350{
4351 struct perf_cpu_context *cpuctx;
cdd6c482 4352 struct perf_event_context *ctx;
413ee3b4 4353 char comm[TASK_COMM_LEN];
8d1b2d93 4354 unsigned int size;
108b02cf 4355 struct pmu *pmu;
8dc85d54 4356 int ctxn;
8d1b2d93 4357
413ee3b4 4358 memset(comm, 0, sizeof(comm));
96b02d78 4359 strlcpy(comm, comm_event->task->comm, sizeof(comm));
888fcee0 4360 size = ALIGN(strlen(comm)+1, sizeof(u64));
8d1b2d93
PZ
4361
4362 comm_event->comm = comm;
4363 comm_event->comm_size = size;
4364
cdd6c482 4365 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
f6595f3a 4366 rcu_read_lock();
108b02cf 4367 list_for_each_entry_rcu(pmu, &pmus, entry) {
41945f6c 4368 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
51676957
PZ
4369 if (cpuctx->active_pmu != pmu)
4370 goto next;
108b02cf 4371 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
8dc85d54
PZ
4372
4373 ctxn = pmu->task_ctx_nr;
4374 if (ctxn < 0)
41945f6c 4375 goto next;
8dc85d54
PZ
4376
4377 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4378 if (ctx)
4379 perf_event_comm_ctx(ctx, comm_event);
41945f6c
PZ
4380next:
4381 put_cpu_ptr(pmu->pmu_cpu_context);
108b02cf 4382 }
665c2142 4383 rcu_read_unlock();
8d1b2d93
PZ
4384}
4385
cdd6c482 4386void perf_event_comm(struct task_struct *task)
8d1b2d93 4387{
9ee318a7 4388 struct perf_comm_event comm_event;
8dc85d54
PZ
4389 struct perf_event_context *ctx;
4390 int ctxn;
9ee318a7 4391
8dc85d54
PZ
4392 for_each_task_context_nr(ctxn) {
4393 ctx = task->perf_event_ctxp[ctxn];
4394 if (!ctx)
4395 continue;
9ee318a7 4396
8dc85d54
PZ
4397 perf_event_enable_on_exec(ctx);
4398 }
9ee318a7 4399
cdd6c482 4400 if (!atomic_read(&nr_comm_events))
9ee318a7 4401 return;
a63eaf34 4402
9ee318a7 4403 comm_event = (struct perf_comm_event){
8d1b2d93 4404 .task = task,
573402db
PZ
4405 /* .comm */
4406 /* .comm_size */
cdd6c482 4407 .event_id = {
573402db 4408 .header = {
cdd6c482 4409 .type = PERF_RECORD_COMM,
573402db
PZ
4410 .misc = 0,
4411 /* .size */
4412 },
4413 /* .pid */
4414 /* .tid */
8d1b2d93
PZ
4415 },
4416 };
4417
cdd6c482 4418 perf_event_comm_event(&comm_event);
8d1b2d93
PZ
4419}
4420
0a4a9391
PZ
4421/*
4422 * mmap tracking
4423 */
4424
4425struct perf_mmap_event {
089dd79d
PZ
4426 struct vm_area_struct *vma;
4427
4428 const char *file_name;
4429 int file_size;
0a4a9391
PZ
4430
4431 struct {
4432 struct perf_event_header header;
4433
4434 u32 pid;
4435 u32 tid;
4436 u64 start;
4437 u64 len;
4438 u64 pgoff;
cdd6c482 4439 } event_id;
0a4a9391
PZ
4440};
4441
cdd6c482 4442static void perf_event_mmap_output(struct perf_event *event,
0a4a9391
PZ
4443 struct perf_mmap_event *mmap_event)
4444{
4445 struct perf_output_handle handle;
c980d109 4446 struct perf_sample_data sample;
cdd6c482 4447 int size = mmap_event->event_id.header.size;
c980d109 4448 int ret;
0a4a9391 4449
c980d109
ACM
4450 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
4451 ret = perf_output_begin(&handle, event,
a7ac67ea 4452 mmap_event->event_id.header.size);
0a4a9391 4453 if (ret)
c980d109 4454 goto out;
0a4a9391 4455
cdd6c482
IM
4456 mmap_event->event_id.pid = perf_event_pid(event, current);
4457 mmap_event->event_id.tid = perf_event_tid(event, current);
709e50cf 4458
cdd6c482 4459 perf_output_put(&handle, mmap_event->event_id);
76369139 4460 __output_copy(&handle, mmap_event->file_name,
0a4a9391 4461 mmap_event->file_size);
c980d109
ACM
4462
4463 perf_event__output_id_sample(event, &handle, &sample);
4464
78d613eb 4465 perf_output_end(&handle);
c980d109
ACM
4466out:
4467 mmap_event->event_id.header.size = size;
0a4a9391
PZ
4468}
4469
cdd6c482 4470static int perf_event_mmap_match(struct perf_event *event,
3af9e859
EM
4471 struct perf_mmap_event *mmap_event,
4472 int executable)
0a4a9391 4473{
6f93d0a7 4474 if (event->state < PERF_EVENT_STATE_INACTIVE)
22e19085
PZ
4475 return 0;
4476
5632ab12 4477 if (!event_filter_match(event))
5d27c23d
PZ
4478 return 0;
4479
3af9e859
EM
4480 if ((!executable && event->attr.mmap_data) ||
4481 (executable && event->attr.mmap))
0a4a9391
PZ
4482 return 1;
4483
4484 return 0;
4485}
4486
cdd6c482 4487static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3af9e859
EM
4488 struct perf_mmap_event *mmap_event,
4489 int executable)
0a4a9391 4490{
cdd6c482 4491 struct perf_event *event;
0a4a9391 4492
cdd6c482 4493 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3af9e859 4494 if (perf_event_mmap_match(event, mmap_event, executable))
cdd6c482 4495 perf_event_mmap_output(event, mmap_event);
0a4a9391 4496 }
0a4a9391
PZ
4497}
4498
cdd6c482 4499static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
0a4a9391
PZ
4500{
4501 struct perf_cpu_context *cpuctx;
cdd6c482 4502 struct perf_event_context *ctx;
089dd79d
PZ
4503 struct vm_area_struct *vma = mmap_event->vma;
4504 struct file *file = vma->vm_file;
0a4a9391
PZ
4505 unsigned int size;
4506 char tmp[16];
4507 char *buf = NULL;
089dd79d 4508 const char *name;
108b02cf 4509 struct pmu *pmu;
8dc85d54 4510 int ctxn;
0a4a9391 4511
413ee3b4
AB
4512 memset(tmp, 0, sizeof(tmp));
4513
0a4a9391 4514 if (file) {
413ee3b4 4515 /*
76369139 4516 * d_path works from the end of the rb backwards, so we
413ee3b4
AB
4517 * need to add enough zero bytes after the string to handle
4518 * the 64bit alignment we do later.
4519 */
4520 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
0a4a9391
PZ
4521 if (!buf) {
4522 name = strncpy(tmp, "//enomem", sizeof(tmp));
4523 goto got_name;
4524 }
d3d21c41 4525 name = d_path(&file->f_path, buf, PATH_MAX);
0a4a9391
PZ
4526 if (IS_ERR(name)) {
4527 name = strncpy(tmp, "//toolong", sizeof(tmp));
4528 goto got_name;
4529 }
4530 } else {
413ee3b4
AB
4531 if (arch_vma_name(mmap_event->vma)) {
4532 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4533 sizeof(tmp));
089dd79d 4534 goto got_name;
413ee3b4 4535 }
089dd79d
PZ
4536
4537 if (!vma->vm_mm) {
4538 name = strncpy(tmp, "[vdso]", sizeof(tmp));
4539 goto got_name;
3af9e859
EM
4540 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
4541 vma->vm_end >= vma->vm_mm->brk) {
4542 name = strncpy(tmp, "[heap]", sizeof(tmp));
4543 goto got_name;
4544 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
4545 vma->vm_end >= vma->vm_mm->start_stack) {
4546 name = strncpy(tmp, "[stack]", sizeof(tmp));
4547 goto got_name;
089dd79d
PZ
4548 }
4549
0a4a9391
PZ
4550 name = strncpy(tmp, "//anon", sizeof(tmp));
4551 goto got_name;
4552 }
4553
4554got_name:
888fcee0 4555 size = ALIGN(strlen(name)+1, sizeof(u64));
0a4a9391
PZ
4556
4557 mmap_event->file_name = name;
4558 mmap_event->file_size = size;
4559
cdd6c482 4560 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
0a4a9391 4561
f6d9dd23 4562 rcu_read_lock();
108b02cf 4563 list_for_each_entry_rcu(pmu, &pmus, entry) {
41945f6c 4564 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
51676957
PZ
4565 if (cpuctx->active_pmu != pmu)
4566 goto next;
108b02cf
PZ
4567 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4568 vma->vm_flags & VM_EXEC);
8dc85d54
PZ
4569
4570 ctxn = pmu->task_ctx_nr;
4571 if (ctxn < 0)
41945f6c 4572 goto next;
8dc85d54
PZ
4573
4574 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4575 if (ctx) {
4576 perf_event_mmap_ctx(ctx, mmap_event,
4577 vma->vm_flags & VM_EXEC);
4578 }
41945f6c
PZ
4579next:
4580 put_cpu_ptr(pmu->pmu_cpu_context);
108b02cf 4581 }
665c2142
PZ
4582 rcu_read_unlock();
4583
0a4a9391
PZ
4584 kfree(buf);
4585}
4586
3af9e859 4587void perf_event_mmap(struct vm_area_struct *vma)
0a4a9391 4588{
9ee318a7
PZ
4589 struct perf_mmap_event mmap_event;
4590
cdd6c482 4591 if (!atomic_read(&nr_mmap_events))
9ee318a7
PZ
4592 return;
4593
4594 mmap_event = (struct perf_mmap_event){
089dd79d 4595 .vma = vma,
573402db
PZ
4596 /* .file_name */
4597 /* .file_size */
cdd6c482 4598 .event_id = {
573402db 4599 .header = {
cdd6c482 4600 .type = PERF_RECORD_MMAP,
39447b38 4601 .misc = PERF_RECORD_MISC_USER,
573402db
PZ
4602 /* .size */
4603 },
4604 /* .pid */
4605 /* .tid */
089dd79d
PZ
4606 .start = vma->vm_start,
4607 .len = vma->vm_end - vma->vm_start,
3a0304e9 4608 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
0a4a9391
PZ
4609 },
4610 };
4611
cdd6c482 4612 perf_event_mmap_event(&mmap_event);
0a4a9391
PZ
4613}
4614
a78ac325
PZ
4615/*
4616 * IRQ throttle logging
4617 */
4618
cdd6c482 4619static void perf_log_throttle(struct perf_event *event, int enable)
a78ac325
PZ
4620{
4621 struct perf_output_handle handle;
c980d109 4622 struct perf_sample_data sample;
a78ac325
PZ
4623 int ret;
4624
4625 struct {
4626 struct perf_event_header header;
4627 u64 time;
cca3f454 4628 u64 id;
7f453c24 4629 u64 stream_id;
a78ac325
PZ
4630 } throttle_event = {
4631 .header = {
cdd6c482 4632 .type = PERF_RECORD_THROTTLE,
a78ac325
PZ
4633 .misc = 0,
4634 .size = sizeof(throttle_event),
4635 },
def0a9b2 4636 .time = perf_clock(),
cdd6c482
IM
4637 .id = primary_event_id(event),
4638 .stream_id = event->id,
a78ac325
PZ
4639 };
4640
966ee4d6 4641 if (enable)
cdd6c482 4642 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
966ee4d6 4643
c980d109
ACM
4644 perf_event_header__init_id(&throttle_event.header, &sample, event);
4645
4646 ret = perf_output_begin(&handle, event,
a7ac67ea 4647 throttle_event.header.size);
a78ac325
PZ
4648 if (ret)
4649 return;
4650
4651 perf_output_put(&handle, throttle_event);
c980d109 4652 perf_event__output_id_sample(event, &handle, &sample);
a78ac325
PZ
4653 perf_output_end(&handle);
4654}
4655
f6c7d5fe 4656/*
cdd6c482 4657 * Generic event overflow handling, sampling.
f6c7d5fe
PZ
4658 */
4659
a8b0ca17 4660static int __perf_event_overflow(struct perf_event *event,
5622f295
MM
4661 int throttle, struct perf_sample_data *data,
4662 struct pt_regs *regs)
f6c7d5fe 4663{
cdd6c482
IM
4664 int events = atomic_read(&event->event_limit);
4665 struct hw_perf_event *hwc = &event->hw;
e050e3f0 4666 u64 seq;
79f14641
PZ
4667 int ret = 0;
4668
96398826
PZ
4669 /*
4670 * Non-sampling counters might still use the PMI to fold short
 4671 * hardware counters; ignore those.
4672 */
4673 if (unlikely(!is_sampling_event(event)))
4674 return 0;
4675
e050e3f0
SE
4676 seq = __this_cpu_read(perf_throttled_seq);
4677 if (seq != hwc->interrupts_seq) {
4678 hwc->interrupts_seq = seq;
4679 hwc->interrupts = 1;
4680 } else {
4681 hwc->interrupts++;
4682 if (unlikely(throttle
4683 && hwc->interrupts >= max_samples_per_tick)) {
4684 __this_cpu_inc(perf_throttled_count);
163ec435
PZ
4685 hwc->interrupts = MAX_INTERRUPTS;
4686 perf_log_throttle(event, 0);
a78ac325
PZ
4687 ret = 1;
4688 }
e050e3f0 4689 }
60db5e09 4690
cdd6c482 4691 if (event->attr.freq) {
def0a9b2 4692 u64 now = perf_clock();
abd50713 4693 s64 delta = now - hwc->freq_time_stamp;
bd2b5b12 4694
abd50713 4695 hwc->freq_time_stamp = now;
bd2b5b12 4696
abd50713 4697 if (delta > 0 && delta < 2*TICK_NSEC)
f39d47ff 4698 perf_adjust_period(event, delta, hwc->last_period, true);
bd2b5b12
PZ
4699 }
4700
2023b359
PZ
4701 /*
4702 * XXX event_limit might not quite work as expected on inherited
cdd6c482 4703 * events
2023b359
PZ
4704 */
4705
cdd6c482
IM
4706 event->pending_kill = POLL_IN;
4707 if (events && atomic_dec_and_test(&event->event_limit)) {
79f14641 4708 ret = 1;
cdd6c482 4709 event->pending_kill = POLL_HUP;
a8b0ca17
PZ
4710 event->pending_disable = 1;
4711 irq_work_queue(&event->pending);
79f14641
PZ
4712 }
4713
453f19ee 4714 if (event->overflow_handler)
a8b0ca17 4715 event->overflow_handler(event, data, regs);
453f19ee 4716 else
a8b0ca17 4717 perf_event_output(event, data, regs);
453f19ee 4718
f506b3dc 4719 if (event->fasync && event->pending_kill) {
a8b0ca17
PZ
4720 event->pending_wakeup = 1;
4721 irq_work_queue(&event->pending);
f506b3dc
PZ
4722 }
4723
79f14641 4724 return ret;
f6c7d5fe
PZ
4725}
4726
a8b0ca17 4727int perf_event_overflow(struct perf_event *event,
5622f295
MM
4728 struct perf_sample_data *data,
4729 struct pt_regs *regs)
850bc73f 4730{
a8b0ca17 4731 return __perf_event_overflow(event, 1, data, regs);
850bc73f
PZ
4732}
4733
15dbf27c 4734/*
cdd6c482 4735 * Generic software event infrastructure
15dbf27c
PZ
4736 */
4737
b28ab83c
PZ
4738struct swevent_htable {
4739 struct swevent_hlist *swevent_hlist;
4740 struct mutex hlist_mutex;
4741 int hlist_refcount;
4742
4743 /* Recursion avoidance in each contexts */
4744 int recursion[PERF_NR_CONTEXTS];
4745};
4746
4747static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
4748
7b4b6658 4749/*
cdd6c482
IM
4750 * We directly increment event->count and keep a second value in
 4751 * event->hw.period_left to count intervals. This period counter
7b4b6658
PZ
4752 * is kept in the range [-sample_period, 0] so that we can use the
4753 * sign as trigger.
4754 */
4755
cdd6c482 4756static u64 perf_swevent_set_period(struct perf_event *event)
15dbf27c 4757{
cdd6c482 4758 struct hw_perf_event *hwc = &event->hw;
7b4b6658
PZ
4759 u64 period = hwc->last_period;
4760 u64 nr, offset;
4761 s64 old, val;
4762
4763 hwc->last_period = hwc->sample_period;
15dbf27c
PZ
4764
4765again:
e7850595 4766 old = val = local64_read(&hwc->period_left);
7b4b6658
PZ
4767 if (val < 0)
4768 return 0;
15dbf27c 4769
7b4b6658
PZ
4770 nr = div64_u64(period + val, period);
4771 offset = nr * period;
4772 val -= offset;
e7850595 4773 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7b4b6658 4774 goto again;
15dbf27c 4775
7b4b6658 4776 return nr;
15dbf27c
PZ
4777}
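
perf_swevent_set_period() folds the accumulated count into whole sample periods and stores the remainder back into the [-sample_period, 0] window described in the comment above. A standalone sketch of the same arithmetic, ignoring the lock-free cmpxchg retry loop:

#include <stdint.h>
#include <stdio.h>

static uint64_t fold_period(int64_t *period_left, uint64_t period)
{
        int64_t val = *period_left;

        if (val < 0)
                return 0;       /* still inside the current period */

        uint64_t nr = ((uint64_t)val + period) / period;   /* whole periods */
        *period_left = val - (int64_t)(nr * period);       /* back into [-period, 0] */
        return nr;
}

int main(void)
{
        int64_t left = 3;       /* counter overshot the period boundary by 3 */
        uint64_t nr = fold_period(&left, 10);

        printf("overflows=%llu new period_left=%lld\n",
               (unsigned long long)nr, (long long)left);   /* prints 1 and -7 */
        return 0;
}
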
4778
0cff784a 4779static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
a8b0ca17 4780 struct perf_sample_data *data,
5622f295 4781 struct pt_regs *regs)
15dbf27c 4782{
cdd6c482 4783 struct hw_perf_event *hwc = &event->hw;
850bc73f 4784 int throttle = 0;
15dbf27c 4785
0cff784a
PZ
4786 if (!overflow)
4787 overflow = perf_swevent_set_period(event);
15dbf27c 4788
7b4b6658
PZ
4789 if (hwc->interrupts == MAX_INTERRUPTS)
4790 return;
15dbf27c 4791
7b4b6658 4792 for (; overflow; overflow--) {
a8b0ca17 4793 if (__perf_event_overflow(event, throttle,
5622f295 4794 data, regs)) {
7b4b6658
PZ
4795 /*
4796 * We inhibit the overflow from happening when
4797 * hwc->interrupts == MAX_INTERRUPTS.
4798 */
4799 break;
4800 }
cf450a73 4801 throttle = 1;
7b4b6658 4802 }
15dbf27c
PZ
4803}
4804
a4eaf7f1 4805static void perf_swevent_event(struct perf_event *event, u64 nr,
a8b0ca17 4806 struct perf_sample_data *data,
5622f295 4807 struct pt_regs *regs)
7b4b6658 4808{
cdd6c482 4809 struct hw_perf_event *hwc = &event->hw;
d6d020e9 4810
e7850595 4811 local64_add(nr, &event->count);
d6d020e9 4812
0cff784a
PZ
4813 if (!regs)
4814 return;
4815
6c7e550f 4816 if (!is_sampling_event(event))
7b4b6658 4817 return;
d6d020e9 4818
5d81e5cf
AV
4819 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
4820 data->period = nr;
4821 return perf_swevent_overflow(event, 1, data, regs);
4822 } else
4823 data->period = event->hw.last_period;
4824
0cff784a 4825 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
a8b0ca17 4826 return perf_swevent_overflow(event, 1, data, regs);
0cff784a 4827
e7850595 4828 if (local64_add_negative(nr, &hwc->period_left))
7b4b6658 4829 return;
df1a132b 4830
a8b0ca17 4831 perf_swevent_overflow(event, 0, data, regs);
d6d020e9
PZ
4832}
4833
f5ffe02e
FW
4834static int perf_exclude_event(struct perf_event *event,
4835 struct pt_regs *regs)
4836{
a4eaf7f1 4837 if (event->hw.state & PERF_HES_STOPPED)
91b2f482 4838 return 1;
a4eaf7f1 4839
f5ffe02e
FW
4840 if (regs) {
4841 if (event->attr.exclude_user && user_mode(regs))
4842 return 1;
4843
4844 if (event->attr.exclude_kernel && !user_mode(regs))
4845 return 1;
4846 }
4847
4848 return 0;
4849}
4850
cdd6c482 4851static int perf_swevent_match(struct perf_event *event,
1c432d89 4852 enum perf_type_id type,
6fb2915d
LZ
4853 u32 event_id,
4854 struct perf_sample_data *data,
4855 struct pt_regs *regs)
15dbf27c 4856{
cdd6c482 4857 if (event->attr.type != type)
a21ca2ca 4858 return 0;
f5ffe02e 4859
cdd6c482 4860 if (event->attr.config != event_id)
15dbf27c
PZ
4861 return 0;
4862
f5ffe02e
FW
4863 if (perf_exclude_event(event, regs))
4864 return 0;
15dbf27c
PZ
4865
4866 return 1;
4867}
4868
76e1d904
FW
4869static inline u64 swevent_hash(u64 type, u32 event_id)
4870{
4871 u64 val = event_id | (type << 32);
4872
4873 return hash_64(val, SWEVENT_HLIST_BITS);
4874}
4875
49f135ed
FW
4876static inline struct hlist_head *
4877__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
76e1d904 4878{
49f135ed
FW
4879 u64 hash = swevent_hash(type, event_id);
4880
4881 return &hlist->heads[hash];
4882}
76e1d904 4883
49f135ed
FW
4884/* For the read side: events when they trigger */
4885static inline struct hlist_head *
b28ab83c 4886find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
49f135ed
FW
4887{
4888 struct swevent_hlist *hlist;
76e1d904 4889
b28ab83c 4890 hlist = rcu_dereference(swhash->swevent_hlist);
76e1d904
FW
4891 if (!hlist)
4892 return NULL;
4893
49f135ed
FW
4894 return __find_swevent_head(hlist, type, event_id);
4895}
4896
4897/* For the event head insertion and removal in the hlist */
4898static inline struct hlist_head *
b28ab83c 4899find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
49f135ed
FW
4900{
4901 struct swevent_hlist *hlist;
4902 u32 event_id = event->attr.config;
4903 u64 type = event->attr.type;
4904
4905 /*
4906 * Event scheduling is always serialized against hlist allocation
 4907 * and release, which makes the protected version suitable here.
4908 * The context lock guarantees that.
4909 */
b28ab83c 4910 hlist = rcu_dereference_protected(swhash->swevent_hlist,
49f135ed
FW
4911 lockdep_is_held(&event->ctx->lock));
4912 if (!hlist)
4913 return NULL;
4914
4915 return __find_swevent_head(hlist, type, event_id);
76e1d904
FW
4916}
4917
4918static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
a8b0ca17 4919 u64 nr,
76e1d904
FW
4920 struct perf_sample_data *data,
4921 struct pt_regs *regs)
15dbf27c 4922{
b28ab83c 4923 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
cdd6c482 4924 struct perf_event *event;
76e1d904
FW
4925 struct hlist_node *node;
4926 struct hlist_head *head;
15dbf27c 4927
76e1d904 4928 rcu_read_lock();
b28ab83c 4929 head = find_swevent_head_rcu(swhash, type, event_id);
76e1d904
FW
4930 if (!head)
4931 goto end;
4932
4933 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
6fb2915d 4934 if (perf_swevent_match(event, type, event_id, data, regs))
a8b0ca17 4935 perf_swevent_event(event, nr, data, regs);
15dbf27c 4936 }
76e1d904
FW
4937end:
4938 rcu_read_unlock();
15dbf27c
PZ
4939}
4940
4ed7c92d 4941int perf_swevent_get_recursion_context(void)
96f6d444 4942{
b28ab83c 4943 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
96f6d444 4944
b28ab83c 4945 return get_recursion_context(swhash->recursion);
96f6d444 4946}
645e8cc0 4947EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
96f6d444 4948
fa9f90be 4949inline void perf_swevent_put_recursion_context(int rctx)
15dbf27c 4950{
b28ab83c 4951 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
927c7a9e 4952
b28ab83c 4953 put_recursion_context(swhash->recursion, rctx);
ce71b9df 4954}
15dbf27c 4955
a8b0ca17 4956void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
b8e83514 4957{
a4234bfc 4958 struct perf_sample_data data;
4ed7c92d
PZ
4959 int rctx;
4960
1c024eca 4961 preempt_disable_notrace();
4ed7c92d
PZ
4962 rctx = perf_swevent_get_recursion_context();
4963 if (rctx < 0)
4964 return;
a4234bfc 4965
fd0d000b 4966 perf_sample_data_init(&data, addr, 0);
92bf309a 4967
a8b0ca17 4968 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
4ed7c92d
PZ
4969
4970 perf_swevent_put_recursion_context(rctx);
1c024eca 4971 preempt_enable_notrace();
b8e83514
PZ
4972}
4973
cdd6c482 4974static void perf_swevent_read(struct perf_event *event)
15dbf27c 4975{
15dbf27c
PZ
4976}
4977
a4eaf7f1 4978static int perf_swevent_add(struct perf_event *event, int flags)
15dbf27c 4979{
b28ab83c 4980 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
cdd6c482 4981 struct hw_perf_event *hwc = &event->hw;
76e1d904
FW
4982 struct hlist_head *head;
4983
6c7e550f 4984 if (is_sampling_event(event)) {
7b4b6658 4985 hwc->last_period = hwc->sample_period;
cdd6c482 4986 perf_swevent_set_period(event);
7b4b6658 4987 }
76e1d904 4988
a4eaf7f1
PZ
4989 hwc->state = !(flags & PERF_EF_START);
4990
b28ab83c 4991 head = find_swevent_head(swhash, event);
76e1d904
FW
4992 if (WARN_ON_ONCE(!head))
4993 return -EINVAL;
4994
4995 hlist_add_head_rcu(&event->hlist_entry, head);
4996
15dbf27c
PZ
4997 return 0;
4998}
4999
a4eaf7f1 5000static void perf_swevent_del(struct perf_event *event, int flags)
15dbf27c 5001{
76e1d904 5002 hlist_del_rcu(&event->hlist_entry);
15dbf27c
PZ
5003}
5004
a4eaf7f1 5005static void perf_swevent_start(struct perf_event *event, int flags)
5c92d124 5006{
a4eaf7f1 5007 event->hw.state = 0;
d6d020e9 5008}
aa9c4c0f 5009
a4eaf7f1 5010static void perf_swevent_stop(struct perf_event *event, int flags)
d6d020e9 5011{
a4eaf7f1 5012 event->hw.state = PERF_HES_STOPPED;
bae43c99
IM
5013}
5014
49f135ed
FW
5015/* Deref the hlist from the update side */
5016static inline struct swevent_hlist *
b28ab83c 5017swevent_hlist_deref(struct swevent_htable *swhash)
49f135ed 5018{
b28ab83c
PZ
5019 return rcu_dereference_protected(swhash->swevent_hlist,
5020 lockdep_is_held(&swhash->hlist_mutex));
49f135ed
FW
5021}
5022
b28ab83c 5023static void swevent_hlist_release(struct swevent_htable *swhash)
76e1d904 5024{
b28ab83c 5025 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
76e1d904 5026
49f135ed 5027 if (!hlist)
76e1d904
FW
5028 return;
5029
b28ab83c 5030 rcu_assign_pointer(swhash->swevent_hlist, NULL);
fa4bbc4c 5031 kfree_rcu(hlist, rcu_head);
76e1d904
FW
5032}
5033
5034static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5035{
b28ab83c 5036 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904 5037
b28ab83c 5038 mutex_lock(&swhash->hlist_mutex);
76e1d904 5039
b28ab83c
PZ
5040 if (!--swhash->hlist_refcount)
5041 swevent_hlist_release(swhash);
76e1d904 5042
b28ab83c 5043 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
5044}
5045
5046static void swevent_hlist_put(struct perf_event *event)
5047{
5048 int cpu;
5049
5050 if (event->cpu != -1) {
5051 swevent_hlist_put_cpu(event, event->cpu);
5052 return;
5053 }
5054
5055 for_each_possible_cpu(cpu)
5056 swevent_hlist_put_cpu(event, cpu);
5057}
5058
5059static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5060{
b28ab83c 5061 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904
FW
5062 int err = 0;
5063
b28ab83c 5064 mutex_lock(&swhash->hlist_mutex);
76e1d904 5065
b28ab83c 5066 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
76e1d904
FW
5067 struct swevent_hlist *hlist;
5068
5069 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5070 if (!hlist) {
5071 err = -ENOMEM;
5072 goto exit;
5073 }
b28ab83c 5074 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 5075 }
b28ab83c 5076 swhash->hlist_refcount++;
9ed6060d 5077exit:
b28ab83c 5078 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
5079
5080 return err;
5081}
5082
5083static int swevent_hlist_get(struct perf_event *event)
5084{
5085 int err;
5086 int cpu, failed_cpu;
5087
5088 if (event->cpu != -1)
5089 return swevent_hlist_get_cpu(event, event->cpu);
5090
5091 get_online_cpus();
5092 for_each_possible_cpu(cpu) {
5093 err = swevent_hlist_get_cpu(event, cpu);
5094 if (err) {
5095 failed_cpu = cpu;
5096 goto fail;
5097 }
5098 }
5099 put_online_cpus();
5100
5101 return 0;
9ed6060d 5102fail:
76e1d904
FW
5103 for_each_possible_cpu(cpu) {
5104 if (cpu == failed_cpu)
5105 break;
5106 swevent_hlist_put_cpu(event, cpu);
5107 }
5108
5109 put_online_cpus();
5110 return err;
5111}
5112
c5905afb 5113struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
95476b64 5114
b0a873eb
PZ
5115static void sw_perf_event_destroy(struct perf_event *event)
5116{
5117 u64 event_id = event->attr.config;
95476b64 5118
b0a873eb
PZ
5119 WARN_ON(event->parent);
5120
c5905afb 5121 static_key_slow_dec(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
5122 swevent_hlist_put(event);
5123}
5124
5125static int perf_swevent_init(struct perf_event *event)
5126{
5127 int event_id = event->attr.config;
5128
5129 if (event->attr.type != PERF_TYPE_SOFTWARE)
5130 return -ENOENT;
5131
2481c5fa
SE
5132 /*
5133 * no branch sampling for software events
5134 */
5135 if (has_branch_stack(event))
5136 return -EOPNOTSUPP;
5137
b0a873eb
PZ
5138 switch (event_id) {
5139 case PERF_COUNT_SW_CPU_CLOCK:
5140 case PERF_COUNT_SW_TASK_CLOCK:
5141 return -ENOENT;
5142
5143 default:
5144 break;
5145 }
5146
ce677831 5147 if (event_id >= PERF_COUNT_SW_MAX)
b0a873eb
PZ
5148 return -ENOENT;
5149
5150 if (!event->parent) {
5151 int err;
5152
5153 err = swevent_hlist_get(event);
5154 if (err)
5155 return err;
5156
c5905afb 5157 static_key_slow_inc(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
5158 event->destroy = sw_perf_event_destroy;
5159 }
5160
5161 return 0;
5162}
5163
35edc2a5
PZ
5164static int perf_swevent_event_idx(struct perf_event *event)
5165{
5166 return 0;
5167}
5168
b0a873eb 5169static struct pmu perf_swevent = {
89a1e187 5170 .task_ctx_nr = perf_sw_context,
95476b64 5171
b0a873eb 5172 .event_init = perf_swevent_init,
a4eaf7f1
PZ
5173 .add = perf_swevent_add,
5174 .del = perf_swevent_del,
5175 .start = perf_swevent_start,
5176 .stop = perf_swevent_stop,
1c024eca 5177 .read = perf_swevent_read,
35edc2a5
PZ
5178
5179 .event_idx = perf_swevent_event_idx,
1c024eca
PZ
5180};
5181
b0a873eb
PZ
5182#ifdef CONFIG_EVENT_TRACING
5183
1c024eca
PZ
5184static int perf_tp_filter_match(struct perf_event *event,
5185 struct perf_sample_data *data)
5186{
5187 void *record = data->raw->data;
5188
5189 if (likely(!event->filter) || filter_match_preds(event->filter, record))
5190 return 1;
5191 return 0;
5192}
5193
5194static int perf_tp_event_match(struct perf_event *event,
5195 struct perf_sample_data *data,
5196 struct pt_regs *regs)
5197{
a0f7d0f7
FW
5198 if (event->hw.state & PERF_HES_STOPPED)
5199 return 0;
580d607c
PZ
5200 /*
5201 * All tracepoints are from kernel-space.
5202 */
5203 if (event->attr.exclude_kernel)
1c024eca
PZ
5204 return 0;
5205
5206 if (!perf_tp_filter_match(event, data))
5207 return 0;
5208
5209 return 1;
5210}
5211
5212void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
e6dab5ff
AV
5213 struct pt_regs *regs, struct hlist_head *head, int rctx,
5214 struct task_struct *task)
95476b64
FW
5215{
5216 struct perf_sample_data data;
1c024eca
PZ
5217 struct perf_event *event;
5218 struct hlist_node *node;
5219
95476b64
FW
5220 struct perf_raw_record raw = {
5221 .size = entry_size,
5222 .data = record,
5223 };
5224
fd0d000b 5225 perf_sample_data_init(&data, addr, 0);
95476b64
FW
5226 data.raw = &raw;
5227
1c024eca
PZ
5228 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
5229 if (perf_tp_event_match(event, &data, regs))
a8b0ca17 5230 perf_swevent_event(event, count, &data, regs);
4f41c013 5231 }
ecc55f84 5232
e6dab5ff
AV
5233 /*
 5234 * If we were given a target task, also iterate its context and
5235 * deliver this event there too.
5236 */
5237 if (task && task != current) {
5238 struct perf_event_context *ctx;
5239 struct trace_entry *entry = record;
5240
5241 rcu_read_lock();
5242 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
5243 if (!ctx)
5244 goto unlock;
5245
5246 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5247 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5248 continue;
5249 if (event->attr.config != entry->type)
5250 continue;
5251 if (perf_tp_event_match(event, &data, regs))
5252 perf_swevent_event(event, count, &data, regs);
5253 }
5254unlock:
5255 rcu_read_unlock();
5256 }
5257
ecc55f84 5258 perf_swevent_put_recursion_context(rctx);
95476b64
FW
5259}
5260EXPORT_SYMBOL_GPL(perf_tp_event);
5261
cdd6c482 5262static void tp_perf_event_destroy(struct perf_event *event)
e077df4f 5263{
1c024eca 5264 perf_trace_destroy(event);
e077df4f
PZ
5265}
5266
b0a873eb 5267static int perf_tp_event_init(struct perf_event *event)
e077df4f 5268{
76e1d904
FW
5269 int err;
5270
b0a873eb
PZ
5271 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5272 return -ENOENT;
5273
2481c5fa
SE
5274 /*
5275 * no branch sampling for tracepoint events
5276 */
5277 if (has_branch_stack(event))
5278 return -EOPNOTSUPP;
5279
1c024eca
PZ
5280 err = perf_trace_init(event);
5281 if (err)
b0a873eb 5282 return err;
e077df4f 5283
cdd6c482 5284 event->destroy = tp_perf_event_destroy;
e077df4f 5285
b0a873eb
PZ
5286 return 0;
5287}
5288
5289static struct pmu perf_tracepoint = {
89a1e187
PZ
5290 .task_ctx_nr = perf_sw_context,
5291
b0a873eb 5292 .event_init = perf_tp_event_init,
a4eaf7f1
PZ
5293 .add = perf_trace_add,
5294 .del = perf_trace_del,
5295 .start = perf_swevent_start,
5296 .stop = perf_swevent_stop,
b0a873eb 5297 .read = perf_swevent_read,
35edc2a5
PZ
5298
5299 .event_idx = perf_swevent_event_idx,
b0a873eb
PZ
5300};
5301
5302static inline void perf_tp_register(void)
5303{
2e80a82a 5304 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
e077df4f 5305}
6fb2915d
LZ
5306
5307static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5308{
5309 char *filter_str;
5310 int ret;
5311
5312 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5313 return -EINVAL;
5314
5315 filter_str = strndup_user(arg, PAGE_SIZE);
5316 if (IS_ERR(filter_str))
5317 return PTR_ERR(filter_str);
5318
5319 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5320
5321 kfree(filter_str);
5322 return ret;
5323}
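
This path is reached through the PERF_EVENT_IOC_SET_FILTER ioctl and, as the check at the top of the function shows, only applies to tracepoint events. A minimal userspace sketch; the filter expression shown is just an example of the ftrace filter syntax:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* attach a filter to an already-opened PERF_TYPE_TRACEPOINT event fd */
static int set_filter(int perf_fd)
{
        if (ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER, "common_pid == 1234") < 0) {
                perror("PERF_EVENT_IOC_SET_FILTER");
                return -1;
        }
        return 0;
}
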
5324
5325static void perf_event_free_filter(struct perf_event *event)
5326{
5327 ftrace_profile_free_filter(event);
5328}
5329
e077df4f 5330#else
6fb2915d 5331
b0a873eb 5332static inline void perf_tp_register(void)
e077df4f 5333{
e077df4f 5334}
6fb2915d
LZ
5335
5336static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5337{
5338 return -ENOENT;
5339}
5340
5341static void perf_event_free_filter(struct perf_event *event)
5342{
5343}
5344
07b139c8 5345#endif /* CONFIG_EVENT_TRACING */
e077df4f 5346
24f1e32c 5347#ifdef CONFIG_HAVE_HW_BREAKPOINT
f5ffe02e 5348void perf_bp_event(struct perf_event *bp, void *data)
24f1e32c 5349{
f5ffe02e
FW
5350 struct perf_sample_data sample;
5351 struct pt_regs *regs = data;
5352
fd0d000b 5353 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
f5ffe02e 5354
a4eaf7f1 5355 if (!bp->hw.state && !perf_exclude_event(bp, regs))
a8b0ca17 5356 perf_swevent_event(bp, 1, &sample, regs);
24f1e32c
FW
5357}
5358#endif
5359
b0a873eb
PZ
5360/*
5361 * hrtimer based swevent callback
5362 */
f29ac756 5363
b0a873eb 5364static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
f29ac756 5365{
b0a873eb
PZ
5366 enum hrtimer_restart ret = HRTIMER_RESTART;
5367 struct perf_sample_data data;
5368 struct pt_regs *regs;
5369 struct perf_event *event;
5370 u64 period;
f29ac756 5371
b0a873eb 5372 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
ba3dd36c
PZ
5373
5374 if (event->state != PERF_EVENT_STATE_ACTIVE)
5375 return HRTIMER_NORESTART;
5376
b0a873eb 5377 event->pmu->read(event);
f344011c 5378
fd0d000b 5379 perf_sample_data_init(&data, 0, event->hw.last_period);
b0a873eb
PZ
5380 regs = get_irq_regs();
5381
5382 if (regs && !perf_exclude_event(event, regs)) {
77aeeebd 5383 if (!(event->attr.exclude_idle && is_idle_task(current)))
33b07b8b 5384 if (__perf_event_overflow(event, 1, &data, regs))
b0a873eb
PZ
5385 ret = HRTIMER_NORESTART;
5386 }
24f1e32c 5387
b0a873eb
PZ
5388 period = max_t(u64, 10000, event->hw.sample_period);
5389 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
24f1e32c 5390
b0a873eb 5391 return ret;
f29ac756
PZ
5392}
5393
b0a873eb 5394static void perf_swevent_start_hrtimer(struct perf_event *event)
5c92d124 5395{
b0a873eb 5396 struct hw_perf_event *hwc = &event->hw;
5d508e82
FBH
5397 s64 period;
5398
5399 if (!is_sampling_event(event))
5400 return;
f5ffe02e 5401
5d508e82
FBH
5402 period = local64_read(&hwc->period_left);
5403 if (period) {
5404 if (period < 0)
5405 period = 10000;
fa407f35 5406
5d508e82
FBH
5407 local64_set(&hwc->period_left, 0);
5408 } else {
5409 period = max_t(u64, 10000, hwc->sample_period);
5410 }
5411 __hrtimer_start_range_ns(&hwc->hrtimer,
b0a873eb 5412 ns_to_ktime(period), 0,
b5ab4cd5 5413 HRTIMER_MODE_REL_PINNED, 0);
24f1e32c 5414}
b0a873eb
PZ
5415
5416static void perf_swevent_cancel_hrtimer(struct perf_event *event)
24f1e32c 5417{
b0a873eb
PZ
5418 struct hw_perf_event *hwc = &event->hw;
5419
6c7e550f 5420 if (is_sampling_event(event)) {
b0a873eb 5421 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
fa407f35 5422 local64_set(&hwc->period_left, ktime_to_ns(remaining));
b0a873eb
PZ
5423
5424 hrtimer_cancel(&hwc->hrtimer);
5425 }
24f1e32c
FW
5426}
5427
ba3dd36c
PZ
5428static void perf_swevent_init_hrtimer(struct perf_event *event)
5429{
5430 struct hw_perf_event *hwc = &event->hw;
5431
5432 if (!is_sampling_event(event))
5433 return;
5434
5435 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5436 hwc->hrtimer.function = perf_swevent_hrtimer;
5437
5438 /*
5439 * Since hrtimers have a fixed rate, we can do a static freq->period
5440 * mapping and avoid the whole period adjust feedback stuff.
5441 */
5442 if (event->attr.freq) {
5443 long freq = event->attr.sample_freq;
5444
5445 event->attr.sample_period = NSEC_PER_SEC / freq;
5446 hwc->sample_period = event->attr.sample_period;
5447 local64_set(&hwc->period_left, hwc->sample_period);
5448 event->attr.freq = 0;
5449 }
5450}
5451
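/*
 * Illustrative note, not part of the original file: the static
 * freq->period mapping above is a single integer division.  For a
 * requested attr.sample_freq of 4000 Hz:
 *
 *	sample_period = NSEC_PER_SEC / 4000 = 250000 ns
 *
 * so the hrtimer simply fires every 250us, and the feedback-driven
 * period adjustment needed by hardware events (whose event rate varies
 * with load) can be skipped entirely.
 */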
b0a873eb
PZ
5452/*
5453 * Software event: cpu wall time clock
5454 */
5455
5456static void cpu_clock_event_update(struct perf_event *event)
24f1e32c 5457{
b0a873eb
PZ
5458 s64 prev;
5459 u64 now;
5460
a4eaf7f1 5461 now = local_clock();
b0a873eb
PZ
5462 prev = local64_xchg(&event->hw.prev_count, now);
5463 local64_add(now - prev, &event->count);
24f1e32c 5464}
24f1e32c 5465
a4eaf7f1 5466static void cpu_clock_event_start(struct perf_event *event, int flags)
b0a873eb 5467{
a4eaf7f1 5468 local64_set(&event->hw.prev_count, local_clock());
b0a873eb 5469 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
5470}
5471
a4eaf7f1 5472static void cpu_clock_event_stop(struct perf_event *event, int flags)
f29ac756 5473{
b0a873eb
PZ
5474 perf_swevent_cancel_hrtimer(event);
5475 cpu_clock_event_update(event);
5476}
f29ac756 5477
a4eaf7f1
PZ
5478static int cpu_clock_event_add(struct perf_event *event, int flags)
5479{
5480 if (flags & PERF_EF_START)
5481 cpu_clock_event_start(event, flags);
5482
5483 return 0;
5484}
5485
5486static void cpu_clock_event_del(struct perf_event *event, int flags)
5487{
5488 cpu_clock_event_stop(event, flags);
5489}
5490
b0a873eb
PZ
5491static void cpu_clock_event_read(struct perf_event *event)
5492{
5493 cpu_clock_event_update(event);
5494}
f344011c 5495
b0a873eb
PZ
5496static int cpu_clock_event_init(struct perf_event *event)
5497{
5498 if (event->attr.type != PERF_TYPE_SOFTWARE)
5499 return -ENOENT;
5500
5501 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5502 return -ENOENT;
5503
2481c5fa
SE
5504 /*
5505 * no branch sampling for software events
5506 */
5507 if (has_branch_stack(event))
5508 return -EOPNOTSUPP;
5509
ba3dd36c
PZ
5510 perf_swevent_init_hrtimer(event);
5511
b0a873eb 5512 return 0;
f29ac756
PZ
5513}
5514
b0a873eb 5515static struct pmu perf_cpu_clock = {
89a1e187
PZ
5516 .task_ctx_nr = perf_sw_context,
5517
b0a873eb 5518 .event_init = cpu_clock_event_init,
a4eaf7f1
PZ
5519 .add = cpu_clock_event_add,
5520 .del = cpu_clock_event_del,
5521 .start = cpu_clock_event_start,
5522 .stop = cpu_clock_event_stop,
b0a873eb 5523 .read = cpu_clock_event_read,
35edc2a5
PZ
5524
5525 .event_idx = perf_swevent_event_idx,
b0a873eb
PZ
5526};
5527
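/*
 * Illustrative sketch, not part of the original file: the callback
 * sequence the core drives for a software PMU such as perf_cpu_clock
 * above.  The helper itself is hypothetical; in the kernel these calls
 * are issued by the event scheduling code with the proper context locks
 * held.
 */
static int __maybe_unused cpu_clock_callback_sequence_sketch(struct perf_event *event)
{
	int err;

	err = event->pmu->event_init(event);		/* type/config validation */
	if (err)
		return err;

	err = event->pmu->add(event, PERF_EF_START);	/* schedule in, start hrtimer */
	if (err)
		return err;

	event->pmu->read(event);	/* fold elapsed local_clock() time into event->count */
	event->pmu->del(event, 0);	/* schedule out, cancel the hrtimer */

	return 0;
}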
5528/*
5529 * Software event: task time clock
5530 */
5531
5532static void task_clock_event_update(struct perf_event *event, u64 now)
5c92d124 5533{
b0a873eb
PZ
5534 u64 prev;
5535 s64 delta;
5c92d124 5536
b0a873eb
PZ
5537 prev = local64_xchg(&event->hw.prev_count, now);
5538 delta = now - prev;
5539 local64_add(delta, &event->count);
5540}
5c92d124 5541
a4eaf7f1 5542static void task_clock_event_start(struct perf_event *event, int flags)
b0a873eb 5543{
a4eaf7f1 5544 local64_set(&event->hw.prev_count, event->ctx->time);
b0a873eb 5545 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
5546}
5547
a4eaf7f1 5548static void task_clock_event_stop(struct perf_event *event, int flags)
b0a873eb
PZ
5549{
5550 perf_swevent_cancel_hrtimer(event);
5551 task_clock_event_update(event, event->ctx->time);
a4eaf7f1
PZ
5552}
5553
5554static int task_clock_event_add(struct perf_event *event, int flags)
5555{
5556 if (flags & PERF_EF_START)
5557 task_clock_event_start(event, flags);
b0a873eb 5558
a4eaf7f1
PZ
5559 return 0;
5560}
5561
5562static void task_clock_event_del(struct perf_event *event, int flags)
5563{
5564 task_clock_event_stop(event, PERF_EF_UPDATE);
b0a873eb
PZ
5565}
5566
5567static void task_clock_event_read(struct perf_event *event)
5568{
768a06e2
PZ
5569 u64 now = perf_clock();
5570 u64 delta = now - event->ctx->timestamp;
5571 u64 time = event->ctx->time + delta;
b0a873eb
PZ
5572
5573 task_clock_event_update(event, time);
5574}
5575
5576static int task_clock_event_init(struct perf_event *event)
6fb2915d 5577{
b0a873eb
PZ
5578 if (event->attr.type != PERF_TYPE_SOFTWARE)
5579 return -ENOENT;
5580
5581 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5582 return -ENOENT;
5583
2481c5fa
SE
5584 /*
5585 * no branch sampling for software events
5586 */
5587 if (has_branch_stack(event))
5588 return -EOPNOTSUPP;
5589
ba3dd36c
PZ
5590 perf_swevent_init_hrtimer(event);
5591
b0a873eb 5592 return 0;
6fb2915d
LZ
5593}
5594
b0a873eb 5595static struct pmu perf_task_clock = {
89a1e187
PZ
5596 .task_ctx_nr = perf_sw_context,
5597
b0a873eb 5598 .event_init = task_clock_event_init,
a4eaf7f1
PZ
5599 .add = task_clock_event_add,
5600 .del = task_clock_event_del,
5601 .start = task_clock_event_start,
5602 .stop = task_clock_event_stop,
b0a873eb 5603 .read = task_clock_event_read,
35edc2a5
PZ
5604
5605 .event_idx = perf_swevent_event_idx,
b0a873eb 5606};
6fb2915d 5607
ad5133b7 5608static void perf_pmu_nop_void(struct pmu *pmu)
e077df4f 5609{
e077df4f 5610}
6fb2915d 5611
ad5133b7 5612static int perf_pmu_nop_int(struct pmu *pmu)
6fb2915d 5613{
ad5133b7 5614 return 0;
6fb2915d
LZ
5615}
5616
ad5133b7 5617static void perf_pmu_start_txn(struct pmu *pmu)
6fb2915d 5618{
ad5133b7 5619 perf_pmu_disable(pmu);
6fb2915d
LZ
5620}
5621
ad5133b7
PZ
5622static int perf_pmu_commit_txn(struct pmu *pmu)
5623{
5624 perf_pmu_enable(pmu);
5625 return 0;
5626}
e077df4f 5627
ad5133b7 5628static void perf_pmu_cancel_txn(struct pmu *pmu)
24f1e32c 5629{
ad5133b7 5630 perf_pmu_enable(pmu);
24f1e32c
FW
5631}
5632
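/*
 * Illustrative sketch, not part of the original file: how the
 * transaction hooks above are consumed by the group scheduling path.
 * A PMU that only provides pmu_enable/pmu_disable gets batching via
 * perf_pmu_start_txn/perf_pmu_commit_txn; one with neither gets the
 * nop stubs.  Error unwinding of already-added siblings is omitted.
 */
static int __maybe_unused group_sched_in_sketch(struct perf_event *leader)
{
	struct pmu *pmu = leader->pmu;
	struct perf_event *sibling;

	pmu->start_txn(pmu);			/* typically perf_pmu_disable() */

	if (pmu->add(leader, PERF_EF_START))
		goto fail;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (pmu->add(sibling, PERF_EF_START))
			goto fail;
	}

	if (!pmu->commit_txn(pmu))		/* typically perf_pmu_enable() */
		return 0;
fail:
	pmu->cancel_txn(pmu);			/* drop the batched state */
	return -EAGAIN;
}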
35edc2a5
PZ
5633static int perf_event_idx_default(struct perf_event *event)
5634{
5635 return event->hw.idx + 1;
5636}
5637
8dc85d54
PZ
5638/*
5639 * Ensures all contexts with the same task_ctx_nr have the same
5640 * pmu_cpu_context too.
5641 */
5642static void *find_pmu_context(int ctxn)
24f1e32c 5643{
8dc85d54 5644 struct pmu *pmu;
b326e956 5645
8dc85d54
PZ
5646 if (ctxn < 0)
5647 return NULL;
24f1e32c 5648
8dc85d54
PZ
5649 list_for_each_entry(pmu, &pmus, entry) {
5650 if (pmu->task_ctx_nr == ctxn)
5651 return pmu->pmu_cpu_context;
5652 }
24f1e32c 5653
8dc85d54 5654 return NULL;
24f1e32c
FW
5655}
5656
51676957 5657static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
24f1e32c 5658{
51676957
PZ
5659 int cpu;
5660
5661 for_each_possible_cpu(cpu) {
5662 struct perf_cpu_context *cpuctx;
5663
5664 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5665
5666 if (cpuctx->active_pmu == old_pmu)
5667 cpuctx->active_pmu = pmu;
5668 }
5669}
5670
5671static void free_pmu_context(struct pmu *pmu)
5672{
5673 struct pmu *i;
f5ffe02e 5674
8dc85d54 5675 mutex_lock(&pmus_lock);
0475f9ea 5676 /*
8dc85d54 5677 * Like a real lame refcount.
0475f9ea 5678 */
51676957
PZ
5679 list_for_each_entry(i, &pmus, entry) {
5680 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
5681 update_pmu_context(i, pmu);
8dc85d54 5682 goto out;
51676957 5683 }
8dc85d54 5684 }
d6d020e9 5685
51676957 5686 free_percpu(pmu->pmu_cpu_context);
8dc85d54
PZ
5687out:
5688 mutex_unlock(&pmus_lock);
24f1e32c 5689}
2e80a82a 5690static struct idr pmu_idr;
d6d020e9 5691
abe43400
PZ
5692static ssize_t
5693type_show(struct device *dev, struct device_attribute *attr, char *page)
5694{
5695 struct pmu *pmu = dev_get_drvdata(dev);
5696
5697 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
5698}
5699
5700static struct device_attribute pmu_dev_attrs[] = {
5701 __ATTR_RO(type),
5702 __ATTR_NULL,
5703};
5704
5705static int pmu_bus_running;
5706static struct bus_type pmu_bus = {
5707 .name = "event_source",
5708 .dev_attrs = pmu_dev_attrs,
5709};
5710
5711static void pmu_dev_release(struct device *dev)
5712{
5713 kfree(dev);
5714}
5715
5716static int pmu_dev_alloc(struct pmu *pmu)
5717{
5718 int ret = -ENOMEM;
5719
5720 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
5721 if (!pmu->dev)
5722 goto out;
5723
0c9d42ed 5724 pmu->dev->groups = pmu->attr_groups;
abe43400
PZ
5725 device_initialize(pmu->dev);
5726 ret = dev_set_name(pmu->dev, "%s", pmu->name);
5727 if (ret)
5728 goto free_dev;
5729
5730 dev_set_drvdata(pmu->dev, pmu);
5731 pmu->dev->bus = &pmu_bus;
5732 pmu->dev->release = pmu_dev_release;
5733 ret = device_add(pmu->dev);
5734 if (ret)
5735 goto free_dev;
5736
5737out:
5738 return ret;
5739
5740free_dev:
5741 put_device(pmu->dev);
5742 goto out;
5743}
5744
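/*
 * Illustrative userspace example, not part of the original file: once
 * pmu_dev_alloc() has added the device to the "event_source" bus, the
 * value exported by type_show() can be read back and placed straight
 * into perf_event_attr.type.  The helper below is hypothetical.
 */
#if 0	/* userspace example */
#include <stdio.h>

static int read_pmu_type(const char *pmu_name)
{
	char path[256];
	FILE *f;
	int type = -1;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/type", pmu_name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1)
		type = -1;
	fclose(f);
	return type;
}
#endif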
547e9fd7 5745static struct lock_class_key cpuctx_mutex;
facc4307 5746static struct lock_class_key cpuctx_lock;
547e9fd7 5747
2e80a82a 5748int perf_pmu_register(struct pmu *pmu, char *name, int type)
24f1e32c 5749{
108b02cf 5750 int cpu, ret;
24f1e32c 5751
b0a873eb 5752 mutex_lock(&pmus_lock);
33696fc0
PZ
5753 ret = -ENOMEM;
5754 pmu->pmu_disable_count = alloc_percpu(int);
5755 if (!pmu->pmu_disable_count)
5756 goto unlock;
f29ac756 5757
2e80a82a
PZ
5758 pmu->type = -1;
5759 if (!name)
5760 goto skip_type;
5761 pmu->name = name;
5762
5763 if (type < 0) {
5764 int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
5765 if (!err)
5766 goto free_pdc;
5767
5768 err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
5769 if (err) {
5770 ret = err;
5771 goto free_pdc;
5772 }
5773 }
5774 pmu->type = type;
5775
abe43400
PZ
5776 if (pmu_bus_running) {
5777 ret = pmu_dev_alloc(pmu);
5778 if (ret)
5779 goto free_idr;
5780 }
5781
2e80a82a 5782skip_type:
8dc85d54
PZ
5783 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
5784 if (pmu->pmu_cpu_context)
5785 goto got_cpu_context;
f29ac756 5786
108b02cf
PZ
5787 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5788 if (!pmu->pmu_cpu_context)
abe43400 5789 goto free_dev;
f344011c 5790
108b02cf
PZ
5791 for_each_possible_cpu(cpu) {
5792 struct perf_cpu_context *cpuctx;
5793
5794 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
eb184479 5795 __perf_event_init_context(&cpuctx->ctx);
547e9fd7 5796 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
facc4307 5797 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
b04243ef 5798 cpuctx->ctx.type = cpu_context;
108b02cf 5799 cpuctx->ctx.pmu = pmu;
e9d2b064
PZ
5800 cpuctx->jiffies_interval = 1;
5801 INIT_LIST_HEAD(&cpuctx->rotation_list);
51676957 5802 cpuctx->active_pmu = pmu;
108b02cf 5803 }
76e1d904 5804
8dc85d54 5805got_cpu_context:
ad5133b7
PZ
5806 if (!pmu->start_txn) {
5807 if (pmu->pmu_enable) {
5808 /*
5809 * If we have pmu_enable/pmu_disable calls, install
5810 * transaction stubs that use that to try and batch
5811 * hardware accesses.
5812 */
5813 pmu->start_txn = perf_pmu_start_txn;
5814 pmu->commit_txn = perf_pmu_commit_txn;
5815 pmu->cancel_txn = perf_pmu_cancel_txn;
5816 } else {
5817 pmu->start_txn = perf_pmu_nop_void;
5818 pmu->commit_txn = perf_pmu_nop_int;
5819 pmu->cancel_txn = perf_pmu_nop_void;
f344011c 5820 }
5c92d124 5821 }
15dbf27c 5822
ad5133b7
PZ
5823 if (!pmu->pmu_enable) {
5824 pmu->pmu_enable = perf_pmu_nop_void;
5825 pmu->pmu_disable = perf_pmu_nop_void;
5826 }
5827
35edc2a5
PZ
5828 if (!pmu->event_idx)
5829 pmu->event_idx = perf_event_idx_default;
5830
b0a873eb 5831 list_add_rcu(&pmu->entry, &pmus);
33696fc0
PZ
5832 ret = 0;
5833unlock:
b0a873eb
PZ
5834 mutex_unlock(&pmus_lock);
5835
33696fc0 5836 return ret;
108b02cf 5837
abe43400
PZ
5838free_dev:
5839 device_del(pmu->dev);
5840 put_device(pmu->dev);
5841
2e80a82a
PZ
5842free_idr:
5843 if (pmu->type >= PERF_TYPE_MAX)
5844 idr_remove(&pmu_idr, pmu->type);
5845
108b02cf
PZ
5846free_pdc:
5847 free_percpu(pmu->pmu_disable_count);
5848 goto unlock;
f29ac756
PZ
5849}
5850
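/*
 * Illustrative sketch of a hypothetical caller, not part of the original
 * file: a minimal perf_pmu_register() user.  Passing type == -1 asks for
 * a dynamic id from pmu_idr, as handled above; a PERF_TYPE_* constant
 * claims a fixed slot.  The callbacks are borrowed from the cpu-clock
 * PMU purely as placeholders; a real driver supplies its own.
 */
static struct pmu example_pmu_sketch = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= cpu_clock_event_init,
	.add		= cpu_clock_event_add,
	.del		= cpu_clock_event_del,
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,
};

static int __init example_pmu_register_sketch(void)
{
	/* "example" then appears as /sys/bus/event_source/devices/example */
	return perf_pmu_register(&example_pmu_sketch, "example", -1);
}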
b0a873eb 5851void perf_pmu_unregister(struct pmu *pmu)
5c92d124 5852{
b0a873eb
PZ
5853 mutex_lock(&pmus_lock);
5854 list_del_rcu(&pmu->entry);
5855 mutex_unlock(&pmus_lock);
5c92d124 5856
0475f9ea 5857 /*
cde8e884
PZ
5858 * We dereference the pmu list under both SRCU and regular RCU, so
5859 * synchronize against both of those.
0475f9ea 5860 */
b0a873eb 5861 synchronize_srcu(&pmus_srcu);
cde8e884 5862 synchronize_rcu();
d6d020e9 5863
33696fc0 5864 free_percpu(pmu->pmu_disable_count);
2e80a82a
PZ
5865 if (pmu->type >= PERF_TYPE_MAX)
5866 idr_remove(&pmu_idr, pmu->type);
abe43400
PZ
5867 device_del(pmu->dev);
5868 put_device(pmu->dev);
51676957 5869 free_pmu_context(pmu);
b0a873eb 5870}
d6d020e9 5871
b0a873eb
PZ
5872struct pmu *perf_init_event(struct perf_event *event)
5873{
5874 struct pmu *pmu = NULL;
5875 int idx;
940c5b29 5876 int ret;
b0a873eb
PZ
5877
5878 idx = srcu_read_lock(&pmus_srcu);
2e80a82a
PZ
5879
5880 rcu_read_lock();
5881 pmu = idr_find(&pmu_idr, event->attr.type);
5882 rcu_read_unlock();
940c5b29 5883 if (pmu) {
7e5b2a01 5884 event->pmu = pmu;
940c5b29
LM
5885 ret = pmu->event_init(event);
5886 if (ret)
5887 pmu = ERR_PTR(ret);
2e80a82a 5888 goto unlock;
940c5b29 5889 }
2e80a82a 5890
b0a873eb 5891 list_for_each_entry_rcu(pmu, &pmus, entry) {
7e5b2a01 5892 event->pmu = pmu;
940c5b29 5893 ret = pmu->event_init(event);
b0a873eb 5894 if (!ret)
e5f4d339 5895 goto unlock;
76e1d904 5896
b0a873eb
PZ
5897 if (ret != -ENOENT) {
5898 pmu = ERR_PTR(ret);
e5f4d339 5899 goto unlock;
f344011c 5900 }
5c92d124 5901 }
e5f4d339
PZ
5902 pmu = ERR_PTR(-ENOENT);
5903unlock:
b0a873eb 5904 srcu_read_unlock(&pmus_srcu, idx);
15dbf27c 5905
4aeb0b42 5906 return pmu;
5c92d124
IM
5907}
5908
0793a61d 5909/*
cdd6c482 5910 * Allocate and initialize an event structure
0793a61d 5911 */
cdd6c482 5912static struct perf_event *
c3f00c70 5913perf_event_alloc(struct perf_event_attr *attr, int cpu,
d580ff86
PZ
5914 struct task_struct *task,
5915 struct perf_event *group_leader,
5916 struct perf_event *parent_event,
4dc0da86
AK
5917 perf_overflow_handler_t overflow_handler,
5918 void *context)
0793a61d 5919{
51b0fe39 5920 struct pmu *pmu;
cdd6c482
IM
5921 struct perf_event *event;
5922 struct hw_perf_event *hwc;
d5d2bc0d 5923 long err;
0793a61d 5924
66832eb4
ON
5925 if ((unsigned)cpu >= nr_cpu_ids) {
5926 if (!task || cpu != -1)
5927 return ERR_PTR(-EINVAL);
5928 }
5929
c3f00c70 5930 event = kzalloc(sizeof(*event), GFP_KERNEL);
cdd6c482 5931 if (!event)
d5d2bc0d 5932 return ERR_PTR(-ENOMEM);
0793a61d 5933
04289bb9 5934 /*
cdd6c482 5935 * Single events are their own group leaders, with an
04289bb9
IM
5936 * empty sibling list:
5937 */
5938 if (!group_leader)
cdd6c482 5939 group_leader = event;
04289bb9 5940
cdd6c482
IM
5941 mutex_init(&event->child_mutex);
5942 INIT_LIST_HEAD(&event->child_list);
fccc714b 5943
cdd6c482
IM
5944 INIT_LIST_HEAD(&event->group_entry);
5945 INIT_LIST_HEAD(&event->event_entry);
5946 INIT_LIST_HEAD(&event->sibling_list);
10c6db11
PZ
5947 INIT_LIST_HEAD(&event->rb_entry);
5948
cdd6c482 5949 init_waitqueue_head(&event->waitq);
e360adbe 5950 init_irq_work(&event->pending, perf_pending_event);
0793a61d 5951
cdd6c482 5952 mutex_init(&event->mmap_mutex);
7b732a75 5953
a6fa941d 5954 atomic_long_set(&event->refcount, 1);
cdd6c482
IM
5955 event->cpu = cpu;
5956 event->attr = *attr;
5957 event->group_leader = group_leader;
5958 event->pmu = NULL;
cdd6c482 5959 event->oncpu = -1;
a96bbc16 5960
cdd6c482 5961 event->parent = parent_event;
b84fbc9f 5962
cdd6c482
IM
5963 event->ns = get_pid_ns(current->nsproxy->pid_ns);
5964 event->id = atomic64_inc_return(&perf_event_id);
a96bbc16 5965
cdd6c482 5966 event->state = PERF_EVENT_STATE_INACTIVE;
329d876d 5967
d580ff86
PZ
5968 if (task) {
5969 event->attach_state = PERF_ATTACH_TASK;
5970#ifdef CONFIG_HAVE_HW_BREAKPOINT
5971 /*
5972 * hw_breakpoint is a bit difficult here..
5973 */
5974 if (attr->type == PERF_TYPE_BREAKPOINT)
5975 event->hw.bp_target = task;
5976#endif
5977 }
5978
4dc0da86 5979 if (!overflow_handler && parent_event) {
b326e956 5980 overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
5981 context = parent_event->overflow_handler_context;
5982 }
66832eb4 5983
b326e956 5984 event->overflow_handler = overflow_handler;
4dc0da86 5985 event->overflow_handler_context = context;
97eaf530 5986
0d48696f 5987 if (attr->disabled)
cdd6c482 5988 event->state = PERF_EVENT_STATE_OFF;
a86ed508 5989
4aeb0b42 5990 pmu = NULL;
b8e83514 5991
cdd6c482 5992 hwc = &event->hw;
bd2b5b12 5993 hwc->sample_period = attr->sample_period;
0d48696f 5994 if (attr->freq && attr->sample_freq)
bd2b5b12 5995 hwc->sample_period = 1;
eced1dfc 5996 hwc->last_period = hwc->sample_period;
bd2b5b12 5997
e7850595 5998 local64_set(&hwc->period_left, hwc->sample_period);
60db5e09 5999
2023b359 6000 /*
cdd6c482 6001 * we currently do not support PERF_FORMAT_GROUP on inherited events
2023b359 6002 */
3dab77fb 6003 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
2023b359
PZ
6004 goto done;
6005
b0a873eb 6006 pmu = perf_init_event(event);
974802ea 6007
d5d2bc0d
PM
6008done:
6009 err = 0;
4aeb0b42 6010 if (!pmu)
d5d2bc0d 6011 err = -EINVAL;
4aeb0b42
RR
6012 else if (IS_ERR(pmu))
6013 err = PTR_ERR(pmu);
5c92d124 6014
d5d2bc0d 6015 if (err) {
cdd6c482
IM
6016 if (event->ns)
6017 put_pid_ns(event->ns);
6018 kfree(event);
d5d2bc0d 6019 return ERR_PTR(err);
621a01ea 6020 }
d5d2bc0d 6021
cdd6c482 6022 if (!event->parent) {
82cd6def 6023 if (event->attach_state & PERF_ATTACH_TASK)
c5905afb 6024 static_key_slow_inc(&perf_sched_events.key);
3af9e859 6025 if (event->attr.mmap || event->attr.mmap_data)
cdd6c482
IM
6026 atomic_inc(&nr_mmap_events);
6027 if (event->attr.comm)
6028 atomic_inc(&nr_comm_events);
6029 if (event->attr.task)
6030 atomic_inc(&nr_task_events);
927c7a9e
FW
6031 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6032 err = get_callchain_buffers();
6033 if (err) {
6034 free_event(event);
6035 return ERR_PTR(err);
6036 }
6037 }
d010b332
SE
6038 if (has_branch_stack(event)) {
6039 static_key_slow_inc(&perf_sched_events.key);
6040 if (!(event->attach_state & PERF_ATTACH_TASK))
6041 atomic_inc(&per_cpu(perf_branch_stack_events,
6042 event->cpu));
6043 }
f344011c 6044 }
9ee318a7 6045
cdd6c482 6046 return event;
0793a61d
TG
6047}
6048
cdd6c482
IM
6049static int perf_copy_attr(struct perf_event_attr __user *uattr,
6050 struct perf_event_attr *attr)
974802ea 6051{
974802ea 6052 u32 size;
cdf8073d 6053 int ret;
974802ea
PZ
6054
6055 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6056 return -EFAULT;
6057
6058 /*
6059 * zero the full structure, so that a short copy will be nice.
6060 */
6061 memset(attr, 0, sizeof(*attr));
6062
6063 ret = get_user(size, &uattr->size);
6064 if (ret)
6065 return ret;
6066
6067 if (size > PAGE_SIZE) /* silly large */
6068 goto err_size;
6069
6070 if (!size) /* abi compat */
6071 size = PERF_ATTR_SIZE_VER0;
6072
6073 if (size < PERF_ATTR_SIZE_VER0)
6074 goto err_size;
6075
6076 /*
6077 * If we're handed a bigger struct than we know of,
cdf8073d
IS
6078 * ensure all the unknown bits are 0 - i.e. new
6079 * user-space does not rely on any kernel feature
 6080 * extensions we don't know about yet.
974802ea
PZ
6081 */
6082 if (size > sizeof(*attr)) {
cdf8073d
IS
6083 unsigned char __user *addr;
6084 unsigned char __user *end;
6085 unsigned char val;
974802ea 6086
cdf8073d
IS
6087 addr = (void __user *)uattr + sizeof(*attr);
6088 end = (void __user *)uattr + size;
974802ea 6089
cdf8073d 6090 for (; addr < end; addr++) {
974802ea
PZ
6091 ret = get_user(val, addr);
6092 if (ret)
6093 return ret;
6094 if (val)
6095 goto err_size;
6096 }
b3e62e35 6097 size = sizeof(*attr);
974802ea
PZ
6098 }
6099
6100 ret = copy_from_user(attr, uattr, size);
6101 if (ret)
6102 return -EFAULT;
6103
cd757645 6104 if (attr->__reserved_1)
974802ea
PZ
6105 return -EINVAL;
6106
6107 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
6108 return -EINVAL;
6109
6110 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
6111 return -EINVAL;
6112
bce38cd5
SE
6113 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
6114 u64 mask = attr->branch_sample_type;
6115
6116 /* only using defined bits */
6117 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
6118 return -EINVAL;
6119
6120 /* at least one branch bit must be set */
6121 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
6122 return -EINVAL;
6123
6124 /* kernel level capture: check permissions */
6125 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
6126 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6127 return -EACCES;
6128
6129 /* propagate priv level, when not set for branch */
6130 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
6131
6132 /* exclude_kernel checked on syscall entry */
6133 if (!attr->exclude_kernel)
6134 mask |= PERF_SAMPLE_BRANCH_KERNEL;
6135
6136 if (!attr->exclude_user)
6137 mask |= PERF_SAMPLE_BRANCH_USER;
6138
6139 if (!attr->exclude_hv)
6140 mask |= PERF_SAMPLE_BRANCH_HV;
6141 /*
6142 * adjust user setting (for HW filter setup)
6143 */
6144 attr->branch_sample_type = mask;
6145 }
6146 }
974802ea
PZ
6147out:
6148 return ret;
6149
6150err_size:
6151 put_user(sizeof(*attr), &uattr->size);
6152 ret = -E2BIG;
6153 goto out;
6154}
6155
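/*
 * Illustrative userspace example, not part of the original file: the
 * size handshake in perf_copy_attr() is what keeps old and new binaries
 * compatible.  A caller zero-fills the attr, sets attr.size to the
 * structure it was built against, and a kernel too old to know the
 * extra fields fails with E2BIG and writes its own size back.
 */
#if 0	/* userspace example */
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_cycles_counter(pid_t pid, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));		/* unknown fields must be zero */
	attr.size = sizeof(attr);		/* the size this binary was built with */
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;

	return syscall(__NR_perf_event_open, &attr, pid, cpu,
		       -1 /* group_fd */, 0 /* flags */);
}
#endif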
ac9721f3
PZ
6156static int
6157perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
a4be7c27 6158{
76369139 6159 struct ring_buffer *rb = NULL, *old_rb = NULL;
a4be7c27
PZ
6160 int ret = -EINVAL;
6161
ac9721f3 6162 if (!output_event)
a4be7c27
PZ
6163 goto set;
6164
ac9721f3
PZ
6165 /* don't allow circular references */
6166 if (event == output_event)
a4be7c27
PZ
6167 goto out;
6168
0f139300
PZ
6169 /*
6170 * Don't allow cross-cpu buffers
6171 */
6172 if (output_event->cpu != event->cpu)
6173 goto out;
6174
6175 /*
76369139 6176 * If it's not a per-cpu rb, it must be the same task.
0f139300
PZ
6177 */
6178 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
6179 goto out;
6180
a4be7c27 6181set:
cdd6c482 6182 mutex_lock(&event->mmap_mutex);
ac9721f3
PZ
6183 /* Can't redirect output if we've got an active mmap() */
6184 if (atomic_read(&event->mmap_count))
6185 goto unlock;
a4be7c27 6186
ac9721f3 6187 if (output_event) {
76369139
FW
6188 /* get the rb we want to redirect to */
6189 rb = ring_buffer_get(output_event);
6190 if (!rb)
ac9721f3 6191 goto unlock;
a4be7c27
PZ
6192 }
6193
76369139
FW
6194 old_rb = event->rb;
6195 rcu_assign_pointer(event->rb, rb);
10c6db11
PZ
6196 if (old_rb)
6197 ring_buffer_detach(event, old_rb);
a4be7c27 6198 ret = 0;
ac9721f3
PZ
6199unlock:
6200 mutex_unlock(&event->mmap_mutex);
6201
76369139
FW
6202 if (old_rb)
6203 ring_buffer_put(old_rb);
a4be7c27 6204out:
a4be7c27
PZ
6205 return ret;
6206}
6207
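/*
 * Illustrative note, not part of the original file: userspace reaches
 * perf_event_set_output() via the PERF_EVENT_IOC_SET_OUTPUT ioctl,
 * redirecting one event's samples into another event's ring buffer so
 * a single mmap() can service several events, e.g.
 *
 *	ioctl(event_fd, PERF_EVENT_IOC_SET_OUTPUT, leader_fd);
 *
 * subject to the same-cpu / same-task restrictions checked above.
 */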
0793a61d 6208/**
cdd6c482 6209 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9f66a381 6210 *
cdd6c482 6211 * @attr_uptr: event_id type attributes for monitoring/sampling
0793a61d 6212 * @pid: target pid
9f66a381 6213 * @cpu: target cpu
cdd6c482 6214 * @group_fd: group leader event fd
0793a61d 6215 */
cdd6c482
IM
6216SYSCALL_DEFINE5(perf_event_open,
6217 struct perf_event_attr __user *, attr_uptr,
2743a5b0 6218 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
0793a61d 6219{
b04243ef
PZ
6220 struct perf_event *group_leader = NULL, *output_event = NULL;
6221 struct perf_event *event, *sibling;
cdd6c482
IM
6222 struct perf_event_attr attr;
6223 struct perf_event_context *ctx;
6224 struct file *event_file = NULL;
2903ff01 6225 struct fd group = {NULL, 0};
38a81da2 6226 struct task_struct *task = NULL;
89a1e187 6227 struct pmu *pmu;
ea635c64 6228 int event_fd;
b04243ef 6229 int move_group = 0;
dc86cabe 6230 int err;
0793a61d 6231
2743a5b0 6232 /* for future expandability... */
e5d1367f 6233 if (flags & ~PERF_FLAG_ALL)
2743a5b0
PM
6234 return -EINVAL;
6235
dc86cabe
IM
6236 err = perf_copy_attr(attr_uptr, &attr);
6237 if (err)
6238 return err;
eab656ae 6239
0764771d
PZ
6240 if (!attr.exclude_kernel) {
6241 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6242 return -EACCES;
6243 }
6244
df58ab24 6245 if (attr.freq) {
cdd6c482 6246 if (attr.sample_freq > sysctl_perf_event_sample_rate)
df58ab24
PZ
6247 return -EINVAL;
6248 }
6249
e5d1367f
SE
6250 /*
6251 * In cgroup mode, the pid argument is used to pass the fd
6252 * opened to the cgroup directory in cgroupfs. The cpu argument
6253 * designates the cpu on which to monitor threads from that
6254 * cgroup.
6255 */
6256 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
6257 return -EINVAL;
6258
ab72a702 6259 event_fd = get_unused_fd();
ea635c64
AV
6260 if (event_fd < 0)
6261 return event_fd;
6262
ac9721f3 6263 if (group_fd != -1) {
2903ff01
AV
6264 err = perf_fget_light(group_fd, &group);
6265 if (err)
d14b12d7 6266 goto err_fd;
2903ff01 6267 group_leader = group.file->private_data;
ac9721f3
PZ
6268 if (flags & PERF_FLAG_FD_OUTPUT)
6269 output_event = group_leader;
6270 if (flags & PERF_FLAG_FD_NO_GROUP)
6271 group_leader = NULL;
6272 }
6273
e5d1367f 6274 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
c6be5a5c
PZ
6275 task = find_lively_task_by_vpid(pid);
6276 if (IS_ERR(task)) {
6277 err = PTR_ERR(task);
6278 goto err_group_fd;
6279 }
6280 }
6281
fbfc623f
YZ
6282 get_online_cpus();
6283
4dc0da86
AK
6284 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
6285 NULL, NULL);
d14b12d7
SE
6286 if (IS_ERR(event)) {
6287 err = PTR_ERR(event);
c6be5a5c 6288 goto err_task;
d14b12d7
SE
6289 }
6290
e5d1367f
SE
6291 if (flags & PERF_FLAG_PID_CGROUP) {
6292 err = perf_cgroup_connect(pid, event, &attr, group_leader);
6293 if (err)
6294 goto err_alloc;
08309379
PZ
6295 /*
6296 * one more event:
6297 * - that has cgroup constraint on event->cpu
6298 * - that may need work on context switch
6299 */
6300 atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
c5905afb 6301 static_key_slow_inc(&perf_sched_events.key);
e5d1367f
SE
6302 }
6303
89a1e187
PZ
6304 /*
6305 * Special case software events and allow them to be part of
6306 * any hardware group.
6307 */
6308 pmu = event->pmu;
b04243ef
PZ
6309
6310 if (group_leader &&
6311 (is_software_event(event) != is_software_event(group_leader))) {
6312 if (is_software_event(event)) {
6313 /*
6314 * If event and group_leader are not both a software
6315 * event, and event is, then group leader is not.
6316 *
6317 * Allow the addition of software events to !software
6318 * groups, this is safe because software events never
6319 * fail to schedule.
6320 */
6321 pmu = group_leader->pmu;
6322 } else if (is_software_event(group_leader) &&
6323 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
6324 /*
6325 * In case the group is a pure software group, and we
6326 * try to add a hardware event, move the whole group to
6327 * the hardware context.
6328 */
6329 move_group = 1;
6330 }
6331 }
89a1e187
PZ
6332
6333 /*
6334 * Get the target context (task or percpu):
6335 */
e2d37cd2 6336 ctx = find_get_context(pmu, task, event->cpu);
89a1e187
PZ
6337 if (IS_ERR(ctx)) {
6338 err = PTR_ERR(ctx);
c6be5a5c 6339 goto err_alloc;
89a1e187
PZ
6340 }
6341
fd1edb3a
PZ
6342 if (task) {
6343 put_task_struct(task);
6344 task = NULL;
6345 }
6346
ccff286d 6347 /*
cdd6c482 6348 * Look up the group leader (we will attach this event to it):
04289bb9 6349 */
ac9721f3 6350 if (group_leader) {
dc86cabe 6351 err = -EINVAL;
04289bb9 6352
04289bb9 6353 /*
ccff286d
IM
6354 * Do not allow a recursive hierarchy (this new sibling
6355 * becoming part of another group-sibling):
6356 */
6357 if (group_leader->group_leader != group_leader)
c3f00c70 6358 goto err_context;
ccff286d
IM
6359 /*
6360 * Do not allow to attach to a group in a different
6361 * task or CPU context:
04289bb9 6362 */
b04243ef
PZ
6363 if (move_group) {
6364 if (group_leader->ctx->type != ctx->type)
6365 goto err_context;
6366 } else {
6367 if (group_leader->ctx != ctx)
6368 goto err_context;
6369 }
6370
3b6f9e5c
PM
6371 /*
6372 * Only a group leader can be exclusive or pinned
6373 */
0d48696f 6374 if (attr.exclusive || attr.pinned)
c3f00c70 6375 goto err_context;
ac9721f3
PZ
6376 }
6377
6378 if (output_event) {
6379 err = perf_event_set_output(event, output_event);
6380 if (err)
c3f00c70 6381 goto err_context;
ac9721f3 6382 }
0793a61d 6383
ea635c64
AV
6384 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
6385 if (IS_ERR(event_file)) {
6386 err = PTR_ERR(event_file);
c3f00c70 6387 goto err_context;
ea635c64 6388 }
9b51f66d 6389
b04243ef
PZ
6390 if (move_group) {
6391 struct perf_event_context *gctx = group_leader->ctx;
6392
6393 mutex_lock(&gctx->mutex);
fe4b04fa 6394 perf_remove_from_context(group_leader);
b04243ef
PZ
6395 list_for_each_entry(sibling, &group_leader->sibling_list,
6396 group_entry) {
fe4b04fa 6397 perf_remove_from_context(sibling);
b04243ef
PZ
6398 put_ctx(gctx);
6399 }
6400 mutex_unlock(&gctx->mutex);
6401 put_ctx(gctx);
ea635c64 6402 }
9b51f66d 6403
ad3a37de 6404 WARN_ON_ONCE(ctx->parent_ctx);
d859e29f 6405 mutex_lock(&ctx->mutex);
b04243ef
PZ
6406
6407 if (move_group) {
0cda4c02 6408 synchronize_rcu();
e2d37cd2 6409 perf_install_in_context(ctx, group_leader, event->cpu);
b04243ef
PZ
6410 get_ctx(ctx);
6411 list_for_each_entry(sibling, &group_leader->sibling_list,
6412 group_entry) {
e2d37cd2 6413 perf_install_in_context(ctx, sibling, event->cpu);
b04243ef
PZ
6414 get_ctx(ctx);
6415 }
6416 }
6417
e2d37cd2 6418 perf_install_in_context(ctx, event, event->cpu);
ad3a37de 6419 ++ctx->generation;
fe4b04fa 6420 perf_unpin_context(ctx);
d859e29f 6421 mutex_unlock(&ctx->mutex);
9b51f66d 6422
fbfc623f
YZ
6423 put_online_cpus();
6424
cdd6c482 6425 event->owner = current;
8882135b 6426
cdd6c482
IM
6427 mutex_lock(&current->perf_event_mutex);
6428 list_add_tail(&event->owner_entry, &current->perf_event_list);
6429 mutex_unlock(&current->perf_event_mutex);
082ff5a2 6430
c320c7b7
ACM
6431 /*
6432 * Precalculate sample_data sizes
6433 */
6434 perf_event__header_size(event);
6844c09d 6435 perf_event__id_header_size(event);
c320c7b7 6436
8a49542c
PZ
6437 /*
6438 * Drop the reference on the group_event after placing the
6439 * new event on the sibling_list. This ensures destruction
6440 * of the group leader will find the pointer to itself in
6441 * perf_group_detach().
6442 */
2903ff01 6443 fdput(group);
ea635c64
AV
6444 fd_install(event_fd, event_file);
6445 return event_fd;
0793a61d 6446
c3f00c70 6447err_context:
fe4b04fa 6448 perf_unpin_context(ctx);
ea635c64 6449 put_ctx(ctx);
c6be5a5c 6450err_alloc:
ea635c64 6451 free_event(event);
e7d0bc04 6452err_task:
fbfc623f 6453 put_online_cpus();
e7d0bc04
PZ
6454 if (task)
6455 put_task_struct(task);
89a1e187 6456err_group_fd:
2903ff01 6457 fdput(group);
ea635c64
AV
6458err_fd:
6459 put_unused_fd(event_fd);
dc86cabe 6460 return err;
0793a61d
TG
6461}
6462
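/*
 * Illustrative sketch, not part of the original file: a plausible shape
 * for the perf_fget_light() helper used for the group_fd lookup above,
 * assuming the struct fd / fdget() / fdput() API from <linux/file.h>.
 * The real definition lives earlier in this file and may differ in
 * detail.
 */
static int perf_fget_light_sketch(int fd, struct fd *p)
{
	struct fd f = fdget(fd);

	if (!f.file)
		return -EBADF;

	if (f.file->f_op != &perf_fops) {	/* must be a perf event fd */
		fdput(f);
		return -EBADF;
	}
	*p = f;
	return 0;
}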
fb0459d7
AV
6463/**
6464 * perf_event_create_kernel_counter
6465 *
6466 * @attr: attributes of the counter to create
6467 * @cpu: cpu in which the counter is bound
38a81da2 6468 * @task: task to profile (NULL for percpu)
fb0459d7
AV
6469 */
6470struct perf_event *
6471perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
38a81da2 6472 struct task_struct *task,
4dc0da86
AK
6473 perf_overflow_handler_t overflow_handler,
6474 void *context)
fb0459d7 6475{
fb0459d7 6476 struct perf_event_context *ctx;
c3f00c70 6477 struct perf_event *event;
fb0459d7 6478 int err;
d859e29f 6479
fb0459d7
AV
6480 /*
6481 * Get the target context (task or percpu):
6482 */
d859e29f 6483
4dc0da86
AK
6484 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
6485 overflow_handler, context);
c3f00c70
PZ
6486 if (IS_ERR(event)) {
6487 err = PTR_ERR(event);
6488 goto err;
6489 }
d859e29f 6490
38a81da2 6491 ctx = find_get_context(event->pmu, task, cpu);
c6567f64
FW
6492 if (IS_ERR(ctx)) {
6493 err = PTR_ERR(ctx);
c3f00c70 6494 goto err_free;
d859e29f 6495 }
fb0459d7 6496
fb0459d7
AV
6497 WARN_ON_ONCE(ctx->parent_ctx);
6498 mutex_lock(&ctx->mutex);
6499 perf_install_in_context(ctx, event, cpu);
6500 ++ctx->generation;
fe4b04fa 6501 perf_unpin_context(ctx);
fb0459d7
AV
6502 mutex_unlock(&ctx->mutex);
6503
fb0459d7
AV
6504 return event;
6505
c3f00c70
PZ
6506err_free:
6507 free_event(event);
6508err:
c6567f64 6509 return ERR_PTR(err);
9b51f66d 6510}
fb0459d7 6511EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
9b51f66d 6512
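/*
 * Illustrative sketch of a hypothetical in-kernel user, not part of the
 * original file: callers such as the hardlockup watchdog create counters
 * through the export above, supplying an overflow handler instead of
 * going through a file descriptor.  The field values are examples only.
 */
static __maybe_unused struct perf_event *
create_cycle_counter_sketch(int cpu, perf_overflow_handler_t handler)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(struct perf_event_attr),
		.sample_period	= 1000000,	/* fire the handler every 1M cycles */
		.pinned		= 1,
	};

	/* per-cpu counter: task == NULL; no private context pointer needed */
	return perf_event_create_kernel_counter(&attr, cpu, NULL, handler, NULL);
}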
0cda4c02
YZ
6513void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
6514{
6515 struct perf_event_context *src_ctx;
6516 struct perf_event_context *dst_ctx;
6517 struct perf_event *event, *tmp;
6518 LIST_HEAD(events);
6519
6520 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
6521 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
6522
6523 mutex_lock(&src_ctx->mutex);
6524 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
6525 event_entry) {
6526 perf_remove_from_context(event);
6527 put_ctx(src_ctx);
6528 list_add(&event->event_entry, &events);
6529 }
6530 mutex_unlock(&src_ctx->mutex);
6531
6532 synchronize_rcu();
6533
6534 mutex_lock(&dst_ctx->mutex);
6535 list_for_each_entry_safe(event, tmp, &events, event_entry) {
6536 list_del(&event->event_entry);
6537 if (event->state >= PERF_EVENT_STATE_OFF)
6538 event->state = PERF_EVENT_STATE_INACTIVE;
6539 perf_install_in_context(dst_ctx, event, dst_cpu);
6540 get_ctx(dst_ctx);
6541 }
6542 mutex_unlock(&dst_ctx->mutex);
6543}
6544EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
6545
cdd6c482 6546static void sync_child_event(struct perf_event *child_event,
38b200d6 6547 struct task_struct *child)
d859e29f 6548{
cdd6c482 6549 struct perf_event *parent_event = child_event->parent;
8bc20959 6550 u64 child_val;
d859e29f 6551
cdd6c482
IM
6552 if (child_event->attr.inherit_stat)
6553 perf_event_read_event(child_event, child);
38b200d6 6554
b5e58793 6555 child_val = perf_event_count(child_event);
d859e29f
PM
6556
6557 /*
6558 * Add back the child's count to the parent's count:
6559 */
a6e6dea6 6560 atomic64_add(child_val, &parent_event->child_count);
cdd6c482
IM
6561 atomic64_add(child_event->total_time_enabled,
6562 &parent_event->child_total_time_enabled);
6563 atomic64_add(child_event->total_time_running,
6564 &parent_event->child_total_time_running);
d859e29f
PM
6565
6566 /*
cdd6c482 6567 * Remove this event from the parent's list
d859e29f 6568 */
cdd6c482
IM
6569 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6570 mutex_lock(&parent_event->child_mutex);
6571 list_del_init(&child_event->child_list);
6572 mutex_unlock(&parent_event->child_mutex);
d859e29f
PM
6573
6574 /*
cdd6c482 6575 * Release the parent event, if this was the last
d859e29f
PM
6576 * reference to it.
6577 */
a6fa941d 6578 put_event(parent_event);
d859e29f
PM
6579}
6580
9b51f66d 6581static void
cdd6c482
IM
6582__perf_event_exit_task(struct perf_event *child_event,
6583 struct perf_event_context *child_ctx,
38b200d6 6584 struct task_struct *child)
9b51f66d 6585{
38b435b1
PZ
6586 if (child_event->parent) {
6587 raw_spin_lock_irq(&child_ctx->lock);
6588 perf_group_detach(child_event);
6589 raw_spin_unlock_irq(&child_ctx->lock);
6590 }
9b51f66d 6591
fe4b04fa 6592 perf_remove_from_context(child_event);
0cc0c027 6593
9b51f66d 6594 /*
38b435b1 6595 * It can happen that the parent exits first, and has events
9b51f66d 6596 * that are still around due to the child reference. These
38b435b1 6597 * events need to be zapped.
9b51f66d 6598 */
38b435b1 6599 if (child_event->parent) {
cdd6c482
IM
6600 sync_child_event(child_event, child);
6601 free_event(child_event);
4bcf349a 6602 }
9b51f66d
IM
6603}
6604
8dc85d54 6605static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9b51f66d 6606{
cdd6c482
IM
6607 struct perf_event *child_event, *tmp;
6608 struct perf_event_context *child_ctx;
a63eaf34 6609 unsigned long flags;
9b51f66d 6610
8dc85d54 6611 if (likely(!child->perf_event_ctxp[ctxn])) {
cdd6c482 6612 perf_event_task(child, NULL, 0);
9b51f66d 6613 return;
9f498cc5 6614 }
9b51f66d 6615
a63eaf34 6616 local_irq_save(flags);
ad3a37de
PM
6617 /*
6618 * We can't reschedule here because interrupts are disabled,
6619 * and either child is current or it is a task that can't be
6620 * scheduled, so we are now safe from rescheduling changing
6621 * our context.
6622 */
806839b2 6623 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
c93f7669
PM
6624
6625 /*
6626 * Take the context lock here so that if find_get_context is
cdd6c482 6627 * reading child->perf_event_ctxp, we wait until it has
c93f7669
PM
6628 * incremented the context's refcount before we do put_ctx below.
6629 */
e625cce1 6630 raw_spin_lock(&child_ctx->lock);
04dc2dbb 6631 task_ctx_sched_out(child_ctx);
8dc85d54 6632 child->perf_event_ctxp[ctxn] = NULL;
71a851b4
PZ
6633 /*
6634 * If this context is a clone; unclone it so it can't get
6635 * swapped to another process while we're removing all
cdd6c482 6636 * the events from it.
71a851b4
PZ
6637 */
6638 unclone_ctx(child_ctx);
5e942bb3 6639 update_context_time(child_ctx);
e625cce1 6640 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
9f498cc5
PZ
6641
6642 /*
cdd6c482
IM
6643 * Report the task dead after unscheduling the events so that we
6644 * won't get any samples after PERF_RECORD_EXIT. We can however still
6645 * get a few PERF_RECORD_READ events.
9f498cc5 6646 */
cdd6c482 6647 perf_event_task(child, child_ctx, 0);
a63eaf34 6648
66fff224
PZ
6649 /*
6650 * We can recurse on the same lock type through:
6651 *
cdd6c482
IM
6652 * __perf_event_exit_task()
6653 * sync_child_event()
a6fa941d
AV
6654 * put_event()
6655 * mutex_lock(&ctx->mutex)
66fff224
PZ
6656 *
 6657 * But since it's the parent context it won't be the same instance.
6658 */
a0507c84 6659 mutex_lock(&child_ctx->mutex);
a63eaf34 6660
8bc20959 6661again:
889ff015
FW
6662 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
6663 group_entry)
6664 __perf_event_exit_task(child_event, child_ctx, child);
6665
6666 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
65abc865 6667 group_entry)
cdd6c482 6668 __perf_event_exit_task(child_event, child_ctx, child);
8bc20959
PZ
6669
6670 /*
cdd6c482 6671 * If the last event was a group event, it will have appended all
8bc20959
PZ
6672 * its siblings to the list, but we obtained 'tmp' before that which
6673 * will still point to the list head terminating the iteration.
6674 */
889ff015
FW
6675 if (!list_empty(&child_ctx->pinned_groups) ||
6676 !list_empty(&child_ctx->flexible_groups))
8bc20959 6677 goto again;
a63eaf34
PM
6678
6679 mutex_unlock(&child_ctx->mutex);
6680
6681 put_ctx(child_ctx);
9b51f66d
IM
6682}
6683
8dc85d54
PZ
6684/*
6685 * When a child task exits, feed back event values to parent events.
6686 */
6687void perf_event_exit_task(struct task_struct *child)
6688{
8882135b 6689 struct perf_event *event, *tmp;
8dc85d54
PZ
6690 int ctxn;
6691
8882135b
PZ
6692 mutex_lock(&child->perf_event_mutex);
6693 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
6694 owner_entry) {
6695 list_del_init(&event->owner_entry);
6696
6697 /*
6698 * Ensure the list deletion is visible before we clear
6699 * the owner, closes a race against perf_release() where
6700 * we need to serialize on the owner->perf_event_mutex.
6701 */
6702 smp_wmb();
6703 event->owner = NULL;
6704 }
6705 mutex_unlock(&child->perf_event_mutex);
6706
8dc85d54
PZ
6707 for_each_task_context_nr(ctxn)
6708 perf_event_exit_task_context(child, ctxn);
6709}
6710
889ff015
FW
6711static void perf_free_event(struct perf_event *event,
6712 struct perf_event_context *ctx)
6713{
6714 struct perf_event *parent = event->parent;
6715
6716 if (WARN_ON_ONCE(!parent))
6717 return;
6718
6719 mutex_lock(&parent->child_mutex);
6720 list_del_init(&event->child_list);
6721 mutex_unlock(&parent->child_mutex);
6722
a6fa941d 6723 put_event(parent);
889ff015 6724
8a49542c 6725 perf_group_detach(event);
889ff015
FW
6726 list_del_event(event, ctx);
6727 free_event(event);
6728}
6729
bbbee908
PZ
6730/*
6731 * free an unexposed, unused context as created by inheritance by
8dc85d54 6732 * perf_event_init_task below, used by fork() in case of fail.
bbbee908 6733 */
cdd6c482 6734void perf_event_free_task(struct task_struct *task)
bbbee908 6735{
8dc85d54 6736 struct perf_event_context *ctx;
cdd6c482 6737 struct perf_event *event, *tmp;
8dc85d54 6738 int ctxn;
bbbee908 6739
8dc85d54
PZ
6740 for_each_task_context_nr(ctxn) {
6741 ctx = task->perf_event_ctxp[ctxn];
6742 if (!ctx)
6743 continue;
bbbee908 6744
8dc85d54 6745 mutex_lock(&ctx->mutex);
bbbee908 6746again:
8dc85d54
PZ
6747 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
6748 group_entry)
6749 perf_free_event(event, ctx);
bbbee908 6750
8dc85d54
PZ
6751 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
6752 group_entry)
6753 perf_free_event(event, ctx);
bbbee908 6754
8dc85d54
PZ
6755 if (!list_empty(&ctx->pinned_groups) ||
6756 !list_empty(&ctx->flexible_groups))
6757 goto again;
bbbee908 6758
8dc85d54 6759 mutex_unlock(&ctx->mutex);
bbbee908 6760
8dc85d54
PZ
6761 put_ctx(ctx);
6762 }
889ff015
FW
6763}
6764
4e231c79
PZ
6765void perf_event_delayed_put(struct task_struct *task)
6766{
6767 int ctxn;
6768
6769 for_each_task_context_nr(ctxn)
6770 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
6771}
6772
97dee4f3
PZ
6773/*
 6774 * inherit an event from parent task to child task:
6775 */
6776static struct perf_event *
6777inherit_event(struct perf_event *parent_event,
6778 struct task_struct *parent,
6779 struct perf_event_context *parent_ctx,
6780 struct task_struct *child,
6781 struct perf_event *group_leader,
6782 struct perf_event_context *child_ctx)
6783{
6784 struct perf_event *child_event;
cee010ec 6785 unsigned long flags;
97dee4f3
PZ
6786
6787 /*
6788 * Instead of creating recursive hierarchies of events,
6789 * we link inherited events back to the original parent,
6790 * which has a filp for sure, which we use as the reference
6791 * count:
6792 */
6793 if (parent_event->parent)
6794 parent_event = parent_event->parent;
6795
6796 child_event = perf_event_alloc(&parent_event->attr,
6797 parent_event->cpu,
d580ff86 6798 child,
97dee4f3 6799 group_leader, parent_event,
4dc0da86 6800 NULL, NULL);
97dee4f3
PZ
6801 if (IS_ERR(child_event))
6802 return child_event;
a6fa941d
AV
6803
6804 if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
6805 free_event(child_event);
6806 return NULL;
6807 }
6808
97dee4f3
PZ
6809 get_ctx(child_ctx);
6810
6811 /*
6812 * Make the child state follow the state of the parent event,
6813 * not its attr.disabled bit. We hold the parent's mutex,
6814 * so we won't race with perf_event_{en, dis}able_family.
6815 */
6816 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
6817 child_event->state = PERF_EVENT_STATE_INACTIVE;
6818 else
6819 child_event->state = PERF_EVENT_STATE_OFF;
6820
6821 if (parent_event->attr.freq) {
6822 u64 sample_period = parent_event->hw.sample_period;
6823 struct hw_perf_event *hwc = &child_event->hw;
6824
6825 hwc->sample_period = sample_period;
6826 hwc->last_period = sample_period;
6827
6828 local64_set(&hwc->period_left, sample_period);
6829 }
6830
6831 child_event->ctx = child_ctx;
6832 child_event->overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
6833 child_event->overflow_handler_context
6834 = parent_event->overflow_handler_context;
97dee4f3 6835
614b6780
TG
6836 /*
6837 * Precalculate sample_data sizes
6838 */
6839 perf_event__header_size(child_event);
6844c09d 6840 perf_event__id_header_size(child_event);
614b6780 6841
97dee4f3
PZ
6842 /*
6843 * Link it up in the child's context:
6844 */
cee010ec 6845 raw_spin_lock_irqsave(&child_ctx->lock, flags);
97dee4f3 6846 add_event_to_ctx(child_event, child_ctx);
cee010ec 6847 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
97dee4f3 6848
97dee4f3
PZ
6849 /*
6850 * Link this into the parent event's child list
6851 */
6852 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6853 mutex_lock(&parent_event->child_mutex);
6854 list_add_tail(&child_event->child_list, &parent_event->child_list);
6855 mutex_unlock(&parent_event->child_mutex);
6856
6857 return child_event;
6858}
6859
6860static int inherit_group(struct perf_event *parent_event,
6861 struct task_struct *parent,
6862 struct perf_event_context *parent_ctx,
6863 struct task_struct *child,
6864 struct perf_event_context *child_ctx)
6865{
6866 struct perf_event *leader;
6867 struct perf_event *sub;
6868 struct perf_event *child_ctr;
6869
6870 leader = inherit_event(parent_event, parent, parent_ctx,
6871 child, NULL, child_ctx);
6872 if (IS_ERR(leader))
6873 return PTR_ERR(leader);
6874 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
6875 child_ctr = inherit_event(sub, parent, parent_ctx,
6876 child, leader, child_ctx);
6877 if (IS_ERR(child_ctr))
6878 return PTR_ERR(child_ctr);
6879 }
6880 return 0;
889ff015
FW
6881}
6882
6883static int
6884inherit_task_group(struct perf_event *event, struct task_struct *parent,
6885 struct perf_event_context *parent_ctx,
8dc85d54 6886 struct task_struct *child, int ctxn,
889ff015
FW
6887 int *inherited_all)
6888{
6889 int ret;
8dc85d54 6890 struct perf_event_context *child_ctx;
889ff015
FW
6891
6892 if (!event->attr.inherit) {
6893 *inherited_all = 0;
6894 return 0;
bbbee908
PZ
6895 }
6896
fe4b04fa 6897 child_ctx = child->perf_event_ctxp[ctxn];
889ff015
FW
6898 if (!child_ctx) {
6899 /*
6900 * This is executed from the parent task context, so
6901 * inherit events that have been marked for cloning.
6902 * First allocate and initialize a context for the
6903 * child.
6904 */
bbbee908 6905
eb184479 6906 child_ctx = alloc_perf_context(event->pmu, child);
889ff015
FW
6907 if (!child_ctx)
6908 return -ENOMEM;
bbbee908 6909
8dc85d54 6910 child->perf_event_ctxp[ctxn] = child_ctx;
889ff015
FW
6911 }
6912
6913 ret = inherit_group(event, parent, parent_ctx,
6914 child, child_ctx);
6915
6916 if (ret)
6917 *inherited_all = 0;
6918
6919 return ret;
bbbee908
PZ
6920}
6921
9b51f66d 6922/*
cdd6c482 6923 * Initialize the perf_event context in task_struct
9b51f66d 6924 */
8dc85d54 6925int perf_event_init_context(struct task_struct *child, int ctxn)
9b51f66d 6926{
889ff015 6927 struct perf_event_context *child_ctx, *parent_ctx;
cdd6c482
IM
6928 struct perf_event_context *cloned_ctx;
6929 struct perf_event *event;
9b51f66d 6930 struct task_struct *parent = current;
564c2b21 6931 int inherited_all = 1;
dddd3379 6932 unsigned long flags;
6ab423e0 6933 int ret = 0;
9b51f66d 6934
8dc85d54 6935 if (likely(!parent->perf_event_ctxp[ctxn]))
6ab423e0
PZ
6936 return 0;
6937
ad3a37de 6938 /*
25346b93
PM
6939 * If the parent's context is a clone, pin it so it won't get
6940 * swapped under us.
ad3a37de 6941 */
8dc85d54 6942 parent_ctx = perf_pin_task_context(parent, ctxn);
25346b93 6943
ad3a37de
PM
6944 /*
6945 * No need to check if parent_ctx != NULL here; since we saw
6946 * it non-NULL earlier, the only reason for it to become NULL
6947 * is if we exit, and since we're currently in the middle of
6948 * a fork we can't be exiting at the same time.
6949 */
ad3a37de 6950
9b51f66d
IM
6951 /*
6952 * Lock the parent list. No need to lock the child - not PID
6953 * hashed yet and not running, so nobody can access it.
6954 */
d859e29f 6955 mutex_lock(&parent_ctx->mutex);
9b51f66d
IM
6956
6957 /*
 6958 * We don't have to disable NMIs - we are only looking at
6959 * the list, not manipulating it:
6960 */
889ff015 6961 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
8dc85d54
PZ
6962 ret = inherit_task_group(event, parent, parent_ctx,
6963 child, ctxn, &inherited_all);
889ff015
FW
6964 if (ret)
6965 break;
6966 }
b93f7978 6967
dddd3379
TG
6968 /*
6969 * We can't hold ctx->lock when iterating the ->flexible_group list due
6970 * to allocations, but we need to prevent rotation because
6971 * rotate_ctx() will change the list from interrupt context.
6972 */
6973 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6974 parent_ctx->rotate_disable = 1;
6975 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6976
889ff015 6977 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
8dc85d54
PZ
6978 ret = inherit_task_group(event, parent, parent_ctx,
6979 child, ctxn, &inherited_all);
889ff015 6980 if (ret)
9b51f66d 6981 break;
564c2b21
PM
6982 }
6983
dddd3379
TG
6984 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6985 parent_ctx->rotate_disable = 0;
dddd3379 6986
8dc85d54 6987 child_ctx = child->perf_event_ctxp[ctxn];
889ff015 6988
05cbaa28 6989 if (child_ctx && inherited_all) {
564c2b21
PM
6990 /*
6991 * Mark the child context as a clone of the parent
6992 * context, or of whatever the parent is a clone of.
c5ed5145
PZ
6993 *
6994 * Note that if the parent is a clone, the holding of
6995 * parent_ctx->lock avoids it from being uncloned.
564c2b21 6996 */
c5ed5145 6997 cloned_ctx = parent_ctx->parent_ctx;
ad3a37de
PM
6998 if (cloned_ctx) {
6999 child_ctx->parent_ctx = cloned_ctx;
25346b93 7000 child_ctx->parent_gen = parent_ctx->parent_gen;
564c2b21
PM
7001 } else {
7002 child_ctx->parent_ctx = parent_ctx;
7003 child_ctx->parent_gen = parent_ctx->generation;
7004 }
7005 get_ctx(child_ctx->parent_ctx);
9b51f66d
IM
7006 }
7007
c5ed5145 7008 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
d859e29f 7009 mutex_unlock(&parent_ctx->mutex);
6ab423e0 7010
25346b93 7011 perf_unpin_context(parent_ctx);
fe4b04fa 7012 put_ctx(parent_ctx);
ad3a37de 7013
6ab423e0 7014 return ret;
9b51f66d
IM
7015}
7016
8dc85d54
PZ
7017/*
7018 * Initialize the perf_event context in task_struct
7019 */
7020int perf_event_init_task(struct task_struct *child)
7021{
7022 int ctxn, ret;
7023
8550d7cb
ON
7024 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
7025 mutex_init(&child->perf_event_mutex);
7026 INIT_LIST_HEAD(&child->perf_event_list);
7027
8dc85d54
PZ
7028 for_each_task_context_nr(ctxn) {
7029 ret = perf_event_init_context(child, ctxn);
7030 if (ret)
7031 return ret;
7032 }
7033
7034 return 0;
7035}
7036
220b140b
PM
7037static void __init perf_event_init_all_cpus(void)
7038{
b28ab83c 7039 struct swevent_htable *swhash;
220b140b 7040 int cpu;
220b140b
PM
7041
7042 for_each_possible_cpu(cpu) {
b28ab83c
PZ
7043 swhash = &per_cpu(swevent_htable, cpu);
7044 mutex_init(&swhash->hlist_mutex);
e9d2b064 7045 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
220b140b
PM
7046 }
7047}
7048
cdd6c482 7049static void __cpuinit perf_event_init_cpu(int cpu)
0793a61d 7050{
108b02cf 7051 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
0793a61d 7052
b28ab83c 7053 mutex_lock(&swhash->hlist_mutex);
4536e4d1 7054 if (swhash->hlist_refcount > 0) {
76e1d904
FW
7055 struct swevent_hlist *hlist;
7056
b28ab83c
PZ
7057 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
7058 WARN_ON(!hlist);
7059 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 7060 }
b28ab83c 7061 mutex_unlock(&swhash->hlist_mutex);
0793a61d
TG
7062}
7063
c277443c 7064#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
e9d2b064 7065static void perf_pmu_rotate_stop(struct pmu *pmu)
0793a61d 7066{
e9d2b064
PZ
7067 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
7068
7069 WARN_ON(!irqs_disabled());
7070
7071 list_del_init(&cpuctx->rotation_list);
7072}
7073
108b02cf 7074static void __perf_event_exit_context(void *__info)
0793a61d 7075{
108b02cf 7076 struct perf_event_context *ctx = __info;
cdd6c482 7077 struct perf_event *event, *tmp;
0793a61d 7078
108b02cf 7079 perf_pmu_rotate_stop(ctx->pmu);
b5ab4cd5 7080
889ff015 7081 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
fe4b04fa 7082 __perf_remove_from_context(event);
889ff015 7083 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
fe4b04fa 7084 __perf_remove_from_context(event);
0793a61d 7085}
108b02cf
PZ
7086
7087static void perf_event_exit_cpu_context(int cpu)
7088{
7089 struct perf_event_context *ctx;
7090 struct pmu *pmu;
7091 int idx;
7092
7093 idx = srcu_read_lock(&pmus_srcu);
7094 list_for_each_entry_rcu(pmu, &pmus, entry) {
917bdd1c 7095 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
108b02cf
PZ
7096
7097 mutex_lock(&ctx->mutex);
7098 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
7099 mutex_unlock(&ctx->mutex);
7100 }
7101 srcu_read_unlock(&pmus_srcu, idx);
108b02cf
PZ
7102}
7103
cdd6c482 7104static void perf_event_exit_cpu(int cpu)
0793a61d 7105{
b28ab83c 7106 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
d859e29f 7107
b28ab83c
PZ
7108 mutex_lock(&swhash->hlist_mutex);
7109 swevent_hlist_release(swhash);
7110 mutex_unlock(&swhash->hlist_mutex);
76e1d904 7111
108b02cf 7112 perf_event_exit_cpu_context(cpu);
0793a61d
TG
7113}
7114#else
cdd6c482 7115static inline void perf_event_exit_cpu(int cpu) { }
0793a61d
TG
7116#endif
7117
c277443c
PZ
7118static int
7119perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
7120{
7121 int cpu;
7122
7123 for_each_online_cpu(cpu)
7124 perf_event_exit_cpu(cpu);
7125
7126 return NOTIFY_OK;
7127}
7128
7129/*
7130 * Run the perf reboot notifier at the very last possible moment so that
7131 * the generic watchdog code runs as long as possible.
7132 */
7133static struct notifier_block perf_reboot_notifier = {
7134 .notifier_call = perf_reboot,
7135 .priority = INT_MIN,
7136};
7137
0793a61d
TG
7138static int __cpuinit
7139perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
7140{
7141 unsigned int cpu = (long)hcpu;
7142
4536e4d1 7143 switch (action & ~CPU_TASKS_FROZEN) {
0793a61d
TG
7144
7145 case CPU_UP_PREPARE:
5e11637e 7146 case CPU_DOWN_FAILED:
cdd6c482 7147 perf_event_init_cpu(cpu);
0793a61d
TG
7148 break;
7149
5e11637e 7150 case CPU_UP_CANCELED:
0793a61d 7151 case CPU_DOWN_PREPARE:
cdd6c482 7152 perf_event_exit_cpu(cpu);
0793a61d
TG
7153 break;
7154
7155 default:
7156 break;
7157 }
7158
7159 return NOTIFY_OK;
7160}
7161
cdd6c482 7162void __init perf_event_init(void)
0793a61d 7163{
3c502e7a
JW
7164 int ret;
7165
2e80a82a
PZ
7166 idr_init(&pmu_idr);
7167
220b140b 7168 perf_event_init_all_cpus();
b0a873eb 7169 init_srcu_struct(&pmus_srcu);
2e80a82a
PZ
7170 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
7171 perf_pmu_register(&perf_cpu_clock, NULL, -1);
7172 perf_pmu_register(&perf_task_clock, NULL, -1);
b0a873eb
PZ
7173 perf_tp_register();
7174 perf_cpu_notifier(perf_cpu_notify);
c277443c 7175 register_reboot_notifier(&perf_reboot_notifier);
3c502e7a
JW
7176
7177 ret = init_hw_breakpoint();
7178 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
b2029520
GN
7179
7180 /* do not patch jump label more than once per second */
7181 jump_label_rate_limit(&perf_sched_events, HZ);
b01c3a00
JO
7182
7183 /*
7184 * Build time assertion that we keep the data_head at the intended
7185 * location. IOW, validation we got the __reserved[] size right.
7186 */
7187 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
7188 != 1024);
0793a61d 7189}
abe43400
PZ
7190
7191static int __init perf_event_sysfs_init(void)
7192{
7193 struct pmu *pmu;
7194 int ret;
7195
7196 mutex_lock(&pmus_lock);
7197
7198 ret = bus_register(&pmu_bus);
7199 if (ret)
7200 goto unlock;
7201
7202 list_for_each_entry(pmu, &pmus, entry) {
7203 if (!pmu->name || pmu->type < 0)
7204 continue;
7205
7206 ret = pmu_dev_alloc(pmu);
7207 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
7208 }
7209 pmu_bus_running = 1;
7210 ret = 0;
7211
7212unlock:
7213 mutex_unlock(&pmus_lock);
7214
7215 return ret;
7216}
7217device_initcall(perf_event_sysfs_init);
e5d1367f
SE
7218
7219#ifdef CONFIG_CGROUP_PERF
761b3ef5 7220static struct cgroup_subsys_state *perf_cgroup_create(struct cgroup *cont)
e5d1367f
SE
7221{
7222 struct perf_cgroup *jc;
e5d1367f 7223
1b15d055 7224 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
e5d1367f
SE
7225 if (!jc)
7226 return ERR_PTR(-ENOMEM);
7227
e5d1367f
SE
7228 jc->info = alloc_percpu(struct perf_cgroup_info);
7229 if (!jc->info) {
7230 kfree(jc);
7231 return ERR_PTR(-ENOMEM);
7232 }
7233
e5d1367f
SE
7234 return &jc->css;
7235}
7236
761b3ef5 7237static void perf_cgroup_destroy(struct cgroup *cont)
e5d1367f
SE
7238{
7239 struct perf_cgroup *jc;
7240 jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
7241 struct perf_cgroup, css);
7242 free_percpu(jc->info);
7243 kfree(jc);
7244}
7245
7246static int __perf_cgroup_move(void *info)
7247{
7248 struct task_struct *task = info;
7249 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
7250 return 0;
7251}
7252
761b3ef5 7253static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
e5d1367f 7254{
bb9d97b6
TH
7255 struct task_struct *task;
7256
7257 cgroup_taskset_for_each(task, cgrp, tset)
7258 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
7259}
7260
761b3ef5
LZ
7261static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
7262 struct task_struct *task)
e5d1367f
SE
7263{
7264 /*
7265 * cgroup_exit() is called in the copy_process() failure path.
 7266 * Ignore this case since the task hasn't run yet; this avoids
7267 * trying to poke a half freed task state from generic code.
7268 */
7269 if (!(task->flags & PF_EXITING))
7270 return;
7271
bb9d97b6 7272 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
7273}
7274
7275struct cgroup_subsys perf_subsys = {
e7e7ee2e
IM
7276 .name = "perf_event",
7277 .subsys_id = perf_subsys_id,
7278 .create = perf_cgroup_create,
7279 .destroy = perf_cgroup_destroy,
7280 .exit = perf_cgroup_exit,
bb9d97b6 7281 .attach = perf_cgroup_attach,
e5d1367f
SE
7282};
7283#endif /* CONFIG_CGROUP_PERF */