Merge tag 'perf-urgent-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6-block.git] / kernel / events / core.c
/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include "internal.h"

#include <asm/irq_regs.h>

struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, in which case the function is called directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}

/**
 * cpu_function_call - call a function on a given cpu
 * @cpu:	the cpu to run @func on
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

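/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a caller might use the two helpers above. The callback runs from
 * smp_call_function_single() IPI context on the target CPU, so it must
 * not sleep; the "info" cookie carries both arguments and results. The
 * function and payload names below are hypothetical.
 */
#if 0	/* example only */
static int read_remote_value(void *info)
{
	int *val = info;		/* hypothetical payload */

	*val = smp_processor_id();	/* runs on the target CPU */
	return 0;
}

static int example_usage(struct task_struct *task, int cpu)
{
	int val = -1;
	int err;

	err = task_function_call(task, read_remote_value, &val);
	if (err == -ESRCH)	/* task not running; fall back to a CPU */
		err = cpu_function_call(cpu, read_remote_value, &val);
	return err;
}
#endif
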
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct jump_label_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE 100000
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly =
	DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);

	return 0;
}

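/*
 * Worked example (editor's note): with HZ=1000 and the default sample
 * rate of 100000, max_samples_per_tick = DIV_ROUND_UP(100000, 1000) =
 * 100 samples per timer tick. Lowering sysctl_perf_event_sample_rate
 * through the handler above shrinks that per-tick budget accordingly.
 */
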
static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

static void ring_buffer_attach(struct perf_event *event,
			       struct ring_buffer *rb);

void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

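/*
 * Editor's note on the pair above: the lock order is always the per-CPU
 * context's cpuctx->ctx.lock first, then the task context's ctx->lock,
 * released in the reverse order. Passing a NULL task context degrades
 * to locking only the CPU context.
 */
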
#ifdef CONFIG_CGROUP_PERF

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_subsys_state(task, perf_subsys_id),
			struct perf_cgroup, css);
}

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	return !event->cgrp || event->cgrp == cpuctx->cgrp;
}

static inline void perf_get_cgroup(struct perf_event *event)
{
	css_get(&event->cgrp->css);
}

static inline void perf_put_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	perf_put_cgroup(event);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to
				 * pass task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

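/*
 * Editor's note: PERF_CGROUP_SWOUT and PERF_CGROUP_SWIN are bit flags,
 * so a caller can flip a CPU from one cgroup's events to another's in
 * a single call (illustrative only):
 *
 *	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
 *
 * The SWOUT half runs first, clearing cpuctx->cgrp, and the SWIN half
 * then re-derives it from @task before scheduling events back in.
 */
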
static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec(),
	 * which systematically causes a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * the cgroup during ctxsw; cgroup events were not scheduled
	 * out during ctxsw if the cgroup did not change.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct file *file;
	int ret = 0, fput_needed;

	file = fget_light(fd, &fput_needed);
	if (!file)
		return -EBADF;

	css = cgroup_css_from_dir(file, perf_subsys_id);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/* must be done before we fput() the file */
	perf_get_cgroup(event);

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fput_light(file, fput_needed);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

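/*
 * Editor's note: the pair above implements a per-CPU nesting counter,
 * so disable/enable sections may be freely nested; only the outermost
 * perf_pmu_disable() touches the hardware and only the matching
 * outermost perf_pmu_enable() re-arms it. Illustrative only:
 *
 *	perf_pmu_disable(pmu);		// count 0 -> 1, PMU stopped
 *	perf_pmu_disable(pmu);		// count 1 -> 2, no-op
 *	perf_pmu_enable(pmu);		// count 2 -> 1, no-op
 *	perf_pmu_enable(pmu);		// count 1 -> 0, PMU restarted
 */
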
static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		kfree_rcu(ctx, rcu_head);
	}
}

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

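/*
 * Editor's note: get_ctx() deliberately uses atomic_inc_not_zero() and
 * warns rather than resurrecting a context whose refcount already hit
 * zero; callers may therefore only take a reference on a context they
 * can prove is still live, e.g. one found under rcu_read_lock() in
 * perf_lock_task_context() below. put_ctx() drops the parent-context
 * and task references before freeing the context via RCU.
 */
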
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

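/*
 * Illustrative sketch (editor's addition): the pin/unpin pair brackets
 * code that must see a stable task<->context association. Names below
 * are hypothetical.
 */
#if 0	/* example only; "ctxn" would be a context index such as 0 */
static void example_pin_usage(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;

	ctx = perf_pin_task_context(task, ctxn);
	if (ctx) {
		/* ctx cannot be swapped to another task here */
		perf_unpin_context(ctx);
		put_ctx(ctx);	/* drop the reference taken while pinning */
	}
}
#endif
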
/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list; group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

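/*
 * Worked example (editor's note): for a group leader with two siblings
 * and read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 * PERF_FORMAT_TOTAL_TIME_ENABLED, the computation above gives
 * entry = 16 (value + id), nr = 3, and size = 8 (time_enabled)
 * + 8 (the extra u64 added for GROUP) + 16 * 3 = 64 bytes per read().
 */
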
static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event;
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

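/*
 * Editor's note: the retry loop above is the standard pattern for
 * operating on a possibly-running task's context: try the cross-CPU
 * call; if the task wasn't running, take ctx->lock and re-check
 * ctx->is_active, because the task may have been scheduled in between
 * the failed call and acquiring the lock. The same shape recurs in
 * perf_event_disable() and perf_install_in_context() below.
 */
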
/*
 * Cross CPU call to disable a performance event
 */
static int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_disable);

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *	tstamp - ctx->timestamp
	 * is equivalent to
	 *	tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	perf_set_shadow_time(event, ctx, tstamp);

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone through event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	return -EAGAIN;
}

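/*
 * Editor's note: group_sched_in() is a transactional-add pattern:
 * start_txn() opens a PMU transaction, each event_sched_in() queues
 * one counter, and commit_txn() either accepts the whole group or
 * fails it; any partial success is unwound and closed with
 * cancel_txn(). A PMU that cannot batch counter programming may
 * implement start/commit/cancel as no-ops and accept or reject each
 * ->add() individually.
 */
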
/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

1529
2c29ef0f
PZ
1530static void task_ctx_sched_out(struct perf_event_context *ctx);
1531static void
1532ctx_sched_in(struct perf_event_context *ctx,
1533 struct perf_cpu_context *cpuctx,
1534 enum event_type_t event_type,
1535 struct task_struct *task);
fe4b04fa 1536
dce5855b
PZ
1537static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1538 struct perf_event_context *ctx,
1539 struct task_struct *task)
1540{
1541 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1542 if (ctx)
1543 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1544 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1545 if (ctx)
1546 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1547}
1548
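/*
 * Editor's note: perf_event_sched_in() deliberately schedules all
 * pinned groups (CPU context first, then task context) before any
 * flexible groups, so pinned events can never be crowded out by
 * flexible ones competing for the same hardware counters.
 */
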
/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static int __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

	perf_ctx_lock(cpuctx, task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	/*
	 * If there was an active task_ctx schedule it out.
	 */
	if (task_ctx)
		task_ctx_sched_out(task_ctx);

	/*
	 * If the context we're installing events in is not the
	 * active task_ctx, flip them.
	 */
	if (ctx->task && task_ctx != ctx) {
		if (task_ctx)
			raw_spin_unlock(&task_ctx->lock);
		raw_spin_lock(&ctx->lock);
		task_ctx = ctx;
	}

	if (task_ctx) {
		cpuctx->task_ctx = task_ctx;
		task = task_ctx->task;
	}

	cpu_ctx_sched_out(cpuctx, EVENT_ALL);

	update_context_time(ctx);
	/*
	 * update cgrp time only if current cgrp
	 * matches event->cgrp. Must be done before
	 * calling add_event_to_ctx()
	 */
	update_cgrp_time_from_event(event);

	add_event_to_ctx(event, ctx);

	/*
	 * Schedule everything back in
	 */
	perf_event_sched_in(cpuctx, task_ctx, task);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, task_ctx);

	return 0;
}

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	event->ctx = ctx;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		cpu_function_call(cpu, __perf_install_in_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_install_in_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to add the event; holding
	 * the ctx->lock ensures the task won't get scheduled in.
	 */
	add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
	}
}

/*
 * Cross CPU call to enable a performance event
 */
static int __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	if (WARN_ON_ONCE(!ctx->is_active))
		return -EINVAL;

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;

	/*
	 * set current task's cgroup time reference point
	 */
	perf_cgroup_set_timestamp(current, ctx);

	__perf_event_mark_enabled(event);

	if (!event_filter_match(event)) {
		if (is_cgroup_event(event))
			perf_cgroup_defer_enabled(event);
		goto unlock;
	}

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_enable, event);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	if (!ctx->is_active) {
		__perf_event_mark_enabled(event);
		goto out;
	}

	raw_spin_unlock_irq(&ctx->lock);

	if (!task_function_call(task, __perf_event_enable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
		/*
		 * task could have been flipped by a concurrent
		 * perf_event_context_sched_out()
		 */
		task = ctx->task;
		goto retry;
	}

out:
	raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_enable);

26ca5c11 1818int perf_event_refresh(struct perf_event *event, int refresh)
79f14641 1819{
2023b359 1820 /*
cdd6c482 1821 * not supported on inherited events
2023b359 1822 */
2e939d1d 1823 if (event->attr.inherit || !is_sampling_event(event))
2023b359
PZ
1824 return -EINVAL;
1825
cdd6c482
IM
1826 atomic_add(refresh, &event->event_limit);
1827 perf_event_enable(event);
2023b359
PZ
1828
1829 return 0;
79f14641 1830}
26ca5c11 1831EXPORT_SYMBOL_GPL(perf_event_refresh);
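
/*
 * Usage sketch (illustrative, not part of this file): userspace reaches
 * this function through the REFRESH ioctl. A hypothetical monitor that
 * re-arms a sampling event for ten more overflows after handling each
 * batch might do:
 *
 *	// fd: perf event fd opened with attr.sample_period set
 *	ioctl(fd, PERF_EVENT_IOC_REFRESH, 10);
 *
 * Each overflow then decrements event_limit; roughly speaking, once it
 * reaches zero the event is disabled until refreshed again.
 */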
79f14641 1832
5b0311e1
FW
1833static void ctx_sched_out(struct perf_event_context *ctx,
1834 struct perf_cpu_context *cpuctx,
1835 enum event_type_t event_type)
235c7fc7 1836{
cdd6c482 1837 struct perf_event *event;
db24d33e 1838 int is_active = ctx->is_active;
235c7fc7 1839
db24d33e 1840 ctx->is_active &= ~event_type;
cdd6c482 1841 if (likely(!ctx->nr_events))
facc4307
PZ
1842 return;
1843
4af4998b 1844 update_context_time(ctx);
e5d1367f 1845 update_cgrp_time_from_cpuctx(cpuctx);
5b0311e1 1846 if (!ctx->nr_active)
facc4307 1847 return;
5b0311e1 1848
075e0b00 1849 perf_pmu_disable(ctx->pmu);
db24d33e 1850 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
889ff015
FW
1851 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1852 group_sched_out(event, cpuctx, ctx);
9ed6060d 1853 }
889ff015 1854
db24d33e 1855 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
889ff015 1856 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
8c9ed8e1 1857 group_sched_out(event, cpuctx, ctx);
9ed6060d 1858 }
1b9a644f 1859 perf_pmu_enable(ctx->pmu);
235c7fc7
IM
1860}
1861
564c2b21
PM
1862/*
1863 * Test whether two contexts are equivalent, i.e. whether they
1864 * have both been cloned from the same version of the same context
cdd6c482
IM
1865 * and they both have the same number of enabled events.
1866 * If the number of enabled events is the same, then the set
1867 * of enabled events should be the same, because these are both
1868 * inherited contexts, therefore we can't access individual events
564c2b21 1869 * in them directly with an fd; we can only enable/disable all
cdd6c482 1870 * events via prctl, or enable/disable all events in a family
564c2b21
PM
1871 * via ioctl, which will have the same effect on both contexts.
1872 */
cdd6c482
IM
1873static int context_equiv(struct perf_event_context *ctx1,
1874 struct perf_event_context *ctx2)
564c2b21
PM
1875{
1876 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
ad3a37de 1877 && ctx1->parent_gen == ctx2->parent_gen
25346b93 1878 && !ctx1->pin_count && !ctx2->pin_count;
564c2b21
PM
1879}
1880
cdd6c482
IM
1881static void __perf_event_sync_stat(struct perf_event *event,
1882 struct perf_event *next_event)
bfbd3381
PZ
1883{
1884 u64 value;
1885
cdd6c482 1886 if (!event->attr.inherit_stat)
bfbd3381
PZ
1887 return;
1888
1889 /*
cdd6c482 1890 * Update the event value; we cannot use perf_event_read()
bfbd3381
PZ
1891 * because we're in the middle of a context switch and have IRQs
1892 * disabled, which upsets smp_call_function_single(), however
cdd6c482 1893 * we know the event must be on the current CPU, therefore we
bfbd3381
PZ
1894 * don't need to use it.
1895 */
cdd6c482
IM
1896 switch (event->state) {
1897 case PERF_EVENT_STATE_ACTIVE:
3dbebf15
PZ
1898 event->pmu->read(event);
1899 /* fall-through */
bfbd3381 1900
cdd6c482
IM
1901 case PERF_EVENT_STATE_INACTIVE:
1902 update_event_times(event);
bfbd3381
PZ
1903 break;
1904
1905 default:
1906 break;
1907 }
1908
1909 /*
cdd6c482 1910 * In order to keep per-task stats reliable we need to flip the event
bfbd3381
PZ
1911 * values when we flip the contexts.
1912 */
e7850595
PZ
1913 value = local64_read(&next_event->count);
1914 value = local64_xchg(&event->count, value);
1915 local64_set(&next_event->count, value);
bfbd3381 1916
cdd6c482
IM
1917 swap(event->total_time_enabled, next_event->total_time_enabled);
1918 swap(event->total_time_running, next_event->total_time_running);
19d2e755 1919
bfbd3381 1920 /*
19d2e755 1921 * Since we swizzled the values, update the user-visible data too.
bfbd3381 1922 */
cdd6c482
IM
1923 perf_event_update_userpage(event);
1924 perf_event_update_userpage(next_event);
bfbd3381
PZ
1925}
1926
1927#define list_next_entry(pos, member) \
1928 list_entry(pos->member.next, typeof(*pos), member)
1929
cdd6c482
IM
1930static void perf_event_sync_stat(struct perf_event_context *ctx,
1931 struct perf_event_context *next_ctx)
bfbd3381 1932{
cdd6c482 1933 struct perf_event *event, *next_event;
bfbd3381
PZ
1934
1935 if (!ctx->nr_stat)
1936 return;
1937
02ffdbc8
PZ
1938 update_context_time(ctx);
1939
cdd6c482
IM
1940 event = list_first_entry(&ctx->event_list,
1941 struct perf_event, event_entry);
bfbd3381 1942
cdd6c482
IM
1943 next_event = list_first_entry(&next_ctx->event_list,
1944 struct perf_event, event_entry);
bfbd3381 1945
cdd6c482
IM
1946 while (&event->event_entry != &ctx->event_list &&
1947 &next_event->event_entry != &next_ctx->event_list) {
bfbd3381 1948
cdd6c482 1949 __perf_event_sync_stat(event, next_event);
bfbd3381 1950
cdd6c482
IM
1951 event = list_next_entry(event, event_entry);
1952 next_event = list_next_entry(next_event, event_entry);
bfbd3381
PZ
1953 }
1954}
1955
fe4b04fa
PZ
1956static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1957 struct task_struct *next)
0793a61d 1958{
8dc85d54 1959 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482
IM
1960 struct perf_event_context *next_ctx;
1961 struct perf_event_context *parent;
108b02cf 1962 struct perf_cpu_context *cpuctx;
c93f7669 1963 int do_switch = 1;
0793a61d 1964
108b02cf
PZ
1965 if (likely(!ctx))
1966 return;
10989fb2 1967
108b02cf
PZ
1968 cpuctx = __get_cpu_context(ctx);
1969 if (!cpuctx->task_ctx)
0793a61d
TG
1970 return;
1971
c93f7669
PM
1972 rcu_read_lock();
1973 parent = rcu_dereference(ctx->parent_ctx);
8dc85d54 1974 next_ctx = next->perf_event_ctxp[ctxn];
c93f7669
PM
1975 if (parent && next_ctx &&
1976 rcu_dereference(next_ctx->parent_ctx) == parent) {
1977 /*
1978 * Looks like the two contexts are clones, so we might be
1979 * able to optimize the context switch. We lock both
1980 * contexts and check that they are clones under the
1981 * lock (including re-checking that neither has been
1982 * uncloned in the meantime). It doesn't matter which
1983 * order we take the locks because no other cpu could
1984 * be trying to lock both of these tasks.
1985 */
e625cce1
TG
1986 raw_spin_lock(&ctx->lock);
1987 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f7669 1988 if (context_equiv(ctx, next_ctx)) {
665c2142
PZ
1989 /*
1990 * XXX do we need a memory barrier of sorts
cdd6c482 1991 * wrt rcu_dereference() of perf_event_ctxp
665c2142 1992 */
8dc85d54
PZ
1993 task->perf_event_ctxp[ctxn] = next_ctx;
1994 next->perf_event_ctxp[ctxn] = ctx;
c93f7669
PM
1995 ctx->task = next;
1996 next_ctx->task = task;
1997 do_switch = 0;
bfbd3381 1998
cdd6c482 1999 perf_event_sync_stat(ctx, next_ctx);
c93f7669 2000 }
e625cce1
TG
2001 raw_spin_unlock(&next_ctx->lock);
2002 raw_spin_unlock(&ctx->lock);
564c2b21 2003 }
c93f7669 2004 rcu_read_unlock();
564c2b21 2005
c93f7669 2006 if (do_switch) {
facc4307 2007 raw_spin_lock(&ctx->lock);
5b0311e1 2008 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
c93f7669 2009 cpuctx->task_ctx = NULL;
facc4307 2010 raw_spin_unlock(&ctx->lock);
c93f7669 2011 }
0793a61d
TG
2012}
2013
8dc85d54
PZ
2014#define for_each_task_context_nr(ctxn) \
2015 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2016
2017/*
2018 * Called from scheduler to remove the events of the current task,
2019 * with interrupts disabled.
2020 *
2021 * We stop each event and update the event value in event->count.
2022 *
2023 * This does not protect us against NMI, but disable()
2024 * sets the disabled bit in the control field of the event _before_
2025 * accessing the event control register. If an NMI hits, then it will
2026 * not restart the event.
2027 */
82cd6def
PZ
2028void __perf_event_task_sched_out(struct task_struct *task,
2029 struct task_struct *next)
8dc85d54
PZ
2030{
2031 int ctxn;
2032
8dc85d54
PZ
2033 for_each_task_context_nr(ctxn)
2034 perf_event_context_sched_out(task, ctxn, next);
e5d1367f
SE
2035
2036 /*
2037 * if cgroup events exist on this CPU, then we need
2038 * to check if we have to switch out PMU state.
2039 * cgroup events are system-wide mode only.
2040 */
2041 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
a8d757ef 2042 perf_cgroup_sched_out(task, next);
8dc85d54
PZ
2043}
2044
04dc2dbb 2045static void task_ctx_sched_out(struct perf_event_context *ctx)
a08b159f 2046{
108b02cf 2047 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
a08b159f 2048
a63eaf34
PM
2049 if (!cpuctx->task_ctx)
2050 return;
012b84da
IM
2051
2052 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2053 return;
2054
04dc2dbb 2055 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
a08b159f
PM
2056 cpuctx->task_ctx = NULL;
2057}
2058
5b0311e1
FW
2059/*
2060 * Called with IRQs disabled
2061 */
2062static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2063 enum event_type_t event_type)
2064{
2065 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb9
IM
2066}
2067
235c7fc7 2068static void
5b0311e1 2069ctx_pinned_sched_in(struct perf_event_context *ctx,
6e37738a 2070 struct perf_cpu_context *cpuctx)
0793a61d 2071{
cdd6c482 2072 struct perf_event *event;
0793a61d 2073
889ff015
FW
2074 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2075 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2076 continue;
5632ab12 2077 if (!event_filter_match(event))
3b6f9e5c
PM
2078 continue;
2079
e5d1367f
SE
2080 /* may need to reset tstamp_enabled */
2081 if (is_cgroup_event(event))
2082 perf_cgroup_mark_enabled(event, ctx);
2083
8c9ed8e1 2084 if (group_can_go_on(event, cpuctx, 1))
6e37738a 2085 group_sched_in(event, cpuctx, ctx);
3b6f9e5c
PM
2086
2087 /*
2088 * If this pinned group hasn't been scheduled,
2089 * put it in error state.
2090 */
cdd6c482
IM
2091 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2092 update_group_times(event);
2093 event->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2094 }
3b6f9e5c 2095 }
5b0311e1
FW
2096}
2097
2098static void
2099ctx_flexible_sched_in(struct perf_event_context *ctx,
6e37738a 2100 struct perf_cpu_context *cpuctx)
5b0311e1
FW
2101{
2102 struct perf_event *event;
2103 int can_add_hw = 1;
3b6f9e5c 2104
889ff015
FW
2105 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2106 /* Ignore events in OFF or ERROR state */
2107 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2108 continue;
04289bb9
IM
2109 /*
2110 * Listen to the 'cpu' scheduling filter constraint
cdd6c482 2111 * of events:
04289bb9 2112 */
5632ab12 2113 if (!event_filter_match(event))
0793a61d
TG
2114 continue;
2115
e5d1367f
SE
2116 /* may need to reset tstamp_enabled */
2117 if (is_cgroup_event(event))
2118 perf_cgroup_mark_enabled(event, ctx);
2119
9ed6060d 2120 if (group_can_go_on(event, cpuctx, can_add_hw)) {
6e37738a 2121 if (group_sched_in(event, cpuctx, ctx))
dd0e6ba2 2122 can_add_hw = 0;
9ed6060d 2123 }
0793a61d 2124 }
5b0311e1
FW
2125}
2126
2127static void
2128ctx_sched_in(struct perf_event_context *ctx,
2129 struct perf_cpu_context *cpuctx,
e5d1367f
SE
2130 enum event_type_t event_type,
2131 struct task_struct *task)
5b0311e1 2132{
e5d1367f 2133 u64 now;
db24d33e 2134 int is_active = ctx->is_active;
e5d1367f 2135
db24d33e 2136 ctx->is_active |= event_type;
5b0311e1 2137 if (likely(!ctx->nr_events))
facc4307 2138 return;
5b0311e1 2139
e5d1367f
SE
2140 now = perf_clock();
2141 ctx->timestamp = now;
3f7cce3c 2142 perf_cgroup_set_timestamp(task, ctx);
5b0311e1
FW
2143 /*
2144 * First go through the list and put on any pinned groups
2145 * in order to give them the best chance of going on.
2146 */
db24d33e 2147 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
6e37738a 2148 ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1
FW
2149
2150 /* Then walk through the lower prio flexible groups */
db24d33e 2151 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
6e37738a 2152 ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7
IM
2153}
2154
329c0e01 2155static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
2156 enum event_type_t event_type,
2157 struct task_struct *task)
329c0e01
FW
2158{
2159 struct perf_event_context *ctx = &cpuctx->ctx;
2160
e5d1367f 2161 ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e01
FW
2162}
2163
e5d1367f
SE
2164static void perf_event_context_sched_in(struct perf_event_context *ctx,
2165 struct task_struct *task)
235c7fc7 2166{
108b02cf 2167 struct perf_cpu_context *cpuctx;
235c7fc7 2168
108b02cf 2169 cpuctx = __get_cpu_context(ctx);
329c0e01
FW
2170 if (cpuctx->task_ctx == ctx)
2171 return;
2172
facc4307 2173 perf_ctx_lock(cpuctx, ctx);
1b9a644f 2174 perf_pmu_disable(ctx->pmu);
329c0e01
FW
2175 /*
2176 * We want to keep the following priority order:
2177 * cpu pinned (that don't need to move), task pinned,
2178 * cpu flexible, task flexible.
2179 */
2180 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2181
1d5f003f
GN
2182 if (ctx->nr_events)
2183 cpuctx->task_ctx = ctx;
9b33fa6b 2184
86b47c25
GN
2185 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2186
facc4307
PZ
2187 perf_pmu_enable(ctx->pmu);
2188 perf_ctx_unlock(cpuctx, ctx);
2189
b5ab4cd5
PZ
2190 /*
2191 * Since these rotations are per-cpu, we need to ensure the
2192 * cpu-context we got scheduled on is actually rotating.
2193 */
108b02cf 2194 perf_pmu_rotate_start(ctx->pmu);
235c7fc7
IM
2195}
2196
8dc85d54
PZ
2197/*
2198 * Called from scheduler to add the events of the current task
2199 * with interrupts disabled.
2200 *
2201 * We restore the event value and then enable it.
2202 *
2203 * This does not protect us against NMI, but enable()
2204 * sets the enabled bit in the control field of event _before_
2205 * accessing the event control register. If a NMI hits, then it will
2206 * keep the event running.
2207 */
a8d757ef
SE
2208void __perf_event_task_sched_in(struct task_struct *prev,
2209 struct task_struct *task)
8dc85d54
PZ
2210{
2211 struct perf_event_context *ctx;
2212 int ctxn;
2213
2214 for_each_task_context_nr(ctxn) {
2215 ctx = task->perf_event_ctxp[ctxn];
2216 if (likely(!ctx))
2217 continue;
2218
e5d1367f 2219 perf_event_context_sched_in(ctx, task);
8dc85d54 2220 }
e5d1367f
SE
2221 /*
2222 * if cgroup events exist on this CPU, then we need
2223 * to check if we have to switch in PMU state.
2224 * cgroup events are system-wide mode only.
2225 */
2226 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
a8d757ef 2227 perf_cgroup_sched_in(prev, task);
235c7fc7
IM
2228}
2229
abd50713
PZ
2230static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2231{
2232 u64 frequency = event->attr.sample_freq;
2233 u64 sec = NSEC_PER_SEC;
2234 u64 divisor, dividend;
2235
2236 int count_fls, nsec_fls, frequency_fls, sec_fls;
2237
2238 count_fls = fls64(count);
2239 nsec_fls = fls64(nsec);
2240 frequency_fls = fls64(frequency);
2241 sec_fls = 30;
2242
2243 /*
2244 * We got @count events in @nsec ns, with a target of sample_freq Hz;
2245 * the target period becomes:
2246 *
2247 *                @count * 10^9
2248 * period = -------------------
2249 *          @nsec * sample_freq
2250 *
2251 */
2252
2253 /*
2254 * Reduce accuracy by one bit such that @a and @b converge
2255 * to a similar magnitude.
2256 */
fe4b04fa 2257#define REDUCE_FLS(a, b) \
abd50713
PZ
2258do { \
2259 if (a##_fls > b##_fls) { \
2260 a >>= 1; \
2261 a##_fls--; \
2262 } else { \
2263 b >>= 1; \
2264 b##_fls--; \
2265 } \
2266} while (0)
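
/*
 * Numeric illustration (assumed values, not from the code): with
 * a = 6 (fls64 = 3) and b = 40 (fls64 = 6), REDUCE_FLS(a, b) halves
 * the wider term, giving b = 20, b_fls = 5. Repeated application
 * shrinks whichever operand currently has more significant bits,
 * so both terms of the final u64/u64 division stay in range.
 */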
2267
2268 /*
2269 * Reduce accuracy until either term fits in a u64, then proceed with
2270 * the other, so that finally we can do a u64/u64 division.
2271 */
2272 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2273 REDUCE_FLS(nsec, frequency);
2274 REDUCE_FLS(sec, count);
2275 }
2276
2277 if (count_fls + sec_fls > 64) {
2278 divisor = nsec * frequency;
2279
2280 while (count_fls + sec_fls > 64) {
2281 REDUCE_FLS(count, sec);
2282 divisor >>= 1;
2283 }
2284
2285 dividend = count * sec;
2286 } else {
2287 dividend = count * sec;
2288
2289 while (nsec_fls + frequency_fls > 64) {
2290 REDUCE_FLS(nsec, frequency);
2291 dividend >>= 1;
2292 }
2293
2294 divisor = nsec * frequency;
2295 }
2296
f6ab91ad
PZ
2297 if (!divisor)
2298 return dividend;
2299
abd50713
PZ
2300 return div64_u64(dividend, divisor);
2301}
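
/*
 * Worked example (illustrative numbers): with sample_freq = 1000 Hz,
 * count = 2,000,000 events observed in nsec = 10,000,000 ns:
 *
 *	period = (2e6 * 1e9) / (1e7 * 1000) = 200,000
 *
 * i.e. one sample every 200,000 events, which at the observed event
 * rate works out to roughly 1000 samples per second.
 */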
2302
e050e3f0
SE
2303static DEFINE_PER_CPU(int, perf_throttled_count);
2304static DEFINE_PER_CPU(u64, perf_throttled_seq);
2305
abd50713 2306static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
bd2b5b12 2307{
cdd6c482 2308 struct hw_perf_event *hwc = &event->hw;
f6ab91ad 2309 s64 period, sample_period;
bd2b5b12
PZ
2310 s64 delta;
2311
abd50713 2312 period = perf_calculate_period(event, nsec, count);
bd2b5b12
PZ
2313
2314 delta = (s64)(period - hwc->sample_period);
2315 delta = (delta + 7) / 8; /* low pass filter */
2316
2317 sample_period = hwc->sample_period + delta;
2318
2319 if (!sample_period)
2320 sample_period = 1;
2321
bd2b5b12 2322 hwc->sample_period = sample_period;
abd50713 2323
e7850595 2324 if (local64_read(&hwc->period_left) > 8*sample_period) {
a4eaf7f1 2325 event->pmu->stop(event, PERF_EF_UPDATE);
e7850595 2326 local64_set(&hwc->period_left, 0);
a4eaf7f1 2327 event->pmu->start(event, PERF_EF_RELOAD);
abd50713 2328 }
bd2b5b12
PZ
2329}
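
/*
 * Example of the low-pass filter above (illustrative numbers): if
 * hwc->sample_period is 100,000 and the freshly computed period is
 * 200,000, then delta = 100,000 but only (100,000 + 7) / 8 = 12,500
 * is applied, giving a new sample_period of 112,500. Successive ticks
 * converge on the target without overreacting to one noisy sample.
 */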
2330
e050e3f0
SE
2331/*
2332 * combine freq adjustment with unthrottling to avoid two passes over the
2333 * events. At the same time, make sure that having freq events does not
2334 * change the rate of unthrottling, as that would introduce bias.
2335 */
2336static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2337 int needs_unthr)
60db5e09 2338{
cdd6c482
IM
2339 struct perf_event *event;
2340 struct hw_perf_event *hwc;
e050e3f0 2341 u64 now, period = TICK_NSEC;
abd50713 2342 s64 delta;
60db5e09 2343
e050e3f0
SE
2344 /*
2345 * we only need to iterate over all events if:
2346 * - the context has events in frequency mode (needs freq adjust)
2347 * - there are events to unthrottle on this cpu
2348 */
2349 if (!(ctx->nr_freq || needs_unthr))
0f5a2601
PZ
2350 return;
2351
e050e3f0
SE
2352 raw_spin_lock(&ctx->lock);
2353
03541f8b 2354 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482 2355 if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09
PZ
2356 continue;
2357
5632ab12 2358 if (!event_filter_match(event))
5d27c23d
PZ
2359 continue;
2360
cdd6c482 2361 hwc = &event->hw;
6a24ed6c 2362
e050e3f0
SE
2363 if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
2364 hwc->interrupts = 0;
cdd6c482 2365 perf_log_throttle(event, 1);
a4eaf7f1 2366 event->pmu->start(event, 0);
a78ac325
PZ
2367 }
2368
cdd6c482 2369 if (!event->attr.freq || !event->attr.sample_freq)
60db5e09
PZ
2370 continue;
2371
e050e3f0
SE
2372 /*
2373 * stop the event and update event->count
2374 */
2375 event->pmu->stop(event, PERF_EF_UPDATE);
2376
e7850595 2377 now = local64_read(&event->count);
abd50713
PZ
2378 delta = now - hwc->freq_count_stamp;
2379 hwc->freq_count_stamp = now;
60db5e09 2380
e050e3f0
SE
2381 /*
2382 * restart the event;
2383 * reload the period only if the value has changed
2384 */
abd50713 2385 if (delta > 0)
b5ab4cd5 2386 perf_adjust_period(event, period, delta);
e050e3f0
SE
2387
2388 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
60db5e09 2389 }
e050e3f0
SE
2390
2391 raw_spin_unlock(&ctx->lock);
60db5e09
PZ
2392}
2393
235c7fc7 2394/*
cdd6c482 2395 * Round-robin a context's events:
235c7fc7 2396 */
cdd6c482 2397static void rotate_ctx(struct perf_event_context *ctx)
0793a61d 2398{
dddd3379
TG
2399 /*
2400 * Rotate the first entry of the non-pinned groups to the end. Rotation
2401 * might be disabled by the inheritance code.
2402 */
2403 if (!ctx->rotate_disable)
2404 list_rotate_left(&ctx->flexible_groups);
235c7fc7
IM
2405}
2406
b5ab4cd5 2407/*
e9d2b064
PZ
2408 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2409 * because they're strictly CPU-affine and rotate_start is called with IRQs
2410 * disabled, while rotate_context is called from IRQ context.
b5ab4cd5 2411 */
e9d2b064 2412static void perf_rotate_context(struct perf_cpu_context *cpuctx)
235c7fc7 2413{
8dc85d54 2414 struct perf_event_context *ctx = NULL;
e050e3f0 2415 int rotate = 0, remove = 1;
7fc23a53 2416
b5ab4cd5 2417 if (cpuctx->ctx.nr_events) {
e9d2b064 2418 remove = 0;
b5ab4cd5
PZ
2419 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2420 rotate = 1;
2421 }
235c7fc7 2422
8dc85d54 2423 ctx = cpuctx->task_ctx;
b5ab4cd5 2424 if (ctx && ctx->nr_events) {
e9d2b064 2425 remove = 0;
b5ab4cd5
PZ
2426 if (ctx->nr_events != ctx->nr_active)
2427 rotate = 1;
2428 }
9717e6cd 2429
e050e3f0 2430 if (!rotate)
0f5a2601
PZ
2431 goto done;
2432
facc4307 2433 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644f 2434 perf_pmu_disable(cpuctx->ctx.pmu);
60db5e09 2435
e050e3f0
SE
2436 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2437 if (ctx)
2438 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
0793a61d 2439
e050e3f0
SE
2440 rotate_ctx(&cpuctx->ctx);
2441 if (ctx)
2442 rotate_ctx(ctx);
235c7fc7 2443
e050e3f0 2444 perf_event_sched_in(cpuctx, ctx, current);
235c7fc7 2445
0f5a2601
PZ
2446 perf_pmu_enable(cpuctx->ctx.pmu);
2447 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
b5ab4cd5 2448done:
e9d2b064
PZ
2449 if (remove)
2450 list_del_init(&cpuctx->rotation_list);
e9d2b064
PZ
2451}
2452
2453void perf_event_task_tick(void)
2454{
2455 struct list_head *head = &__get_cpu_var(rotation_list);
2456 struct perf_cpu_context *cpuctx, *tmp;
e050e3f0
SE
2457 struct perf_event_context *ctx;
2458 int throttled;
b5ab4cd5 2459
e9d2b064
PZ
2460 WARN_ON(!irqs_disabled());
2461
e050e3f0
SE
2462 __this_cpu_inc(perf_throttled_seq);
2463 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2464
e9d2b064 2465 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
e050e3f0
SE
2466 ctx = &cpuctx->ctx;
2467 perf_adjust_freq_unthr_context(ctx, throttled);
2468
2469 ctx = cpuctx->task_ctx;
2470 if (ctx)
2471 perf_adjust_freq_unthr_context(ctx, throttled);
2472
e9d2b064
PZ
2473 if (cpuctx->jiffies_interval == 1 ||
2474 !(jiffies % cpuctx->jiffies_interval))
2475 perf_rotate_context(cpuctx);
2476 }
0793a61d
TG
2477}
2478
889ff015
FW
2479static int event_enable_on_exec(struct perf_event *event,
2480 struct perf_event_context *ctx)
2481{
2482 if (!event->attr.enable_on_exec)
2483 return 0;
2484
2485 event->attr.enable_on_exec = 0;
2486 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2487 return 0;
2488
1d9b482e 2489 __perf_event_mark_enabled(event);
889ff015
FW
2490
2491 return 1;
2492}
2493
57e7986e 2494/*
cdd6c482 2495 * Enable all of a task's events that have been marked enable-on-exec.
57e7986e
PM
2496 * This expects task == current.
2497 */
8dc85d54 2498static void perf_event_enable_on_exec(struct perf_event_context *ctx)
57e7986e 2499{
cdd6c482 2500 struct perf_event *event;
57e7986e
PM
2501 unsigned long flags;
2502 int enabled = 0;
889ff015 2503 int ret;
57e7986e
PM
2504
2505 local_irq_save(flags);
cdd6c482 2506 if (!ctx || !ctx->nr_events)
57e7986e
PM
2507 goto out;
2508
e566b76e
SE
2509 /*
2510 * We must ctxsw out cgroup events to avoid conflict
2511 * when invoking perf_event_context_sched_in() later on
2512 * in this function. Otherwise we end up trying to
2513 * ctxsw in cgroup events which are already scheduled
2514 * in.
2515 */
a8d757ef 2516 perf_cgroup_sched_out(current, NULL);
57e7986e 2517
e625cce1 2518 raw_spin_lock(&ctx->lock);
04dc2dbb 2519 task_ctx_sched_out(ctx);
57e7986e 2520
b79387ef 2521 list_for_each_entry(event, &ctx->event_list, event_entry) {
889ff015
FW
2522 ret = event_enable_on_exec(event, ctx);
2523 if (ret)
2524 enabled = 1;
57e7986e
PM
2525 }
2526
2527 /*
cdd6c482 2528 * Unclone this context if we enabled any event.
57e7986e 2529 */
71a851b4
PZ
2530 if (enabled)
2531 unclone_ctx(ctx);
57e7986e 2532
e625cce1 2533 raw_spin_unlock(&ctx->lock);
57e7986e 2534
e566b76e
SE
2535 /*
2536 * Also schedules cgroup events back in, if any:
2537 */
e5d1367f 2538 perf_event_context_sched_in(ctx, ctx->task);
9ed6060d 2539out:
57e7986e
PM
2540 local_irq_restore(flags);
2541}
2542
0793a61d 2543/*
cdd6c482 2544 * Cross CPU call to read the hardware event
0793a61d 2545 */
cdd6c482 2546static void __perf_event_read(void *info)
0793a61d 2547{
cdd6c482
IM
2548 struct perf_event *event = info;
2549 struct perf_event_context *ctx = event->ctx;
108b02cf 2550 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
621a01ea 2551
e1ac3614
PM
2552 /*
2553 * If this is a task context, we need to check whether it is
2554 * the current task context of this CPU. If not, it has been
2555 * scheduled out before the smp call arrived. In that case
cdd6c482
IM
2556 * event->count would have been updated to a recent sample
2557 * when the event was scheduled out.
e1ac3614
PM
2558 */
2559 if (ctx->task && cpuctx->task_ctx != ctx)
2560 return;
2561
e625cce1 2562 raw_spin_lock(&ctx->lock);
e5d1367f 2563 if (ctx->is_active) {
542e72fc 2564 update_context_time(ctx);
e5d1367f
SE
2565 update_cgrp_time_from_event(event);
2566 }
cdd6c482 2567 update_event_times(event);
542e72fc
PZ
2568 if (event->state == PERF_EVENT_STATE_ACTIVE)
2569 event->pmu->read(event);
e625cce1 2570 raw_spin_unlock(&ctx->lock);
0793a61d
TG
2571}
2572
b5e58793
PZ
2573static inline u64 perf_event_count(struct perf_event *event)
2574{
e7850595 2575 return local64_read(&event->count) + atomic64_read(&event->child_count);
b5e58793
PZ
2576}
2577
cdd6c482 2578static u64 perf_event_read(struct perf_event *event)
0793a61d
TG
2579{
2580 /*
cdd6c482
IM
2581 * If event is enabled and currently active on a CPU, update the
2582 * value in the event structure:
0793a61d 2583 */
cdd6c482
IM
2584 if (event->state == PERF_EVENT_STATE_ACTIVE) {
2585 smp_call_function_single(event->oncpu,
2586 __perf_event_read, event, 1);
2587 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9
PZ
2588 struct perf_event_context *ctx = event->ctx;
2589 unsigned long flags;
2590
e625cce1 2591 raw_spin_lock_irqsave(&ctx->lock, flags);
c530ccd9
SE
2592 /*
2593 * may read while context is not active
2594 * (e.g., thread is blocked); in that case
2595 * we cannot update context time
2596 */
e5d1367f 2597 if (ctx->is_active) {
c530ccd9 2598 update_context_time(ctx);
e5d1367f
SE
2599 update_cgrp_time_from_event(event);
2600 }
cdd6c482 2601 update_event_times(event);
e625cce1 2602 raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d
TG
2603 }
2604
b5e58793 2605 return perf_event_count(event);
0793a61d
TG
2606}
2607
a63eaf34 2608/*
cdd6c482 2609 * Initialize the perf_event context in a task_struct:
a63eaf34 2610 */
eb184479 2611static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34 2612{
e625cce1 2613 raw_spin_lock_init(&ctx->lock);
a63eaf34 2614 mutex_init(&ctx->mutex);
889ff015
FW
2615 INIT_LIST_HEAD(&ctx->pinned_groups);
2616 INIT_LIST_HEAD(&ctx->flexible_groups);
a63eaf34
PM
2617 INIT_LIST_HEAD(&ctx->event_list);
2618 atomic_set(&ctx->refcount, 1);
eb184479
PZ
2619}
2620
2621static struct perf_event_context *
2622alloc_perf_context(struct pmu *pmu, struct task_struct *task)
2623{
2624 struct perf_event_context *ctx;
2625
2626 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2627 if (!ctx)
2628 return NULL;
2629
2630 __perf_event_init_context(ctx);
2631 if (task) {
2632 ctx->task = task;
2633 get_task_struct(task);
0793a61d 2634 }
eb184479
PZ
2635 ctx->pmu = pmu;
2636
2637 return ctx;
a63eaf34
PM
2638}
2639
2ebd4ffb
MH
2640static struct task_struct *
2641find_lively_task_by_vpid(pid_t vpid)
2642{
2643 struct task_struct *task;
2644 int err;
0793a61d
TG
2645
2646 rcu_read_lock();
2ebd4ffb 2647 if (!vpid)
0793a61d
TG
2648 task = current;
2649 else
2ebd4ffb 2650 task = find_task_by_vpid(vpid);
0793a61d
TG
2651 if (task)
2652 get_task_struct(task);
2653 rcu_read_unlock();
2654
2655 if (!task)
2656 return ERR_PTR(-ESRCH);
2657
0793a61d 2658 /* Reuse ptrace permission checks for now. */
c93f7669
PM
2659 err = -EACCES;
2660 if (!ptrace_may_access(task, PTRACE_MODE_READ))
2661 goto errout;
2662
2ebd4ffb
MH
2663 return task;
2664errout:
2665 put_task_struct(task);
2666 return ERR_PTR(err);
2667
2668}
2669
fe4b04fa
PZ
2670/*
2671 * Returns a matching context with refcount and pincount.
2672 */
108b02cf 2673static struct perf_event_context *
38a81da2 2674find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
0793a61d 2675{
cdd6c482 2676 struct perf_event_context *ctx;
22a4f650 2677 struct perf_cpu_context *cpuctx;
25346b93 2678 unsigned long flags;
8dc85d54 2679 int ctxn, err;
0793a61d 2680
22a4ec72 2681 if (!task) {
cdd6c482 2682 /* Must be root to operate on a CPU event: */
0764771d 2683 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
0793a61d
TG
2684 return ERR_PTR(-EACCES);
2685
0793a61d 2686 /*
cdd6c482 2687 * We could be clever and allow attaching an event to an
0793a61d
TG
2688 * offline CPU and activate it when the CPU comes up, but
2689 * that's for later.
2690 */
f6325e30 2691 if (!cpu_online(cpu))
0793a61d
TG
2692 return ERR_PTR(-ENODEV);
2693
108b02cf 2694 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d 2695 ctx = &cpuctx->ctx;
c93f7669 2696 get_ctx(ctx);
fe4b04fa 2697 ++ctx->pin_count;
0793a61d 2698
0793a61d
TG
2699 return ctx;
2700 }
2701
8dc85d54
PZ
2702 err = -EINVAL;
2703 ctxn = pmu->task_ctx_nr;
2704 if (ctxn < 0)
2705 goto errout;
2706
9ed6060d 2707retry:
8dc85d54 2708 ctx = perf_lock_task_context(task, ctxn, &flags);
c93f7669 2709 if (ctx) {
71a851b4 2710 unclone_ctx(ctx);
fe4b04fa 2711 ++ctx->pin_count;
e625cce1 2712 raw_spin_unlock_irqrestore(&ctx->lock, flags);
9137fb28 2713 } else {
eb184479 2714 ctx = alloc_perf_context(pmu, task);
c93f7669
PM
2715 err = -ENOMEM;
2716 if (!ctx)
2717 goto errout;
eb184479 2718
dbe08d82
ON
2719 err = 0;
2720 mutex_lock(&task->perf_event_mutex);
2721 /*
2722 * If it has already passed perf_event_exit_task(),
2723 * we must see PF_EXITING; it takes this mutex too.
2724 */
2725 if (task->flags & PF_EXITING)
2726 err = -ESRCH;
2727 else if (task->perf_event_ctxp[ctxn])
2728 err = -EAGAIN;
fe4b04fa 2729 else {
9137fb28 2730 get_ctx(ctx);
fe4b04fa 2731 ++ctx->pin_count;
dbe08d82 2732 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa 2733 }
dbe08d82
ON
2734 mutex_unlock(&task->perf_event_mutex);
2735
2736 if (unlikely(err)) {
9137fb28 2737 put_ctx(ctx);
dbe08d82
ON
2738
2739 if (err == -EAGAIN)
2740 goto retry;
2741 goto errout;
a63eaf34
PM
2742 }
2743 }
2744
0793a61d 2745 return ctx;
c93f7669 2746
9ed6060d 2747errout:
c93f7669 2748 return ERR_PTR(err);
0793a61d
TG
2749}
2750
6fb2915d
LZ
2751static void perf_event_free_filter(struct perf_event *event);
2752
cdd6c482 2753static void free_event_rcu(struct rcu_head *head)
592903cd 2754{
cdd6c482 2755 struct perf_event *event;
592903cd 2756
cdd6c482
IM
2757 event = container_of(head, struct perf_event, rcu_head);
2758 if (event->ns)
2759 put_pid_ns(event->ns);
6fb2915d 2760 perf_event_free_filter(event);
cdd6c482 2761 kfree(event);
592903cd
PZ
2762}
2763
76369139 2764static void ring_buffer_put(struct ring_buffer *rb);
925d519a 2765
cdd6c482 2766static void free_event(struct perf_event *event)
f1600952 2767{
e360adbe 2768 irq_work_sync(&event->pending);
925d519a 2769
cdd6c482 2770 if (!event->parent) {
82cd6def 2771 if (event->attach_state & PERF_ATTACH_TASK)
b2029520 2772 jump_label_dec_deferred(&perf_sched_events);
3af9e859 2773 if (event->attr.mmap || event->attr.mmap_data)
cdd6c482
IM
2774 atomic_dec(&nr_mmap_events);
2775 if (event->attr.comm)
2776 atomic_dec(&nr_comm_events);
2777 if (event->attr.task)
2778 atomic_dec(&nr_task_events);
927c7a9e
FW
2779 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2780 put_callchain_buffers();
08309379
PZ
2781 if (is_cgroup_event(event)) {
2782 atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
b2029520 2783 jump_label_dec_deferred(&perf_sched_events);
08309379 2784 }
f344011c 2785 }
9ee318a7 2786
76369139
FW
2787 if (event->rb) {
2788 ring_buffer_put(event->rb);
2789 event->rb = NULL;
a4be7c27
PZ
2790 }
2791
e5d1367f
SE
2792 if (is_cgroup_event(event))
2793 perf_detach_cgroup(event);
2794
cdd6c482
IM
2795 if (event->destroy)
2796 event->destroy(event);
e077df4f 2797
0c67b408
PZ
2798 if (event->ctx)
2799 put_ctx(event->ctx);
2800
cdd6c482 2801 call_rcu(&event->rcu_head, free_event_rcu);
f1600952
PZ
2802}
2803
a66a3052 2804int perf_event_release_kernel(struct perf_event *event)
0793a61d 2805{
cdd6c482 2806 struct perf_event_context *ctx = event->ctx;
0793a61d 2807
ad3a37de 2808 WARN_ON_ONCE(ctx->parent_ctx);
a0507c84
PZ
2809 /*
2810 * There are two ways this annotation is useful:
2811 *
2812 * 1) there is a lock recursion from perf_event_exit_task;
2813 * see the comment there.
2814 *
2815 * 2) there is a lock-inversion with mmap_sem through
2816 * perf_event_read_group(), which takes faults while
2817 * holding ctx->mutex, however this is called after
2818 * the last filedesc died, so there is no possibility
2819 * to trigger the AB-BA case.
2820 */
2821 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
050735b0 2822 raw_spin_lock_irq(&ctx->lock);
8a49542c 2823 perf_group_detach(event);
050735b0 2824 raw_spin_unlock_irq(&ctx->lock);
e03a9a55 2825 perf_remove_from_context(event);
d859e29f 2826 mutex_unlock(&ctx->mutex);
0793a61d 2827
cdd6c482 2828 free_event(event);
0793a61d
TG
2829
2830 return 0;
2831}
a66a3052 2832EXPORT_SYMBOL_GPL(perf_event_release_kernel);
0793a61d 2833
a66a3052
PZ
2834/*
2835 * Called when the last reference to the file is gone.
2836 */
2837static int perf_release(struct inode *inode, struct file *file)
fb0459d7 2838{
a66a3052 2839 struct perf_event *event = file->private_data;
8882135b 2840 struct task_struct *owner;
fb0459d7 2841
a66a3052 2842 file->private_data = NULL;
fb0459d7 2843
8882135b
PZ
2844 rcu_read_lock();
2845 owner = ACCESS_ONCE(event->owner);
2846 /*
2847 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
2848 * !owner it means the list deletion is complete and we can indeed
2849 * free this event, otherwise we need to serialize on
2850 * owner->perf_event_mutex.
2851 */
2852 smp_read_barrier_depends();
2853 if (owner) {
2854 /*
2855 * Since delayed_put_task_struct() also drops the last
2856 * task reference we can safely take a new reference
2857 * while holding the rcu_read_lock().
2858 */
2859 get_task_struct(owner);
2860 }
2861 rcu_read_unlock();
2862
2863 if (owner) {
2864 mutex_lock(&owner->perf_event_mutex);
2865 /*
2866 * We have to re-check the event->owner field, if it is cleared
2867 * we raced with perf_event_exit_task(), acquiring the mutex
2868 * ensured they're done, and we can proceed with freeing the
2869 * event.
2870 */
2871 if (event->owner)
2872 list_del_init(&event->owner_entry);
2873 mutex_unlock(&owner->perf_event_mutex);
2874 put_task_struct(owner);
2875 }
2876
a66a3052 2877 return perf_event_release_kernel(event);
fb0459d7 2878}
fb0459d7 2879
59ed446f 2880u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c0994 2881{
cdd6c482 2882 struct perf_event *child;
e53c0994
PZ
2883 u64 total = 0;
2884
59ed446f
PZ
2885 *enabled = 0;
2886 *running = 0;
2887
6f10581a 2888 mutex_lock(&event->child_mutex);
cdd6c482 2889 total += perf_event_read(event);
59ed446f
PZ
2890 *enabled += event->total_time_enabled +
2891 atomic64_read(&event->child_total_time_enabled);
2892 *running += event->total_time_running +
2893 atomic64_read(&event->child_total_time_running);
2894
2895 list_for_each_entry(child, &event->child_list, child_list) {
cdd6c482 2896 total += perf_event_read(child);
59ed446f
PZ
2897 *enabled += child->total_time_enabled;
2898 *running += child->total_time_running;
2899 }
6f10581a 2900 mutex_unlock(&event->child_mutex);
e53c0994
PZ
2901
2902 return total;
2903}
fb0459d7 2904EXPORT_SYMBOL_GPL(perf_event_read_value);
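
/*
 * In-kernel usage sketch (illustrative; assumes a valid @event, e.g.
 * from perf_event_create_kernel_counter()): callers commonly scale
 * the raw count by enabled/running to estimate what it would have
 * been without multiplexing:
 *
 *	u64 enabled, running;
 *	u64 count = perf_event_read_value(event, &enabled, &running);
 *	if (running)
 *		count = div64_u64(count * enabled, running);
 */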
e53c0994 2905
cdd6c482 2906static int perf_event_read_group(struct perf_event *event,
3dab77fb
PZ
2907 u64 read_format, char __user *buf)
2908{
cdd6c482 2909 struct perf_event *leader = event->group_leader, *sub;
6f10581a
PZ
2910 int n = 0, size = 0, ret = -EFAULT;
2911 struct perf_event_context *ctx = leader->ctx;
abf4868b 2912 u64 values[5];
59ed446f 2913 u64 count, enabled, running;
abf4868b 2914
6f10581a 2915 mutex_lock(&ctx->mutex);
59ed446f 2916 count = perf_event_read_value(leader, &enabled, &running);
3dab77fb
PZ
2917
2918 values[n++] = 1 + leader->nr_siblings;
59ed446f
PZ
2919 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2920 values[n++] = enabled;
2921 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2922 values[n++] = running;
abf4868b
PZ
2923 values[n++] = count;
2924 if (read_format & PERF_FORMAT_ID)
2925 values[n++] = primary_event_id(leader);
3dab77fb
PZ
2926
2927 size = n * sizeof(u64);
2928
2929 if (copy_to_user(buf, values, size))
6f10581a 2930 goto unlock;
3dab77fb 2931
6f10581a 2932 ret = size;
3dab77fb 2933
65abc865 2934 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
abf4868b 2935 n = 0;
3dab77fb 2936
59ed446f 2937 values[n++] = perf_event_read_value(sub, &enabled, &running);
abf4868b
PZ
2938 if (read_format & PERF_FORMAT_ID)
2939 values[n++] = primary_event_id(sub);
2940
2941 size = n * sizeof(u64);
2942
184d3da8 2943 if (copy_to_user(buf + ret, values, size)) {
6f10581a
PZ
2944 ret = -EFAULT;
2945 goto unlock;
2946 }
abf4868b
PZ
2947
2948 ret += size;
3dab77fb 2949 }
6f10581a
PZ
2950unlock:
2951 mutex_unlock(&ctx->mutex);
3dab77fb 2952
abf4868b 2953 return ret;
3dab77fb
PZ
2954}
2955
cdd6c482 2956static int perf_event_read_one(struct perf_event *event,
3dab77fb
PZ
2957 u64 read_format, char __user *buf)
2958{
59ed446f 2959 u64 enabled, running;
3dab77fb
PZ
2960 u64 values[4];
2961 int n = 0;
2962
59ed446f
PZ
2963 values[n++] = perf_event_read_value(event, &enabled, &running);
2964 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2965 values[n++] = enabled;
2966 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2967 values[n++] = running;
3dab77fb 2968 if (read_format & PERF_FORMAT_ID)
cdd6c482 2969 values[n++] = primary_event_id(event);
3dab77fb
PZ
2970
2971 if (copy_to_user(buf, values, n * sizeof(u64)))
2972 return -EFAULT;
2973
2974 return n * sizeof(u64);
2975}
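
/*
 * Userspace view (illustrative sketch): for a non-group event opened
 * with read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING, a read() returns the three u64s in
 * exactly the order written above:
 *
 *	struct { __u64 value, time_enabled, time_running; } rf;
 *	ssize_t n = read(fd, &rf, sizeof(rf));	// n == sizeof(rf) on success
 */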
2976
0793a61d 2977/*
cdd6c482 2978 * Read the performance event - simple non blocking version for now
0793a61d
TG
2979 */
2980static ssize_t
cdd6c482 2981perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
0793a61d 2982{
cdd6c482 2983 u64 read_format = event->attr.read_format;
3dab77fb 2984 int ret;
0793a61d 2985
3b6f9e5c 2986 /*
cdd6c482 2987 * Return end-of-file for a read on an event that is in
3b6f9e5c
PM
2988 * error state (i.e. because it was pinned but it couldn't be
2989 * scheduled on to the CPU at some point).
2990 */
cdd6c482 2991 if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5c
PM
2992 return 0;
2993
c320c7b7 2994 if (count < event->read_size)
3dab77fb
PZ
2995 return -ENOSPC;
2996
cdd6c482 2997 WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb 2998 if (read_format & PERF_FORMAT_GROUP)
cdd6c482 2999 ret = perf_event_read_group(event, read_format, buf);
3dab77fb 3000 else
cdd6c482 3001 ret = perf_event_read_one(event, read_format, buf);
0793a61d 3002
3dab77fb 3003 return ret;
0793a61d
TG
3004}
3005
0793a61d
TG
3006static ssize_t
3007perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3008{
cdd6c482 3009 struct perf_event *event = file->private_data;
0793a61d 3010
cdd6c482 3011 return perf_read_hw(event, buf, count);
0793a61d
TG
3012}
3013
3014static unsigned int perf_poll(struct file *file, poll_table *wait)
3015{
cdd6c482 3016 struct perf_event *event = file->private_data;
76369139 3017 struct ring_buffer *rb;
c33a0bc4 3018 unsigned int events = POLLHUP;
c7138f37 3019
10c6db11
PZ
3020 /*
3021 * Race between perf_event_set_output() and perf_poll(): perf_poll()
3022 * grabs the rb reference but perf_event_set_output() overrides it.
3023 * Here is the timeline for two threads T1, T2:
3024 * t0: T1, rb = rcu_dereference(event->rb)
3025 * t1: T2, old_rb = event->rb
3026 * t2: T2, event->rb = new rb
3027 * t3: T2, ring_buffer_detach(old_rb)
3028 * t4: T1, ring_buffer_attach(rb)
3029 * t5: T1, poll_wait(event->waitq)
3030 *
3031 * To avoid this problem, we grab mmap_mutex in perf_poll(),
3032 * thereby ensuring that the assignment of the new ring buffer
3033 * and the detachment of the old buffer appear atomic to perf_poll()
3034 */
3035 mutex_lock(&event->mmap_mutex);
3036
c7138f37 3037 rcu_read_lock();
76369139 3038 rb = rcu_dereference(event->rb);
10c6db11
PZ
3039 if (rb) {
3040 ring_buffer_attach(event, rb);
76369139 3041 events = atomic_xchg(&rb->poll, 0);
10c6db11 3042 }
c7138f37 3043 rcu_read_unlock();
0793a61d 3044
10c6db11
PZ
3045 mutex_unlock(&event->mmap_mutex);
3046
cdd6c482 3047 poll_wait(file, &event->waitq, wait);
0793a61d 3048
0793a61d
TG
3049 return events;
3050}
3051
cdd6c482 3052static void perf_event_reset(struct perf_event *event)
6de6a7b9 3053{
cdd6c482 3054 (void)perf_event_read(event);
e7850595 3055 local64_set(&event->count, 0);
cdd6c482 3056 perf_event_update_userpage(event);
3df5edad
PZ
3057}
3058
c93f7669 3059/*
cdd6c482
IM
3060 * Holding the top-level event's child_mutex means that any
3061 * descendant process that has inherited this event will block
3062 * in sync_child_event if it goes to exit, thus satisfying the
3063 * task existence requirements of perf_event_enable/disable.
c93f7669 3064 */
cdd6c482
IM
3065static void perf_event_for_each_child(struct perf_event *event,
3066 void (*func)(struct perf_event *))
3df5edad 3067{
cdd6c482 3068 struct perf_event *child;
3df5edad 3069
cdd6c482
IM
3070 WARN_ON_ONCE(event->ctx->parent_ctx);
3071 mutex_lock(&event->child_mutex);
3072 func(event);
3073 list_for_each_entry(child, &event->child_list, child_list)
3df5edad 3074 func(child);
cdd6c482 3075 mutex_unlock(&event->child_mutex);
3df5edad
PZ
3076}
3077
cdd6c482
IM
3078static void perf_event_for_each(struct perf_event *event,
3079 void (*func)(struct perf_event *))
3df5edad 3080{
cdd6c482
IM
3081 struct perf_event_context *ctx = event->ctx;
3082 struct perf_event *sibling;
3df5edad 3083
75f937f2
PZ
3084 WARN_ON_ONCE(ctx->parent_ctx);
3085 mutex_lock(&ctx->mutex);
cdd6c482 3086 event = event->group_leader;
75f937f2 3087
cdd6c482
IM
3088 perf_event_for_each_child(event, func);
3089 func(event);
3090 list_for_each_entry(sibling, &event->sibling_list, group_entry)
3091 perf_event_for_each_child(event, func);
75f937f2 3092 mutex_unlock(&ctx->mutex);
6de6a7b9
PZ
3093}
3094
cdd6c482 3095static int perf_event_period(struct perf_event *event, u64 __user *arg)
08247e31 3096{
cdd6c482 3097 struct perf_event_context *ctx = event->ctx;
08247e31
PZ
3098 int ret = 0;
3099 u64 value;
3100
6c7e550f 3101 if (!is_sampling_event(event))
08247e31
PZ
3102 return -EINVAL;
3103
ad0cf347 3104 if (copy_from_user(&value, arg, sizeof(value)))
08247e31
PZ
3105 return -EFAULT;
3106
3107 if (!value)
3108 return -EINVAL;
3109
e625cce1 3110 raw_spin_lock_irq(&ctx->lock);
cdd6c482
IM
3111 if (event->attr.freq) {
3112 if (value > sysctl_perf_event_sample_rate) {
08247e31
PZ
3113 ret = -EINVAL;
3114 goto unlock;
3115 }
3116
cdd6c482 3117 event->attr.sample_freq = value;
08247e31 3118 } else {
cdd6c482
IM
3119 event->attr.sample_period = value;
3120 event->hw.sample_period = value;
08247e31
PZ
3121 }
3122unlock:
e625cce1 3123 raw_spin_unlock_irq(&ctx->lock);
08247e31
PZ
3124
3125 return ret;
3126}
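
/*
 * Userspace sketch (illustrative): the PERIOD ioctl takes a pointer
 * to a u64, letting the period (or frequency, for freq events) of a
 * live sampling event be changed without reopening it:
 *
 *	__u64 period = 100000;
 *	ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
 */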
3127
ac9721f3
PZ
3128static const struct file_operations perf_fops;
3129
3130static struct perf_event *perf_fget_light(int fd, int *fput_needed)
3131{
3132 struct file *file;
3133
3134 file = fget_light(fd, fput_needed);
3135 if (!file)
3136 return ERR_PTR(-EBADF);
3137
3138 if (file->f_op != &perf_fops) {
3139 fput_light(file, *fput_needed);
3140 *fput_needed = 0;
3141 return ERR_PTR(-EBADF);
3142 }
3143
3144 return file->private_data;
3145}
3146
3147static int perf_event_set_output(struct perf_event *event,
3148 struct perf_event *output_event);
6fb2915d 3149static int perf_event_set_filter(struct perf_event *event, void __user *arg);
a4be7c27 3150
d859e29f
PM
3151static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3152{
cdd6c482
IM
3153 struct perf_event *event = file->private_data;
3154 void (*func)(struct perf_event *);
3df5edad 3155 u32 flags = arg;
d859e29f
PM
3156
3157 switch (cmd) {
cdd6c482
IM
3158 case PERF_EVENT_IOC_ENABLE:
3159 func = perf_event_enable;
d859e29f 3160 break;
cdd6c482
IM
3161 case PERF_EVENT_IOC_DISABLE:
3162 func = perf_event_disable;
79f14641 3163 break;
cdd6c482
IM
3164 case PERF_EVENT_IOC_RESET:
3165 func = perf_event_reset;
6de6a7b9 3166 break;
3df5edad 3167
cdd6c482
IM
3168 case PERF_EVENT_IOC_REFRESH:
3169 return perf_event_refresh(event, arg);
08247e31 3170
cdd6c482
IM
3171 case PERF_EVENT_IOC_PERIOD:
3172 return perf_event_period(event, (u64 __user *)arg);
08247e31 3173
cdd6c482 3174 case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3
PZ
3175 {
3176 struct perf_event *output_event = NULL;
3177 int fput_needed = 0;
3178 int ret;
3179
3180 if (arg != -1) {
3181 output_event = perf_fget_light(arg, &fput_needed);
3182 if (IS_ERR(output_event))
3183 return PTR_ERR(output_event);
3184 }
3185
3186 ret = perf_event_set_output(event, output_event);
3187 if (output_event)
3188 fput_light(output_event->filp, fput_needed);
3189
3190 return ret;
3191 }
a4be7c27 3192
6fb2915d
LZ
3193 case PERF_EVENT_IOC_SET_FILTER:
3194 return perf_event_set_filter(event, (void __user *)arg);
3195
d859e29f 3196 default:
3df5edad 3197 return -ENOTTY;
d859e29f 3198 }
3df5edad
PZ
3199
3200 if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482 3201 perf_event_for_each(event, func);
3df5edad 3202 else
cdd6c482 3203 perf_event_for_each_child(event, func);
3df5edad
PZ
3204
3205 return 0;
d859e29f
PM
3206}
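
/*
 * Userspace sketch (illustrative): passing PERF_IOC_FLAG_GROUP in the
 * argument selects perf_event_for_each() above, so one ioctl on the
 * group leader's fd acts on the leader and all of its siblings:
 *
 *	ioctl(group_fd, PERF_EVENT_IOC_RESET,  PERF_IOC_FLAG_GROUP);
 *	ioctl(group_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */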
3207
cdd6c482 3208int perf_event_task_enable(void)
771d7cde 3209{
cdd6c482 3210 struct perf_event *event;
771d7cde 3211
cdd6c482
IM
3212 mutex_lock(&current->perf_event_mutex);
3213 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3214 perf_event_for_each_child(event, perf_event_enable);
3215 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
3216
3217 return 0;
3218}
3219
cdd6c482 3220int perf_event_task_disable(void)
771d7cde 3221{
cdd6c482 3222 struct perf_event *event;
771d7cde 3223
cdd6c482
IM
3224 mutex_lock(&current->perf_event_mutex);
3225 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3226 perf_event_for_each_child(event, perf_event_disable);
3227 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
3228
3229 return 0;
3230}
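
/*
 * Userspace sketch (illustrative): the two functions above are reached
 * via prctl() and toggle every counter the calling task owns:
 *
 *	prctl(PR_TASK_PERF_EVENTS_DISABLE);
 *	// ... code we don't want profiled ...
 *	prctl(PR_TASK_PERF_EVENTS_ENABLE);
 */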
3231
cdd6c482
IM
3232#ifndef PERF_EVENT_INDEX_OFFSET
3233# define PERF_EVENT_INDEX_OFFSET 0
f738eb1b
IM
3234#endif
3235
cdd6c482 3236static int perf_event_index(struct perf_event *event)
194002b2 3237{
a4eaf7f1
PZ
3238 if (event->hw.state & PERF_HES_STOPPED)
3239 return 0;
3240
cdd6c482 3241 if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b2
PZ
3242 return 0;
3243
cdd6c482 3244 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
194002b2
PZ
3245}
3246
c4794295 3247static void calc_timer_values(struct perf_event *event,
7f310a5d
EM
3248 u64 *enabled,
3249 u64 *running)
c4794295
EM
3250{
3251 u64 now, ctx_time;
3252
3253 now = perf_clock();
3254 ctx_time = event->shadow_ctx_time + now;
3255 *enabled = ctx_time - event->tstamp_enabled;
3256 *running = ctx_time - event->tstamp_running;
3257}
3258
38ff667b
PZ
3259/*
3260 * Callers need to ensure there can be no nesting of this function, otherwise
3261 * the seqlock logic goes bad. We cannot serialize this because the arch
3262 * code calls this from NMI context.
3263 */
cdd6c482 3264void perf_event_update_userpage(struct perf_event *event)
37d81828 3265{
cdd6c482 3266 struct perf_event_mmap_page *userpg;
76369139 3267 struct ring_buffer *rb;
0d641208 3268 u64 enabled, running;
38ff667b
PZ
3269
3270 rcu_read_lock();
0d641208
EM
3271 /*
3272 * compute total_time_enabled, total_time_running
3273 * based on snapshot values taken when the event
3274 * was last scheduled in.
3275 *
3276 * we cannot simply call update_context_time()
3277 * because of locking issues, as we can be called in
3278 * NMI context
3279 */
3280 calc_timer_values(event, &enabled, &running);
76369139
FW
3281 rb = rcu_dereference(event->rb);
3282 if (!rb)
38ff667b
PZ
3283 goto unlock;
3284
76369139 3285 userpg = rb->user_page;
37d81828 3286
7b732a75
PZ
3287 /*
3288 * Disable preemption so as to not let the corresponding user-space
3289 * spin too long if we get preempted.
3290 */
3291 preempt_disable();
37d81828 3292 ++userpg->lock;
92f22a38 3293 barrier();
cdd6c482 3294 userpg->index = perf_event_index(event);
b5e58793 3295 userpg->offset = perf_event_count(event);
cdd6c482 3296 if (event->state == PERF_EVENT_STATE_ACTIVE)
e7850595 3297 userpg->offset -= local64_read(&event->hw.prev_count);
7b732a75 3298
0d641208 3299 userpg->time_enabled = enabled +
cdd6c482 3300 atomic64_read(&event->child_total_time_enabled);
7f8b4e4e 3301
0d641208 3302 userpg->time_running = running +
cdd6c482 3303 atomic64_read(&event->child_total_time_running);
7f8b4e4e 3304
92f22a38 3305 barrier();
37d81828 3306 ++userpg->lock;
7b732a75 3307 preempt_enable();
38ff667b 3308unlock:
7b732a75 3309 rcu_read_unlock();
37d81828
PM
3310}
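
/*
 * Matching userspace read side (illustrative sketch; pg points at the
 * mmap()ed struct perf_event_mmap_page):
 *
 *	do {
 *		seq = pg->lock;
 *		barrier();
 *		idx     = pg->index;
 *		off     = pg->offset;
 *		enabled = pg->time_enabled;
 *		running = pg->time_running;
 *		barrier();
 *	} while (pg->lock != seq || (seq & 1));
 *
 * An odd ->lock means the kernel was mid-update; a changed ->lock
 * means we raced an update. Retry in either case.
 */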
3311
906010b2
PZ
3312static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3313{
3314 struct perf_event *event = vma->vm_file->private_data;
76369139 3315 struct ring_buffer *rb;
906010b2
PZ
3316 int ret = VM_FAULT_SIGBUS;
3317
3318 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3319 if (vmf->pgoff == 0)
3320 ret = 0;
3321 return ret;
3322 }
3323
3324 rcu_read_lock();
76369139
FW
3325 rb = rcu_dereference(event->rb);
3326 if (!rb)
906010b2
PZ
3327 goto unlock;
3328
3329 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3330 goto unlock;
3331
76369139 3332 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b2
PZ
3333 if (!vmf->page)
3334 goto unlock;
3335
3336 get_page(vmf->page);
3337 vmf->page->mapping = vma->vm_file->f_mapping;
3338 vmf->page->index = vmf->pgoff;
3339
3340 ret = 0;
3341unlock:
3342 rcu_read_unlock();
3343
3344 return ret;
3345}
3346
10c6db11
PZ
3347static void ring_buffer_attach(struct perf_event *event,
3348 struct ring_buffer *rb)
3349{
3350 unsigned long flags;
3351
3352 if (!list_empty(&event->rb_entry))
3353 return;
3354
3355 spin_lock_irqsave(&rb->event_lock, flags);
3356 if (!list_empty(&event->rb_entry))
3357 goto unlock;
3358
3359 list_add(&event->rb_entry, &rb->event_list);
3360unlock:
3361 spin_unlock_irqrestore(&rb->event_lock, flags);
3362}
3363
3364static void ring_buffer_detach(struct perf_event *event,
3365 struct ring_buffer *rb)
3366{
3367 unsigned long flags;
3368
3369 if (list_empty(&event->rb_entry))
3370 return;
3371
3372 spin_lock_irqsave(&rb->event_lock, flags);
3373 list_del_init(&event->rb_entry);
3374 wake_up_all(&event->waitq);
3375 spin_unlock_irqrestore(&rb->event_lock, flags);
3376}
3377
3378static void ring_buffer_wakeup(struct perf_event *event)
3379{
3380 struct ring_buffer *rb;
3381
3382 rcu_read_lock();
3383 rb = rcu_dereference(event->rb);
44b7f4b9
WD
3384 if (!rb)
3385 goto unlock;
3386
3387 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
10c6db11 3388 wake_up_all(&event->waitq);
44b7f4b9
WD
3389
3390unlock:
10c6db11
PZ
3391 rcu_read_unlock();
3392}
3393
76369139 3394static void rb_free_rcu(struct rcu_head *rcu_head)
906010b2 3395{
76369139 3396 struct ring_buffer *rb;
906010b2 3397
76369139
FW
3398 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3399 rb_free(rb);
7b732a75
PZ
3400}
3401
76369139 3402static struct ring_buffer *ring_buffer_get(struct perf_event *event)
7b732a75 3403{
76369139 3404 struct ring_buffer *rb;
7b732a75 3405
ac9721f3 3406 rcu_read_lock();
76369139
FW
3407 rb = rcu_dereference(event->rb);
3408 if (rb) {
3409 if (!atomic_inc_not_zero(&rb->refcount))
3410 rb = NULL;
ac9721f3
PZ
3411 }
3412 rcu_read_unlock();
3413
76369139 3414 return rb;
ac9721f3
PZ
3415}
3416
76369139 3417static void ring_buffer_put(struct ring_buffer *rb)
ac9721f3 3418{
10c6db11
PZ
3419 struct perf_event *event, *n;
3420 unsigned long flags;
3421
76369139 3422 if (!atomic_dec_and_test(&rb->refcount))
ac9721f3 3423 return;
7b732a75 3424
10c6db11
PZ
3425 spin_lock_irqsave(&rb->event_lock, flags);
3426 list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
3427 list_del_init(&event->rb_entry);
3428 wake_up_all(&event->waitq);
3429 }
3430 spin_unlock_irqrestore(&rb->event_lock, flags);
3431
76369139 3432 call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a75
PZ
3433}
3434
3435static void perf_mmap_open(struct vm_area_struct *vma)
3436{
cdd6c482 3437 struct perf_event *event = vma->vm_file->private_data;
7b732a75 3438
cdd6c482 3439 atomic_inc(&event->mmap_count);
7b732a75
PZ
3440}
3441
3442static void perf_mmap_close(struct vm_area_struct *vma)
3443{
cdd6c482 3444 struct perf_event *event = vma->vm_file->private_data;
7b732a75 3445
cdd6c482 3446 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
76369139 3447 unsigned long size = perf_data_size(event->rb);
ac9721f3 3448 struct user_struct *user = event->mmap_user;
76369139 3449 struct ring_buffer *rb = event->rb;
789f90fc 3450
906010b2 3451 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
bc3e53f6 3452 vma->vm_mm->pinned_vm -= event->mmap_locked;
76369139 3453 rcu_assign_pointer(event->rb, NULL);
10c6db11 3454 ring_buffer_detach(event, rb);
cdd6c482 3455 mutex_unlock(&event->mmap_mutex);
ac9721f3 3456
76369139 3457 ring_buffer_put(rb);
ac9721f3 3458 free_uid(user);
7b732a75 3459 }
37d81828
PM
3460}
3461
f0f37e2f 3462static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea8
PZ
3463 .open = perf_mmap_open,
3464 .close = perf_mmap_close,
3465 .fault = perf_mmap_fault,
3466 .page_mkwrite = perf_mmap_fault,
37d81828
PM
3467};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_event *event = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	struct ring_buffer *rb;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0, flags = 0;

	/*
	 * Don't allow mmap() of inherited per-task counters. This would
	 * create a performance issue due to all children writing to the
	 * same rb.
	 */
	if (event->cpu == -1 && event->attr.inherit)
		return -EINVAL;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have rb pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->mmap_mutex);
	if (event->rb) {
		if (event->rb->nr_pages == nr_pages)
			atomic_inc(&event->rb->refcount);
		else
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->pinned_vm + extra;

	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(event->rb);

	if (vma->vm_flags & VM_WRITE)
		flags |= RING_BUFFER_WRITABLE;

	rb = rb_alloc(nr_pages,
		event->attr.watermark ? event->attr.wakeup_watermark : 0,
		event->cpu, flags);

	if (!rb) {
		ret = -ENOMEM;
		goto unlock;
	}
	rcu_assign_pointer(event->rb, rb);

	atomic_long_add(user_extra, &user->locked_vm);
	event->mmap_locked = extra;
	event->mmap_user = get_current_user();
	vma->vm_mm->pinned_vm += event->mmap_locked;

unlock:
	if (!ret)
		atomic_inc(&event->mmap_count);
	mutex_unlock(&event->mmap_mutex);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}
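
/*
 * Illustrative sketch (not part of the kernel source): how user-space
 * typically maps the buffer that perf_mmap() sets up. The "1 + 2^n
 * pages" requirement above is why 1 + 8 pages are mapped here; fd is
 * assumed to come from perf_event_open(2).
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	long page_size = sysconf(_SC_PAGESIZE);
 *	size_t len = (1 + 8) * page_size;	// metadata page + 8 data pages
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);	// MAP_SHARED is mandatory here
 *	// base points at struct perf_event_mmap_page (the metadata page);
 *	// the sampled records follow in the next 8 pages.
 */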

static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_event *event = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &event->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}
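
/*
 * Illustrative sketch (not part of the kernel source): the fasync hook
 * above is what makes the classic SIGIO pattern work on a perf fd. A
 * minimal user-space setup, assuming fd came from perf_event_open(2):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	fcntl(fd, F_SETOWN, getpid());			  // route SIGIO here
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC); // reaches perf_fasync()
 *	// SIGIO is then delivered whenever perf_event_wakeup() below
 *	// fires kill_fasync(), e.g. on counter overflow.
 */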

static const struct file_operations perf_fops = {
	.llseek			= no_llseek,
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};

/*
 * Perf event wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_event_wakeup(struct perf_event *event)
{
	ring_buffer_wakeup(event);

	if (event->pending_kill) {
		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
		event->pending_kill = 0;
	}
}

static void perf_pending_event(struct irq_work *entry)
{
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);

	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
	}

	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
	}
}

/*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = cbs;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
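
/*
 * Illustrative sketch (not part of the kernel source): how a hypervisor
 * module such as KVM consumes the registration API above. The field
 * names follow struct perf_guest_info_callbacks as of this era; the
 * example_* identifiers are made up.
 *
 *	static int example_is_in_guest(void)	{ return 0; }
 *	static int example_is_user_mode(void)	{ return 0; }
 *	static unsigned long example_get_guest_ip(void) { return 0; }
 *
 *	static struct perf_guest_info_callbacks example_cbs = {
 *		.is_in_guest	= example_is_in_guest,
 *		.is_user_mode	= example_is_user_mode,
 *		.get_guest_ip	= example_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&example_cbs);	// module init
 *	perf_unregister_guest_info_callbacks(&example_cbs);	// module exit
 */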

static void __perf_event_header__init_id(struct perf_event_header *header,
					 struct perf_sample_data *data,
					 struct perf_event *event)
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;
	header->size += event->id_header_size;

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);
	}

	if (sample_type & PERF_SAMPLE_TIME)
		data->time = perf_clock();

	if (sample_type & PERF_SAMPLE_ID)
		data->id = primary_event_id(event);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		data->stream_id = event->id;

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu	 = raw_smp_processor_id();
		data->cpu_entry.reserved = 0;
	}
}

void perf_event_header__init_id(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event)
{
	if (event->attr.sample_id_all)
		__perf_event_header__init_id(header, data, event);
}

static void __perf_event__output_id_sample(struct perf_output_handle *handle,
					   struct perf_sample_data *data)
{
	u64 sample_type = data->type;

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);
}

void perf_event__output_id_sample(struct perf_event *event,
				  struct perf_output_handle *handle,
				  struct perf_sample_data *sample)
{
	if (event->attr.sample_id_all)
		__perf_event__output_id_sample(handle, sample);
}

static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event,
				 u64 enabled, u64 running)
{
	u64 read_format = event->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_count(event);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	__output_copy(handle, values, n * sizeof(u64));
}

/*
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
				   struct perf_event *event,
				   u64 enabled, u64 running)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;

	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = perf_event_count(leader);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	__output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if (sub != event)
			sub->pmu->read(sub);

		values[n++] = perf_event_count(sub);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		__output_copy(handle, values, n * sizeof(u64));
	}
}

#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
				 PERF_FORMAT_TOTAL_TIME_RUNNING)

static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	u64 enabled = 0, running = 0;
	u64 read_format = event->attr.read_format;

	/*
	 * Compute total_time_enabled and total_time_running from the
	 * snapshot values taken when the event was last scheduled in.
	 *
	 * We cannot simply call update_context_time() here because we
	 * may be called from NMI context, where taking its locks is
	 * not allowed.
	 */
	if (read_format & PERF_FORMAT_TOTAL_TIMES)
		calc_timer_values(event, &enabled, &running);

	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event, enabled, running);
	else
		perf_output_read_one(handle, event, enabled, running);
}

void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = 1;

			size += data->callchain->nr;
			size *= sizeof(u64);

			__output_copy(handle, data->callchain, size);
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
			__output_copy(handle, data->raw->data,
					   data->raw->size);
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}

	if (!event->attr.watermark) {
		int wakeup_events = event->attr.wakeup_events;

		if (wakeup_events) {
			struct ring_buffer *rb = handle->rb;
			int events = local_inc_return(&rb->events);

			if (events >= wakeup_events) {
				local_sub(wakeup_events, &rb->events);
				local_inc(&rb->wakeup);
			}
		}
	}
}

void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	__perf_event_header__init_id(header, data, event);

	if (sample_type & PERF_SAMPLE_IP)
		data->ip = perf_instruction_pointer(regs);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;

		data->callchain = perf_callchain(regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header->size += size;
	}
}

static void perf_event_output(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	/* protect the callchain buffers */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size))
		goto exit;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);

exit:
	rcu_read_unlock();
}

/*
 * read event_id
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			.size = sizeof(read_event) + event->read_size,
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	perf_event_header__init_id(&read_event.header, &sample, event);
	ret = perf_output_begin(&handle, event, read_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
}

/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
 */

struct perf_task_event {
	struct task_struct		*task;
	struct perf_event_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
		u64				time;
	} event_id;
};

static void perf_event_task_output(struct perf_event *event,
				   struct perf_task_event *task_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data	sample;
	struct task_struct *task = task_event->task;
	int ret, size = task_event->event_id.header.size;

	perf_event_header__init_id(&task_event->event_id.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				task_event->event_id.header.size);
	if (ret)
		goto out;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	perf_output_put(&handle, task_event->event_id);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	task_event->event_id.header.size = size;
}

static int perf_event_task_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if (event->attr.comm || event->attr.mmap ||
	    event->attr.mmap_data || event->attr.task)
		return 1;

	return 0;
}

static void perf_event_task_ctx(struct perf_event_context *ctx,
				struct perf_task_event *task_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_task_match(event))
			perf_event_task_output(event, task_event);
	}
}

static void perf_event_task_event(struct perf_task_event *task_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int ctxn;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_task_ctx(&cpuctx->ctx, task_event);

		ctx = task_event->task_ctx;
		if (!ctx) {
			ctxn = pmu->task_ctx_nr;
			if (ctxn < 0)
				goto next;
			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		}
		if (ctx)
			perf_event_task_ctx(ctx, task_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

static void perf_event_task(struct task_struct *task,
			    struct perf_event_context *task_ctx,
			    int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id    = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
			.time = perf_clock(),
		},
	};

	perf_event_task_event(&task_event);
}

void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
}

/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event_id;
};

static void perf_event_comm_output(struct perf_event *event,
				   struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = comm_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				comm_event->event_id.header.size);

	if (ret)
		goto out;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	__output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	comm_event->event_id.header.size = size;
}

static int perf_event_comm_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if (event->attr.comm)
		return 1;

	return 0;
}

static void perf_event_comm_ctx(struct perf_event_context *ctx,
				struct perf_comm_event *comm_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_comm_match(event))
			perf_event_comm_output(event, comm_event);
	}
}

static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	char comm[TASK_COMM_LEN];
	unsigned int size;
	struct pmu *pmu;
	int ctxn;

	memset(comm, 0, sizeof(comm));
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_comm_ctx(&cpuctx->ctx, comm_event);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx)
			perf_event_comm_ctx(ctx, comm_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		perf_event_enable_on_exec(ctx);
	}

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}

/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event_id;
};

static void perf_event_mmap_output(struct perf_event *event,
				   struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = mmap_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				mmap_event->event_id.header.size);
	if (ret)
		goto out;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	__output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	mmap_event->event_id.header.size = size;
}

static int perf_event_mmap_match(struct perf_event *event,
				 struct perf_mmap_event *mmap_event,
				 int executable)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if ((!executable && event->attr.mmap_data) ||
	    (executable && event->attr.mmap))
		return 1;

	return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
				struct perf_mmap_event *mmap_event,
				int executable)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_mmap_match(event, mmap_event, executable))
			perf_event_mmap_output(event, mmap_event);
	}
}

static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;
	struct pmu *pmu;
	int ctxn;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path works from the end of the rb backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		if (arch_vma_name(mmap_event->vma)) {
			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
				       sizeof(tmp));
			goto got_name;
		}

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
				vma->vm_end >= vma->vm_mm->brk) {
			name = strncpy(tmp, "[heap]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
				vma->vm_end >= vma->vm_mm->start_stack) {
			name = strncpy(tmp, "[stack]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
					vma->vm_flags & VM_EXEC);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx) {
			perf_event_mmap_ctx(ctx, mmap_event,
					vma->vm_flags & VM_EXEC);
		}
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();

	kfree(buf);
}

void perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = PERF_RECORD_MISC_USER,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
		},
	};

	perf_event_mmap_event(&mmap_event);
}

/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	perf_event_header__init_id(&throttle_event.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				throttle_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
}

/*
 * Generic event overflow handling, sampling.
 */

static int __perf_event_overflow(struct perf_event *event,
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	u64 seq;
	int ret = 0;

	/*
	 * Non-sampling counters might still use the PMI to fold short
	 * hardware counters, ignore those.
	 */
	if (unlikely(!is_sampling_event(event)))
		return 0;

	seq = __this_cpu_read(perf_throttled_seq);
	if (seq != hwc->interrupts_seq) {
		hwc->interrupts_seq = seq;
		hwc->interrupts = 1;
	} else {
		hwc->interrupts++;
		if (unlikely(throttle
			     && hwc->interrupts >= max_samples_per_tick)) {
			__this_cpu_inc(perf_throttled_count);
			hwc->interrupts = MAX_INTERRUPTS;
			perf_log_throttle(event, 0);
			ret = 1;
		}
	}

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		event->pending_disable = 1;
		irq_work_queue(&event->pending);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, data, regs);
	else
		perf_event_output(event, data, regs);

	if (event->fasync && event->pending_kill) {
		event->pending_wakeup = 1;
		irq_work_queue(&event->pending);
	}

	return ret;
}

int perf_event_overflow(struct perf_event *event,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	return __perf_event_overflow(event, 1, data, regs);
}

/*
 * Generic software event infrastructure
 */

struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each context */
	int				recursion[PERF_NR_CONTEXTS];
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}
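
/*
 * Worked example for the period logic above (illustrative only):
 * with sample_period = 100 and period_left having climbed to +30
 * (the counter ran 30 events past the last period boundary),
 * nr = (100 + 30) / 100 = 1 period has elapsed, and period_left is
 * rewound by offset = 1 * 100 to -70, i.e. 70 more events until the
 * next overflow, keeping it in [-sample_period, 0]. The cmpxchg loop
 * makes the read-modify-write safe against NMIs landing in between.
 */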

static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, throttle,
					    data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}

static void perf_swevent_event(struct perf_event *event, u64 nr,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_add(nr, &event->count);

	if (!regs)
		return;

	if (!is_sampling_event(event))
		return;

	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
		data->period = nr;
		return perf_swevent_overflow(event, 1, data, regs);
	} else
		data->period = event->hw.last_period;

	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, data, regs);

	if (local64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, data, regs);
}

static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 1;

	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->attr.type != type)
		return 0;

	if (event->attr.config != event_id)
		return 0;

	if (perf_exclude_event(event, regs))
		return 0;

	return 1;
}

static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}

/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
	struct swevent_hlist *hlist;

	hlist = rcu_dereference(swhash->swevent_hlist);
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release. Which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */
	hlist = rcu_dereference_protected(swhash->swevent_hlist,
					  lockdep_is_held(&event->ctx->lock));
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    u64 nr,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct perf_event *event;
	struct hlist_node *node;
	struct hlist_head *head;

	rcu_read_lock();
	head = find_swevent_head_rcu(swhash, type, event_id);
	if (!head)
		goto end;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_event(event, nr, data, regs);
	}
end:
	rcu_read_unlock();
}

int perf_swevent_get_recursion_context(void)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

inline void perf_swevent_put_recursion_context(int rctx)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	put_recursion_context(swhash->recursion, rctx);
}

void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	preempt_disable_notrace();
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return;

	perf_sample_data_init(&data, addr);

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);

	perf_swevent_put_recursion_context(rctx);
	preempt_enable_notrace();
}
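
/*
 * Illustrative sketch (not part of this file): how other kernel code
 * feeds this path. The page-fault handlers, for example, count faults
 * through the perf_sw_event() wrapper, which lands in __perf_sw_event()
 * above when the corresponding jump label is enabled:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * The recursion context taken above keeps a software event raised from
 * inside this very path (say, by an NMI interrupting do_perf_sw_event())
 * from recursing endlessly.
 */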

static void perf_swevent_read(struct perf_event *event)
{
}

static int perf_swevent_add(struct perf_event *event, int flags)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct hw_perf_event *hwc = &event->hw;
	struct hlist_head *head;

	if (is_sampling_event(event)) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}

	hwc->state = !(flags & PERF_EF_START);

	head = find_swevent_head(swhash, event);
	if (WARN_ON_ONCE(!head))
		return -EINVAL;

	hlist_add_head_rcu(&event->hlist_entry, head);

	return 0;
}

static void perf_swevent_del(struct perf_event *event, int flags)
{
	hlist_del_rcu(&event->hlist_entry);
}

static void perf_swevent_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
}

static void perf_swevent_stop(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
}

/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
}

static void swevent_hlist_release(struct swevent_htable *swhash)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);

	if (!hlist)
		return;

	rcu_assign_pointer(swhash->swevent_hlist, NULL);
	kfree_rcu(hlist, rcu_head);
}

static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);

	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);

	mutex_unlock(&swhash->hlist_mutex);
}

static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);

	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	swhash->hlist_refcount++;
exit:
	mutex_unlock(&swhash->hlist_mutex);

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}

struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	WARN_ON(event->parent);

	jump_label_dec(&perf_swevent_enabled[event_id]);
	swevent_hlist_put(event);
}

static int perf_swevent_init(struct perf_event *event)
{
	int event_id = event->attr.config;

	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
	case PERF_COUNT_SW_TASK_CLOCK:
		return -ENOENT;

	default:
		break;
	}

	if (event_id >= PERF_COUNT_SW_MAX)
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get(event);
		if (err)
			return err;

		jump_label_inc(&perf_swevent_enabled[event_id]);
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}

static struct pmu perf_swevent = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_swevent_init,
	.add		= perf_swevent_add,
	.del		= perf_swevent_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
};

#ifdef CONFIG_EVENT_TRACING

static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;
	/*
	 * All tracepoints are from kernel-space.
	 */
	if (event->attr.exclude_kernel)
		return 0;

	if (!perf_tp_filter_match(event, data))
		return 0;

	return 1;
}

void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
		   struct pt_regs *regs, struct hlist_head *head, int rctx)
{
	struct perf_sample_data data;
	struct perf_event *event;
	struct hlist_node *node;

	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr);
	data.raw = &raw;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_tp_event_match(event, &data, regs))
			perf_swevent_event(event, count, &data, regs);
	}

	perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);

static void tp_perf_event_destroy(struct perf_event *event)
{
	perf_trace_destroy(event);
}

static int perf_tp_event_init(struct perf_event *event)
{
	int err;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -ENOENT;

	err = perf_trace_init(event);
	if (err)
		return err;

	event->destroy = tp_perf_event_destroy;

	return 0;
}

static struct pmu perf_tracepoint = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_tp_event_init,
	.add		= perf_trace_add,
	.del		= perf_trace_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
};

static inline void perf_tp_register(void)
{
	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}
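
/*
 * Illustrative sketch (not part of the kernel source): user-space
 * reaches perf_event_set_filter() through the PERF_EVENT_IOC_SET_FILTER
 * ioctl on a tracepoint event fd, passing an ftrace-style predicate
 * string. Assuming fd was opened with attr.type == PERF_TYPE_TRACEPOINT:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/perf_event.h>
 *
 *	ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0");
 */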

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

#else

static inline void perf_tp_register(void)
{
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

#endif /* CONFIG_EVENT_TRACING */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

	perf_sample_data_init(&sample, bp->attr.bp_addr);

	if (!bp->hw.state && !perf_exclude_event(bp, regs))
		perf_swevent_event(bp, 1, &sample, regs);
}
#endif

/*
 * hrtimer based swevent callback
 */

static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;

	event = container_of(hrtimer, struct perf_event, hw.hrtimer);

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return HRTIMER_NORESTART;

	event->pmu->read(event);

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs = get_irq_regs();

	if (regs && !perf_exclude_event(event, regs)) {
		if (!(event->attr.exclude_idle && is_idle_task(current)))
			if (perf_event_overflow(event, &data, regs))
				ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}

static void perf_swevent_start_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period;

	if (!is_sampling_event(event))
		return;

	period = local64_read(&hwc->period_left);
	if (period) {
		if (period < 0)
			period = 10000;

		local64_set(&hwc->period_left, 0);
	} else {
		period = max_t(u64, 10000, hwc->sample_period);
	}
	__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL_PINNED, 0);
}

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (is_sampling_event(event)) {
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
		local64_set(&hwc->period_left, ktime_to_ns(remaining));

		hrtimer_cancel(&hwc->hrtimer);
	}
}

static void perf_swevent_init_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!is_sampling_event(event))
		return;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;

	/*
	 * Since hrtimers have a fixed rate, we can do a static freq->period
	 * mapping and avoid the whole period adjust feedback stuff.
	 */
	if (event->attr.freq) {
		long freq = event->attr.sample_freq;

		event->attr.sample_period = NSEC_PER_SEC / freq;
		hwc->sample_period = event->attr.sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
		event->attr.freq = 0;
	}
}
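
/*
 * Worked example for the static freq->period mapping above
 * (illustrative only): a request for sample_freq = 4000 Hz becomes
 * sample_period = NSEC_PER_SEC / 4000 = 250000 ns, i.e. the hrtimer
 * fires every 250 microseconds. The 10000 ns floor applied in
 * perf_swevent_start_hrtimer() then caps the effective sampling rate
 * at 100 kHz regardless of the requested frequency.
 */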

/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = local_clock();
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void cpu_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, local_clock());
	perf_swevent_start_hrtimer(event);
}

static void cpu_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_event_update(event);
}

static int cpu_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		cpu_clock_event_start(event, flags);

	return 0;
}

static void cpu_clock_event_del(struct perf_event *event, int flags)
{
	cpu_clock_event_stop(event, flags);
}

static void cpu_clock_event_read(struct perf_event *event)
{
	cpu_clock_event_update(event);
}

static int cpu_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
		return -ENOENT;

	perf_swevent_init_hrtimer(event);

	return 0;
}

static struct pmu perf_cpu_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= cpu_clock_event_init,
	.add		= cpu_clock_event_add,
	.del		= cpu_clock_event_del,
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,
};
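
/*
 * Illustrative sketch (not part of the kernel source): opening the
 * CPU clock software event above from user-space and reading the
 * accumulated nanoseconds. Error handling omitted for brevity.
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr;
 *	unsigned long long ns;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size   = sizeof(attr);
 *	attr.type   = PERF_TYPE_SOFTWARE;	// software event class
 *	attr.config = PERF_COUNT_SW_CPU_CLOCK;	// claimed by cpu_clock_event_init()
 *
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	read(fd, &ns, sizeof(ns));		// CPU wall time in ns
 *	close(fd);
 */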

/*
 * Software event: task time clock
 */

static void task_clock_event_update(struct perf_event *event, u64 now)
{
	u64 prev;
	s64 delta;

	prev = local64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	local64_add(delta, &event->count);
}

static void task_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, event->ctx->time);
	perf_swevent_start_hrtimer(event);
}

static void task_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_event_update(event, event->ctx->time);
}

static int task_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		task_clock_event_start(event, flags);

	return 0;
}

static void task_clock_event_del(struct perf_event *event, int flags)
{
	task_clock_event_stop(event, PERF_EF_UPDATE);
}

static void task_clock_event_read(struct perf_event *event)
{
	u64 now = perf_clock();
	u64 delta = now - event->ctx->timestamp;
	u64 time = event->ctx->time + delta;

	task_clock_event_update(event, time);
}

static int task_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
		return -ENOENT;

	perf_swevent_init_hrtimer(event);

	return 0;
}

static struct pmu perf_task_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= task_clock_event_init,
	.add		= task_clock_event_add,
	.del		= task_clock_event_del,
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
	.read		= task_clock_event_read,
};
6fb2915d 5412
static void perf_pmu_nop_void(struct pmu *pmu)
{
}

static int perf_pmu_nop_int(struct pmu *pmu)
{
        return 0;
}

static void perf_pmu_start_txn(struct pmu *pmu)
{
        perf_pmu_disable(pmu);
}

static int perf_pmu_commit_txn(struct pmu *pmu)
{
        perf_pmu_enable(pmu);
        return 0;
}

static void perf_pmu_cancel_txn(struct pmu *pmu)
{
        perf_pmu_enable(pmu);
}

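/*
 * A hedged sketch of how the core drives these transaction hooks when
 * scheduling an event group. The real caller is group_sched_in()
 * elsewhere in this file; the helper below is illustrative only and
 * simplifies the rollback path:
 */
/* Illustrative only: schedule a whole group atomically via the txn hooks. */
static int __maybe_unused sched_in_group_sketch(struct pmu *pmu,
                                                struct perf_event **ev, int n)
{
        int i;

        pmu->start_txn(pmu);            /* perf_pmu_disable() by default */

        for (i = 0; i < n; i++) {
                if (pmu->add(ev[i], PERF_EF_START))
                        goto fail;
        }

        if (!pmu->commit_txn(pmu))      /* re-enables the PMU on success */
                return 0;
fail:
        while (--i >= 0)                /* undo the events already added */
                pmu->del(ev[i], 0);
        pmu->cancel_txn(pmu);
        return -EAGAIN;
}
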
/*
 * Ensures all contexts with the same task_ctx_nr have the same
 * pmu_cpu_context too.
 */
static void *find_pmu_context(int ctxn)
{
        struct pmu *pmu;

        if (ctxn < 0)
                return NULL;

        list_for_each_entry(pmu, &pmus, entry) {
                if (pmu->task_ctx_nr == ctxn)
                        return pmu->pmu_cpu_context;
        }

        return NULL;
}

static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct perf_cpu_context *cpuctx;

                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);

                if (cpuctx->active_pmu == old_pmu)
                        cpuctx->active_pmu = pmu;
        }
}

static void free_pmu_context(struct pmu *pmu)
{
        struct pmu *i;

        mutex_lock(&pmus_lock);
        /*
         * Like a real lame refcount.
         */
        list_for_each_entry(i, &pmus, entry) {
                if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
                        update_pmu_context(i, pmu);
                        goto out;
                }
        }

        free_percpu(pmu->pmu_cpu_context);
out:
        mutex_unlock(&pmus_lock);
}

static struct idr pmu_idr;

static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
        struct pmu *pmu = dev_get_drvdata(dev);

        return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}

static struct device_attribute pmu_dev_attrs[] = {
        __ATTR_RO(type),
        __ATTR_NULL,
};

static int pmu_bus_running;
static struct bus_type pmu_bus = {
        .name           = "event_source",
        .dev_attrs      = pmu_dev_attrs,
};

static void pmu_dev_release(struct device *dev)
{
        kfree(dev);
}

static int pmu_dev_alloc(struct pmu *pmu)
{
        int ret = -ENOMEM;

        pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
        if (!pmu->dev)
                goto out;

        device_initialize(pmu->dev);
        ret = dev_set_name(pmu->dev, "%s", pmu->name);
        if (ret)
                goto free_dev;

        dev_set_drvdata(pmu->dev, pmu);
        pmu->dev->bus = &pmu_bus;
        pmu->dev->release = pmu_dev_release;
        ret = device_add(pmu->dev);
        if (ret)
                goto free_dev;

out:
        return ret;

free_dev:
        put_device(pmu->dev);
        goto out;
}

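/*
 * Each named PMU registered on the event_source bus shows up under
 * /sys/bus/event_source/devices/<name>/, and the type attribute exported
 * above is how tools map a PMU name to the attr.type value expected by
 * perf_event_open(). An illustrative userspace reader (not part of this
 * file):
 *
 *      #include <stdio.h>
 *
 *      // Read the (possibly dynamic) type id of a named PMU, e.g. "software".
 *      static int pmu_type(const char *name)
 *      {
 *              char path[256];
 *              FILE *f;
 *              int type = -1;
 *
 *              snprintf(path, sizeof(path),
 *                       "/sys/bus/event_source/devices/%s/type", name);
 *              f = fopen(path, "r");
 *              if (!f)
 *                      return -1;
 *              if (fscanf(f, "%d", &type) != 1)
 *                      type = -1;
 *              fclose(f);
 *              return type;
 *      }
 */
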
static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;

int perf_pmu_register(struct pmu *pmu, char *name, int type)
{
        int cpu, ret;

        mutex_lock(&pmus_lock);
        ret = -ENOMEM;
        pmu->pmu_disable_count = alloc_percpu(int);
        if (!pmu->pmu_disable_count)
                goto unlock;

        pmu->type = -1;
        if (!name)
                goto skip_type;
        pmu->name = name;

        if (type < 0) {
                int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
                if (!err)
                        goto free_pdc;

                err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
                if (err) {
                        ret = err;
                        goto free_pdc;
                }
        }
        pmu->type = type;

        if (pmu_bus_running) {
                ret = pmu_dev_alloc(pmu);
                if (ret)
                        goto free_idr;
        }

skip_type:
        pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
        if (pmu->pmu_cpu_context)
                goto got_cpu_context;

        pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
        if (!pmu->pmu_cpu_context)
                goto free_dev;

        for_each_possible_cpu(cpu) {
                struct perf_cpu_context *cpuctx;

                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                __perf_event_init_context(&cpuctx->ctx);
                lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
                lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
                cpuctx->ctx.type = cpu_context;
                cpuctx->ctx.pmu = pmu;
                cpuctx->jiffies_interval = 1;
                INIT_LIST_HEAD(&cpuctx->rotation_list);
                cpuctx->active_pmu = pmu;
        }

got_cpu_context:
        if (!pmu->start_txn) {
                if (pmu->pmu_enable) {
                        /*
                         * If we have pmu_enable/pmu_disable calls, install
                         * transaction stubs that use that to try and batch
                         * hardware accesses.
                         */
                        pmu->start_txn  = perf_pmu_start_txn;
                        pmu->commit_txn = perf_pmu_commit_txn;
                        pmu->cancel_txn = perf_pmu_cancel_txn;
                } else {
                        pmu->start_txn  = perf_pmu_nop_void;
                        pmu->commit_txn = perf_pmu_nop_int;
                        pmu->cancel_txn = perf_pmu_nop_void;
                }
        }

        if (!pmu->pmu_enable) {
                pmu->pmu_enable  = perf_pmu_nop_void;
                pmu->pmu_disable = perf_pmu_nop_void;
        }

        list_add_rcu(&pmu->entry, &pmus);
        ret = 0;
unlock:
        mutex_unlock(&pmus_lock);

        return ret;

free_dev:
        device_del(pmu->dev);
        put_device(pmu->dev);

free_idr:
        if (pmu->type >= PERF_TYPE_MAX)
                idr_remove(&pmu_idr, pmu->type);

free_pdc:
        free_percpu(pmu->pmu_disable_count);
        goto unlock;
}

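/*
 * A minimal sketch of what a registering driver must supply. Everything
 * left NULL (pmu_enable, start_txn, ...) is filled in above with the
 * nop/txn stubs. The callback bodies here are illustrative placeholders,
 * not a real driver:
 */
/* Illustrative skeleton only; a real driver programs hardware here. */
static int demo_event_init(struct perf_event *event)
{
        if (event->attr.type != event->pmu->type)
                return -ENOENT;         /* not ours */
        return 0;
}

static int  demo_add(struct perf_event *event, int flags)      { return 0; }
static void demo_del(struct perf_event *event, int flags)      { }
static void demo_start(struct perf_event *event, int flags)    { }
static void demo_stop(struct perf_event *event, int flags)     { }
static void demo_read(struct perf_event *event)                { }

static struct pmu __maybe_unused demo_pmu = {
        .task_ctx_nr    = perf_sw_context,
        .event_init     = demo_event_init,
        .add            = demo_add,
        .del            = demo_del,
        .start          = demo_start,
        .stop           = demo_stop,
        .read           = demo_read,
};

/* perf_pmu_register(&demo_pmu, "demo", -1) would allocate a dynamic type. */
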
void perf_pmu_unregister(struct pmu *pmu)
{
        mutex_lock(&pmus_lock);
        list_del_rcu(&pmu->entry);
        mutex_unlock(&pmus_lock);

        /*
         * We dereference the pmu list under both SRCU and regular RCU, so
         * synchronize against both of those.
         */
        synchronize_srcu(&pmus_srcu);
        synchronize_rcu();

        free_percpu(pmu->pmu_disable_count);
        if (pmu->type >= PERF_TYPE_MAX)
                idr_remove(&pmu_idr, pmu->type);
        device_del(pmu->dev);
        put_device(pmu->dev);
        free_pmu_context(pmu);
}

struct pmu *perf_init_event(struct perf_event *event)
{
        struct pmu *pmu = NULL;
        int idx;
        int ret;

        idx = srcu_read_lock(&pmus_srcu);

        rcu_read_lock();
        pmu = idr_find(&pmu_idr, event->attr.type);
        rcu_read_unlock();
        if (pmu) {
                event->pmu = pmu;
                ret = pmu->event_init(event);
                if (ret)
                        pmu = ERR_PTR(ret);
                goto unlock;
        }

        list_for_each_entry_rcu(pmu, &pmus, entry) {
                event->pmu = pmu;
                ret = pmu->event_init(event);
                if (!ret)
                        goto unlock;

                if (ret != -ENOENT) {
                        pmu = ERR_PTR(ret);
                        goto unlock;
                }
        }
        pmu = ERR_PTR(-ENOENT);
unlock:
        srcu_read_unlock(&pmus_srcu, idx);

        return pmu;
}

/*
 * Allocate and initialize an event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
                 struct task_struct *task,
                 struct perf_event *group_leader,
                 struct perf_event *parent_event,
                 perf_overflow_handler_t overflow_handler,
                 void *context)
{
        struct pmu *pmu;
        struct perf_event *event;
        struct hw_perf_event *hwc;
        long err;

        if ((unsigned)cpu >= nr_cpu_ids) {
                if (!task || cpu != -1)
                        return ERR_PTR(-EINVAL);
        }

        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
                return ERR_PTR(-ENOMEM);

        /*
         * Single events are their own group leaders, with an
         * empty sibling list:
         */
        if (!group_leader)
                group_leader = event;

        mutex_init(&event->child_mutex);
        INIT_LIST_HEAD(&event->child_list);

        INIT_LIST_HEAD(&event->group_entry);
        INIT_LIST_HEAD(&event->event_entry);
        INIT_LIST_HEAD(&event->sibling_list);
        INIT_LIST_HEAD(&event->rb_entry);

        init_waitqueue_head(&event->waitq);
        init_irq_work(&event->pending, perf_pending_event);

        mutex_init(&event->mmap_mutex);

        event->cpu              = cpu;
        event->attr             = *attr;
        event->group_leader     = group_leader;
        event->pmu              = NULL;
        event->oncpu            = -1;

        event->parent           = parent_event;

        event->ns               = get_pid_ns(current->nsproxy->pid_ns);
        event->id               = atomic64_inc_return(&perf_event_id);

        event->state            = PERF_EVENT_STATE_INACTIVE;

        if (task) {
                event->attach_state = PERF_ATTACH_TASK;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                /*
                 * hw_breakpoint is a bit difficult here..
                 */
                if (attr->type == PERF_TYPE_BREAKPOINT)
                        event->hw.bp_target = task;
#endif
        }

        if (!overflow_handler && parent_event) {
                overflow_handler = parent_event->overflow_handler;
                context = parent_event->overflow_handler_context;
        }

        event->overflow_handler = overflow_handler;
        event->overflow_handler_context = context;

        if (attr->disabled)
                event->state = PERF_EVENT_STATE_OFF;

        pmu = NULL;

        hwc = &event->hw;
        hwc->sample_period = attr->sample_period;
        if (attr->freq && attr->sample_freq)
                hwc->sample_period = 1;
        hwc->last_period = hwc->sample_period;

        local64_set(&hwc->period_left, hwc->sample_period);

        /*
         * We currently do not support PERF_FORMAT_GROUP on inherited events.
         */
        if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
                goto done;

        pmu = perf_init_event(event);

done:
        err = 0;
        if (!pmu)
                err = -EINVAL;
        else if (IS_ERR(pmu))
                err = PTR_ERR(pmu);

        if (err) {
                if (event->ns)
                        put_pid_ns(event->ns);
                kfree(event);
                return ERR_PTR(err);
        }

        if (!event->parent) {
                if (event->attach_state & PERF_ATTACH_TASK)
                        jump_label_inc(&perf_sched_events.key);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_inc(&nr_mmap_events);
                if (event->attr.comm)
                        atomic_inc(&nr_comm_events);
                if (event->attr.task)
                        atomic_inc(&nr_task_events);
                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
                        err = get_callchain_buffers();
                        if (err) {
                                free_event(event);
                                return ERR_PTR(err);
                        }
                }
        }

        return event;
}

static int perf_copy_attr(struct perf_event_attr __user *uattr,
                          struct perf_event_attr *attr)
{
        u32 size;
        int ret;

        if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
                return -EFAULT;

        /*
         * Zero the full structure, so that a short copy will be nice.
         */
        memset(attr, 0, sizeof(*attr));

        ret = get_user(size, &uattr->size);
        if (ret)
                return ret;

        if (size > PAGE_SIZE)   /* silly large */
                goto err_size;

        if (!size)              /* abi compat */
                size = PERF_ATTR_SIZE_VER0;

        if (size < PERF_ATTR_SIZE_VER0)
                goto err_size;

        /*
         * If we're handed a bigger struct than we know of,
         * ensure all the unknown bits are 0 - i.e. new
         * user-space does not rely on any kernel feature
         * extensions we don't know about yet.
         */
        if (size > sizeof(*attr)) {
                unsigned char __user *addr;
                unsigned char __user *end;
                unsigned char val;

                addr = (void __user *)uattr + sizeof(*attr);
                end  = (void __user *)uattr + size;

                for (; addr < end; addr++) {
                        ret = get_user(val, addr);
                        if (ret)
                                return ret;
                        if (val)
                                goto err_size;
                }
                size = sizeof(*attr);
        }

        ret = copy_from_user(attr, uattr, size);
        if (ret)
                return -EFAULT;

        if (attr->__reserved_1)
                return -EINVAL;

        if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
                return -EINVAL;

        if (attr->read_format & ~(PERF_FORMAT_MAX-1))
                return -EINVAL;

out:
        return ret;

err_size:
        put_user(sizeof(*attr), &uattr->size);
        ret = -E2BIG;
        goto out;
}

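/*
 * The size-probing loop above is what lets an older kernel reject
 * attributes from newer userspace with E2BIG (instead of silently
 * mis-reading them), while a zeroed tail from newer userspace is
 * accepted. The userspace side of that contract, as an illustrative
 * sketch:
 *
 *      #include <string.h>
 *      #include <linux/perf_event.h>
 *
 *      static void init_attr(struct perf_event_attr *attr)
 *      {
 *              // Zero everything so tail bytes unknown to the kernel are 0,
 *              memset(attr, 0, sizeof(*attr));
 *              // and declare which struct revision we were built against.
 *              attr->size = sizeof(*attr);
 *      }
 */
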
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
        struct ring_buffer *rb = NULL, *old_rb = NULL;
        int ret = -EINVAL;

        if (!output_event)
                goto set;

        /* don't allow circular references */
        if (event == output_event)
                goto out;

        /*
         * Don't allow cross-cpu buffers
         */
        if (output_event->cpu != event->cpu)
                goto out;

        /*
         * If it's not a per-cpu rb, it must be the same task.
         */
        if (output_event->cpu == -1 && output_event->ctx != event->ctx)
                goto out;

set:
        mutex_lock(&event->mmap_mutex);
        /* Can't redirect output if we've got an active mmap() */
        if (atomic_read(&event->mmap_count))
                goto unlock;

        if (output_event) {
                /* get the rb we want to redirect to */
                rb = ring_buffer_get(output_event);
                if (!rb)
                        goto unlock;
        }

        old_rb = event->rb;
        rcu_assign_pointer(event->rb, rb);
        if (old_rb)
                ring_buffer_detach(event, old_rb);
        ret = 0;
unlock:
        mutex_unlock(&event->mmap_mutex);

        if (old_rb)
                ring_buffer_put(old_rb);
out:
        return ret;
}

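/*
 * Besides the PERF_FLAG_FD_OUTPUT path at open time, userspace reaches
 * this function through the PERF_EVENT_IOC_SET_OUTPUT ioctl, redirecting
 * one event's samples into another event's mmap()ed buffer. Illustrative
 * usage, assuming two already-open event fds on the same CPU:
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/perf_event.h>
 *
 *      // Route fd_b's samples into fd_a's ring buffer;
 *      // only fd_a then needs an mmap().
 *      static int share_buffer(int fd_a, int fd_b)
 *      {
 *              return ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a);
 *      }
 */
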
/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:  event_id type attributes for monitoring/sampling
 * @pid:        target pid
 * @cpu:        target cpu
 * @group_fd:   group leader event fd
 */
SYSCALL_DEFINE5(perf_event_open,
                struct perf_event_attr __user *, attr_uptr,
                pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
        struct perf_event *group_leader = NULL, *output_event = NULL;
        struct perf_event *event, *sibling;
        struct perf_event_attr attr;
        struct perf_event_context *ctx;
        struct file *event_file = NULL;
        struct file *group_file = NULL;
        struct task_struct *task = NULL;
        struct pmu *pmu;
        int event_fd;
        int move_group = 0;
        int fput_needed = 0;
        int err;

        /* for future expandability... */
        if (flags & ~PERF_FLAG_ALL)
                return -EINVAL;

        err = perf_copy_attr(attr_uptr, &attr);
        if (err)
                return err;

        if (!attr.exclude_kernel) {
                if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
                        return -EACCES;
        }

        if (attr.freq) {
                if (attr.sample_freq > sysctl_perf_event_sample_rate)
                        return -EINVAL;
        }

        /*
         * In cgroup mode, the pid argument is used to pass the fd
         * opened to the cgroup directory in cgroupfs. The cpu argument
         * designates the cpu on which to monitor threads from that
         * cgroup.
         */
        if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
                return -EINVAL;

        event_fd = get_unused_fd_flags(O_RDWR);
        if (event_fd < 0)
                return event_fd;

        if (group_fd != -1) {
                group_leader = perf_fget_light(group_fd, &fput_needed);
                if (IS_ERR(group_leader)) {
                        err = PTR_ERR(group_leader);
                        goto err_fd;
                }
                group_file = group_leader->filp;
                if (flags & PERF_FLAG_FD_OUTPUT)
                        output_event = group_leader;
                if (flags & PERF_FLAG_FD_NO_GROUP)
                        group_leader = NULL;
        }

        if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
                task = find_lively_task_by_vpid(pid);
                if (IS_ERR(task)) {
                        err = PTR_ERR(task);
                        goto err_group_fd;
                }
        }

        event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
                                 NULL, NULL);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
                goto err_task;
        }

        if (flags & PERF_FLAG_PID_CGROUP) {
                err = perf_cgroup_connect(pid, event, &attr, group_leader);
                if (err)
                        goto err_alloc;
                /*
                 * one more event:
                 * - that has cgroup constraint on event->cpu
                 * - that may need work on context switch
                 */
                atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
                jump_label_inc(&perf_sched_events.key);
        }

        /*
         * Special case software events and allow them to be part of
         * any hardware group.
         */
        pmu = event->pmu;

        if (group_leader &&
            (is_software_event(event) != is_software_event(group_leader))) {
                if (is_software_event(event)) {
                        /*
                         * If event and group_leader are not both a software
                         * event, and event is, then group leader is not.
                         *
                         * Allow the addition of software events to !software
                         * groups, this is safe because software events never
                         * fail to schedule.
                         */
                        pmu = group_leader->pmu;
                } else if (is_software_event(group_leader) &&
                           (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
                        /*
                         * In case the group is a pure software group, and we
                         * try to add a hardware event, move the whole group to
                         * the hardware context.
                         */
                        move_group = 1;
                }
        }

        /*
         * Get the target context (task or percpu):
         */
        ctx = find_get_context(pmu, task, cpu);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto err_alloc;
        }

        if (task) {
                put_task_struct(task);
                task = NULL;
        }

        /*
         * Look up the group leader (we will attach this event to it):
         */
        if (group_leader) {
                err = -EINVAL;

                /*
                 * Do not allow a recursive hierarchy (this new sibling
                 * becoming part of another group-sibling):
                 */
                if (group_leader->group_leader != group_leader)
                        goto err_context;
                /*
                 * Do not allow attaching to a group in a different
                 * task or CPU context:
                 */
                if (move_group) {
                        if (group_leader->ctx->type != ctx->type)
                                goto err_context;
                } else {
                        if (group_leader->ctx != ctx)
                                goto err_context;
                }

                /*
                 * Only a group leader can be exclusive or pinned
                 */
                if (attr.exclusive || attr.pinned)
                        goto err_context;
        }

        if (output_event) {
                err = perf_event_set_output(event, output_event);
                if (err)
                        goto err_context;
        }

        event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
        if (IS_ERR(event_file)) {
                err = PTR_ERR(event_file);
                goto err_context;
        }

        if (move_group) {
                struct perf_event_context *gctx = group_leader->ctx;

                mutex_lock(&gctx->mutex);
                perf_remove_from_context(group_leader);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
                        perf_remove_from_context(sibling);
                        put_ctx(gctx);
                }
                mutex_unlock(&gctx->mutex);
                put_ctx(gctx);
        }

        event->filp = event_file;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);

        if (move_group) {
                perf_install_in_context(ctx, group_leader, cpu);
                get_ctx(ctx);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
                        perf_install_in_context(ctx, sibling, cpu);
                        get_ctx(ctx);
                }
        }

        perf_install_in_context(ctx, event, cpu);
        ++ctx->generation;
        perf_unpin_context(ctx);
        mutex_unlock(&ctx->mutex);

        event->owner = current;

        mutex_lock(&current->perf_event_mutex);
        list_add_tail(&event->owner_entry, &current->perf_event_list);
        mutex_unlock(&current->perf_event_mutex);

        /*
         * Precalculate sample_data sizes
         */
        perf_event__header_size(event);
        perf_event__id_header_size(event);

        /*
         * Drop the reference on the group_event after placing the
         * new event on the sibling_list. This ensures destruction
         * of the group leader will find the pointer to itself in
         * perf_group_detach().
         */
        fput_light(group_file, fput_needed);
        fd_install(event_fd, event_file);
        return event_fd;

err_context:
        perf_unpin_context(ctx);
        put_ctx(ctx);
err_alloc:
        free_event(event);
err_task:
        if (task)
                put_task_struct(task);
err_group_fd:
        fput_light(group_file, fput_needed);
err_fd:
        put_unused_fd(event_fd);
        return err;
}

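/*
 * A self-contained userspace sketch of the common case (one task-bound
 * hardware counter, no group, no cgroup), close to the example in the
 * perf_event_open(2) man page:
 *
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/syscall.h>
 *      #include <linux/perf_event.h>
 *
 *      // No glibc wrapper exists; invoke the syscall directly.
 *      static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
 *                                  int cpu, int group_fd, unsigned long flags)
 *      {
 *              return syscall(__NR_perf_event_open, attr, pid, cpu,
 *                             group_fd, flags);
 *      }
 *
 *      int main(void)
 *      {
 *              struct perf_event_attr attr;
 *              long long count;
 *              int fd;
 *
 *              memset(&attr, 0, sizeof(attr));
 *              attr.size = sizeof(attr);
 *              attr.type = PERF_TYPE_HARDWARE;
 *              attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 *              attr.disabled = 1;
 *              attr.exclude_kernel = 1;
 *
 *              fd = perf_event_open(&attr, 0, -1, -1, 0);
 *              if (fd < 0) {
 *                      perror("perf_event_open");
 *                      return 1;
 *              }
 *
 *              ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *              ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *              printf("measuring this printf\n");
 *              ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *
 *              if (read(fd, &count, sizeof(count)) == sizeof(count))
 *                      printf("instructions: %lld\n", count);
 *              close(fd);
 *              return 0;
 *      }
 */
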
/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu to which the counter is bound
 * @task: task to profile (NULL for percpu)
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
                                 struct task_struct *task,
                                 perf_overflow_handler_t overflow_handler,
                                 void *context)
{
        struct perf_event_context *ctx;
        struct perf_event *event;
        int err;

        /*
         * Get the target context (task or percpu):
         */

        event = perf_event_alloc(attr, cpu, task, NULL, NULL,
                                 overflow_handler, context);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
                goto err;
        }

        ctx = find_get_context(event->pmu, task, cpu);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto err_free;
        }

        event->filp = NULL;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
        perf_install_in_context(ctx, event, cpu);
        ++ctx->generation;
        perf_unpin_context(ctx);
        mutex_unlock(&ctx->mutex);

        return event;

err_free:
        free_event(event);
err:
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);

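/*
 * A hedged sketch of in-kernel usage, modeled on callers such as the
 * hard-lockup watchdog and hw_breakpoint. The names demo_overflow and
 * demo_counter are illustrative, and the overflow-handler signature is
 * assumed to match perf_overflow_handler_t as used in this file:
 */
/* Illustrative in-kernel user: a per-cpu cycle counter with a callback. */
static void demo_overflow(struct perf_event *event,
                          struct perf_sample_data *data, struct pt_regs *regs)
{
        /* runs from NMI/IRQ context when the sample period expires */
}

static struct perf_event * __maybe_unused demo_counter(int cpu)
{
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_HARDWARE,
                .config         = PERF_COUNT_HW_CPU_CYCLES,
                .size           = sizeof(attr),
                .sample_period  = 1000000,
        };

        /* task == NULL: pin the counter to @cpu rather than to a task */
        return perf_event_create_kernel_counter(&attr, cpu, NULL,
                                                demo_overflow, NULL);
}
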
static void sync_child_event(struct perf_event *child_event,
                             struct task_struct *child)
{
        struct perf_event *parent_event = child_event->parent;
        u64 child_val;

        if (child_event->attr.inherit_stat)
                perf_event_read_event(child_event, child);

        child_val = perf_event_count(child_event);

        /*
         * Add back the child's count to the parent's count:
         */
        atomic64_add(child_val, &parent_event->child_count);
        atomic64_add(child_event->total_time_enabled,
                     &parent_event->child_total_time_enabled);
        atomic64_add(child_event->total_time_running,
                     &parent_event->child_total_time_running);

        /*
         * Remove this event from the parent's list
         */
        WARN_ON_ONCE(parent_event->ctx->parent_ctx);
        mutex_lock(&parent_event->child_mutex);
        list_del_init(&child_event->child_list);
        mutex_unlock(&parent_event->child_mutex);

        /*
         * Release the parent event, if this was the last
         * reference to it.
         */
        fput(parent_event->filp);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
                       struct perf_event_context *child_ctx,
                       struct task_struct *child)
{
        if (child_event->parent) {
                raw_spin_lock_irq(&child_ctx->lock);
                perf_group_detach(child_event);
                raw_spin_unlock_irq(&child_ctx->lock);
        }

        perf_remove_from_context(child_event);

        /*
         * It can happen that the parent exits first, and has events
         * that are still around due to the child reference. These
         * events need to be zapped.
         */
        if (child_event->parent) {
                sync_child_event(child_event, child);
                free_event(child_event);
        }
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
        struct perf_event *child_event, *tmp;
        struct perf_event_context *child_ctx;
        unsigned long flags;

        if (likely(!child->perf_event_ctxp[ctxn])) {
                perf_event_task(child, NULL, 0);
                return;
        }

        local_irq_save(flags);
        /*
         * We can't reschedule here because interrupts are disabled,
         * and either child is current or it is a task that can't be
         * scheduled, so we are now safe from rescheduling changing
         * our context.
         */
        child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);

        /*
         * Take the context lock here so that if find_get_context is
         * reading child->perf_event_ctxp, we wait until it has
         * incremented the context's refcount before we do put_ctx below.
         */
        raw_spin_lock(&child_ctx->lock);
        task_ctx_sched_out(child_ctx);
        child->perf_event_ctxp[ctxn] = NULL;
        /*
         * If this context is a clone, unclone it so it can't get
         * swapped to another process while we're removing all
         * the events from it.
         */
        unclone_ctx(child_ctx);
        update_context_time(child_ctx);
        raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

        /*
         * Report the task dead after unscheduling the events so that we
         * won't get any samples after PERF_RECORD_EXIT. We can however still
         * get a few PERF_RECORD_READ events.
         */
        perf_event_task(child, child_ctx, 0);

        /*
         * We can recurse on the same lock type through:
         *
         *   __perf_event_exit_task()
         *     sync_child_event()
         *       fput(parent_event->filp)
         *         perf_release()
         *           mutex_lock(&ctx->mutex)
         *
         * But since it's the parent context it won't be the same instance.
         */
        mutex_lock(&child_ctx->mutex);

again:
        list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
                                 group_entry)
                __perf_event_exit_task(child_event, child_ctx, child);

        list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
                                 group_entry)
                __perf_event_exit_task(child_event, child_ctx, child);

        /*
         * If the last event was a group event, it will have appended all
         * its siblings to the list, but we obtained 'tmp' before that which
         * will still point to the list head terminating the iteration.
         */
        if (!list_empty(&child_ctx->pinned_groups) ||
            !list_empty(&child_ctx->flexible_groups))
                goto again;

        mutex_unlock(&child_ctx->mutex);

        put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
        struct perf_event *event, *tmp;
        int ctxn;

        mutex_lock(&child->perf_event_mutex);
        list_for_each_entry_safe(event, tmp, &child->perf_event_list,
                                 owner_entry) {
                list_del_init(&event->owner_entry);

                /*
                 * Ensure the list deletion is visible before we clear
                 * the owner, closes a race against perf_release() where
                 * we need to serialize on the owner->perf_event_mutex.
                 */
                smp_wmb();
                event->owner = NULL;
        }
        mutex_unlock(&child->perf_event_mutex);

        for_each_task_context_nr(ctxn)
                perf_event_exit_task_context(child, ctxn);
}

static void perf_free_event(struct perf_event *event,
                            struct perf_event_context *ctx)
{
        struct perf_event *parent = event->parent;

        if (WARN_ON_ONCE(!parent))
                return;

        mutex_lock(&parent->child_mutex);
        list_del_init(&event->child_list);
        mutex_unlock(&parent->child_mutex);

        fput(parent->filp);

        perf_group_detach(event);
        list_del_event(event, ctx);
        free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
        struct perf_event_context *ctx;
        struct perf_event *event, *tmp;
        int ctxn;

        for_each_task_context_nr(ctxn) {
                ctx = task->perf_event_ctxp[ctxn];
                if (!ctx)
                        continue;

                mutex_lock(&ctx->mutex);
again:
                list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
                                         group_entry)
                        perf_free_event(event, ctx);

                list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
                                         group_entry)
                        perf_free_event(event, ctx);

                if (!list_empty(&ctx->pinned_groups) ||
                    !list_empty(&ctx->flexible_groups))
                        goto again;

                mutex_unlock(&ctx->mutex);

                put_ctx(ctx);
        }
}

void perf_event_delayed_put(struct task_struct *task)
{
        int ctxn;

        for_each_task_context_nr(ctxn)
                WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

/*
 * Inherit an event from a parent task to a child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
              struct task_struct *parent,
              struct perf_event_context *parent_ctx,
              struct task_struct *child,
              struct perf_event *group_leader,
              struct perf_event_context *child_ctx)
{
        struct perf_event *child_event;
        unsigned long flags;

        /*
         * Instead of creating recursive hierarchies of events,
         * we link inherited events back to the original parent,
         * which has a filp for sure, which we use as the reference
         * count:
         */
        if (parent_event->parent)
                parent_event = parent_event->parent;

        child_event = perf_event_alloc(&parent_event->attr,
                                       parent_event->cpu,
                                       child,
                                       group_leader, parent_event,
                                       NULL, NULL);
        if (IS_ERR(child_event))
                return child_event;
        get_ctx(child_ctx);

        /*
         * Make the child state follow the state of the parent event,
         * not its attr.disabled bit. We hold the parent's mutex,
         * so we won't race with perf_event_{en, dis}able_family.
         */
        if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
                child_event->state = PERF_EVENT_STATE_INACTIVE;
        else
                child_event->state = PERF_EVENT_STATE_OFF;

        if (parent_event->attr.freq) {
                u64 sample_period = parent_event->hw.sample_period;
                struct hw_perf_event *hwc = &child_event->hw;

                hwc->sample_period = sample_period;
                hwc->last_period   = sample_period;

                local64_set(&hwc->period_left, sample_period);
        }

        child_event->ctx = child_ctx;
        child_event->overflow_handler = parent_event->overflow_handler;
        child_event->overflow_handler_context
                = parent_event->overflow_handler_context;

        /*
         * Precalculate sample_data sizes
         */
        perf_event__header_size(child_event);
        perf_event__id_header_size(child_event);

        /*
         * Link it up in the child's context:
         */
        raw_spin_lock_irqsave(&child_ctx->lock, flags);
        add_event_to_ctx(child_event, child_ctx);
        raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

        /*
         * Get a reference to the parent filp - we will fput it
         * when the child event exits. This is safe to do because
         * we are in the parent and we know that the filp still
         * exists and has a nonzero count:
         */
        atomic_long_inc(&parent_event->filp->f_count);

        /*
         * Link this into the parent event's child list
         */
        WARN_ON_ONCE(parent_event->ctx->parent_ctx);
        mutex_lock(&parent_event->child_mutex);
        list_add_tail(&child_event->child_list, &parent_event->child_list);
        mutex_unlock(&parent_event->child_mutex);

        return child_event;
}

static int inherit_group(struct perf_event *parent_event,
                         struct task_struct *parent,
                         struct perf_event_context *parent_ctx,
                         struct task_struct *child,
                         struct perf_event_context *child_ctx)
{
        struct perf_event *leader;
        struct perf_event *sub;
        struct perf_event *child_ctr;

        leader = inherit_event(parent_event, parent, parent_ctx,
                               child, NULL, child_ctx);
        if (IS_ERR(leader))
                return PTR_ERR(leader);
        list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
                child_ctr = inherit_event(sub, parent, parent_ctx,
                                          child, leader, child_ctx);
                if (IS_ERR(child_ctr))
                        return PTR_ERR(child_ctr);
        }
        return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
                   struct perf_event_context *parent_ctx,
                   struct task_struct *child, int ctxn,
                   int *inherited_all)
{
        int ret;
        struct perf_event_context *child_ctx;

        if (!event->attr.inherit) {
                *inherited_all = 0;
                return 0;
        }

        child_ctx = child->perf_event_ctxp[ctxn];
        if (!child_ctx) {
                /*
                 * This is executed from the parent task context, so
                 * inherit events that have been marked for cloning.
                 * First allocate and initialize a context for the
                 * child.
                 */

                child_ctx = alloc_perf_context(event->pmu, child);
                if (!child_ctx)
                        return -ENOMEM;

                child->perf_event_ctxp[ctxn] = child_ctx;
        }

        ret = inherit_group(event, parent, parent_ctx,
                            child, child_ctx);

        if (ret)
                *inherited_all = 0;

        return ret;
}

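/*
 * All of this inheritance machinery is driven by a single attribute bit:
 * with attr.inherit set, counts from children forked after the event is
 * attached are folded back into the parent event on exit (subject to the
 * PERF_FORMAT_GROUP restriction noted in perf_event_alloc()). The
 * userspace side, as an illustrative sketch:
 *
 *      #include <string.h>
 *      #include <linux/perf_event.h>
 *
 *      static void init_inherited_attr(struct perf_event_attr *attr)
 *      {
 *              memset(attr, 0, sizeof(*attr));
 *              attr->size    = sizeof(*attr);
 *              attr->type    = PERF_TYPE_SOFTWARE;
 *              attr->config  = PERF_COUNT_SW_TASK_CLOCK;
 *              // Count forked children too; totals are merged when they exit.
 *              attr->inherit = 1;
 *      }
 */
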
/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
        struct perf_event_context *child_ctx, *parent_ctx;
        struct perf_event_context *cloned_ctx;
        struct perf_event *event;
        struct task_struct *parent = current;
        int inherited_all = 1;
        unsigned long flags;
        int ret = 0;

        if (likely(!parent->perf_event_ctxp[ctxn]))
                return 0;

        /*
         * If the parent's context is a clone, pin it so it won't get
         * swapped under us.
         */
        parent_ctx = perf_pin_task_context(parent, ctxn);

        /*
         * No need to check if parent_ctx != NULL here; since we saw
         * it non-NULL earlier, the only reason for it to become NULL
         * is if we exit, and since we're currently in the middle of
         * a fork we can't be exiting at the same time.
         */

        /*
         * Lock the parent list. No need to lock the child - not PID
         * hashed yet and not running, so nobody can access it.
         */
        mutex_lock(&parent_ctx->mutex);

        /*
         * We don't have to disable NMIs - we are only looking at
         * the list, not manipulating it:
         */
        list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, &inherited_all);
                if (ret)
                        break;
        }

        /*
         * We can't hold ctx->lock when iterating the ->flexible_group list due
         * to allocations, but we need to prevent rotation because
         * rotate_ctx() will change the list from interrupt context.
         */
        raw_spin_lock_irqsave(&parent_ctx->lock, flags);
        parent_ctx->rotate_disable = 1;
        raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

        list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, &inherited_all);
                if (ret)
                        break;
        }

        raw_spin_lock_irqsave(&parent_ctx->lock, flags);
        parent_ctx->rotate_disable = 0;

        child_ctx = child->perf_event_ctxp[ctxn];

        if (child_ctx && inherited_all) {
                /*
                 * Mark the child context as a clone of the parent
                 * context, or of whatever the parent is a clone of.
                 *
                 * Note that if the parent is a clone, holding
                 * parent_ctx->lock prevents it from being uncloned.
                 */
                cloned_ctx = parent_ctx->parent_ctx;
                if (cloned_ctx) {
                        child_ctx->parent_ctx = cloned_ctx;
                        child_ctx->parent_gen = parent_ctx->parent_gen;
                } else {
                        child_ctx->parent_ctx = parent_ctx;
                        child_ctx->parent_gen = parent_ctx->generation;
                }
                get_ctx(child_ctx->parent_ctx);
        }

        raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
        mutex_unlock(&parent_ctx->mutex);

        perf_unpin_context(parent_ctx);
        put_ctx(parent_ctx);

        return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
        int ctxn, ret;

        memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
        mutex_init(&child->perf_event_mutex);
        INIT_LIST_HEAD(&child->perf_event_list);

        for_each_task_context_nr(ctxn) {
                ret = perf_event_init_context(child, ctxn);
                if (ret)
                        return ret;
        }

        return 0;
}

static void __init perf_event_init_all_cpus(void)
{
        struct swevent_htable *swhash;
        int cpu;

        for_each_possible_cpu(cpu) {
                swhash = &per_cpu(swevent_htable, cpu);
                mutex_init(&swhash->hlist_mutex);
                INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
        }
}

static void __cpuinit perf_event_init_cpu(int cpu)
{
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

        mutex_lock(&swhash->hlist_mutex);
        if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;

                hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
                WARN_ON(!hlist);
                rcu_assign_pointer(swhash->swevent_hlist, hlist);
        }
        mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

        WARN_ON(!irqs_disabled());

        list_del_init(&cpuctx->rotation_list);
}

static void __perf_event_exit_context(void *__info)
{
        struct perf_event_context *ctx = __info;
        struct perf_event *event, *tmp;

        perf_pmu_rotate_stop(ctx->pmu);

        list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
                __perf_remove_from_context(event);
        list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
                __perf_remove_from_context(event);
}

static void perf_event_exit_cpu_context(int cpu)
{
        struct perf_event_context *ctx;
        struct pmu *pmu;
        int idx;

        idx = srcu_read_lock(&pmus_srcu);
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

                mutex_lock(&ctx->mutex);
                smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
                mutex_unlock(&ctx->mutex);
        }
        srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

        mutex_lock(&swhash->hlist_mutex);
        swevent_hlist_release(swhash);
        mutex_unlock(&swhash->hlist_mutex);

        perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
        int cpu;

        for_each_online_cpu(cpu)
                perf_event_exit_cpu(cpu);

        return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
        .notifier_call = perf_reboot,
        .priority = INT_MIN,
};

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {

        case CPU_UP_PREPARE:
        case CPU_DOWN_FAILED:
                perf_event_init_cpu(cpu);
                break;

        case CPU_UP_CANCELED:
        case CPU_DOWN_PREPARE:
                perf_event_exit_cpu(cpu);
                break;

        default:
                break;
        }

        return NOTIFY_OK;
}

void __init perf_event_init(void)
{
        int ret;

        idr_init(&pmu_idr);

        perf_event_init_all_cpus();
        init_srcu_struct(&pmus_srcu);
        perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
        perf_pmu_register(&perf_cpu_clock, NULL, -1);
        perf_pmu_register(&perf_task_clock, NULL, -1);
        perf_tp_register();
        perf_cpu_notifier(perf_cpu_notify);
        register_reboot_notifier(&perf_reboot_notifier);

        ret = init_hw_breakpoint();
        WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

        /* do not patch jump label more than once per second */
        jump_label_rate_limit(&perf_sched_events, HZ);
}

static int __init perf_event_sysfs_init(void)
{
        struct pmu *pmu;
        int ret;

        mutex_lock(&pmus_lock);

        ret = bus_register(&pmu_bus);
        if (ret)
                goto unlock;

        list_for_each_entry(pmu, &pmus, entry) {
                if (!pmu->name || pmu->type < 0)
                        continue;

                ret = pmu_dev_alloc(pmu);
                WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
        }
        pmu_bus_running = 1;
        ret = 0;

unlock:
        mutex_unlock(&pmus_lock);

        return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *perf_cgroup_create(
        struct cgroup_subsys *ss, struct cgroup *cont)
{
        struct perf_cgroup *jc;

        jc = kzalloc(sizeof(*jc), GFP_KERNEL);
        if (!jc)
                return ERR_PTR(-ENOMEM);

        jc->info = alloc_percpu(struct perf_cgroup_info);
        if (!jc->info) {
                kfree(jc);
                return ERR_PTR(-ENOMEM);
        }

        return &jc->css;
}

static void perf_cgroup_destroy(struct cgroup_subsys *ss,
                                struct cgroup *cont)
{
        struct perf_cgroup *jc;
        jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
                          struct perf_cgroup, css);
        free_percpu(jc->info);
        kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
        struct task_struct *task = info;
        perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
        return 0;
}

static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                               struct cgroup_taskset *tset)
{
        struct task_struct *task;

        cgroup_taskset_for_each(task, cgrp, tset)
                task_function_call(task, __perf_cgroup_move, task);
}

static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
                             struct cgroup *old_cgrp, struct task_struct *task)
{
        /*
         * cgroup_exit() is called in the copy_process() failure path.
         * Ignore this case since the task hasn't run yet, this avoids
         * trying to poke a half-freed task state from generic code.
         */
        if (!(task->flags & PF_EXITING))
                return;

        task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_subsys = {
        .name           = "perf_event",
        .subsys_id      = perf_subsys_id,
        .create         = perf_cgroup_create,
        .destroy        = perf_cgroup_destroy,
        .exit           = perf_cgroup_exit,
        .attach         = perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */
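
/*
 * With CONFIG_CGROUP_PERF, a perf_event cgroup is monitored by passing an
 * open fd of its cgroupfs directory as the pid argument together with
 * PERF_FLAG_PID_CGROUP; as enforced in sys_perf_event_open(), cpu must
 * then name a real CPU. Illustrative userspace sketch (the cgroup path is
 * an example):
 *
 *      #include <fcntl.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <linux/perf_event.h>
 *
 *      static int open_cgroup_event(const char *cgroup_dir, int cpu)
 *      {
 *              struct perf_event_attr attr;
 *              int cgrp_fd, fd;
 *
 *              cgrp_fd = open(cgroup_dir, O_RDONLY);
 *              if (cgrp_fd < 0)
 *                      return -1;
 *
 *              memset(&attr, 0, sizeof(attr));
 *              attr.size   = sizeof(attr);
 *              attr.type   = PERF_TYPE_SOFTWARE;
 *              attr.config = PERF_COUNT_SW_CPU_CLOCK;
 *
 *              // pid carries the cgroup fd; cpu must be >= 0 in cgroup mode
 *              fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu,
 *                           -1, PERF_FLAG_PID_CGROUP);
 *              close(cgrp_fd);
 *              return fd;
 *      }
 */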