perf: Fix branch stack refcount leak on callchain init failure
kernel/events/core.c (linux-2.6-block.git)
/*
 * Performance events core code:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/cgroup.h>

#include "internal.h"

#include <asm/irq_regs.h>
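/*
 * Helpers for running a function on the CPU a given task is currently
 * executing on (or on an explicit CPU), via smp_call_function_single().
 */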
struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}
/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}
/**
 * cpu_function_call - call a function on the cpu
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static atomic_t perf_sample_allowed_ns __read_mostly =
	ATOMIC_INIT(DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100);
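/*
 * Convert the allowed CPU-time budget (perf_sample_period_ns scaled by
 * sysctl_perf_cpu_time_max_percent) into the number of nanoseconds a
 * single sample may take before the sample rate gets throttled.
 */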
void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	do_div(tmp, 100);
	atomic_set(&perf_sample_allowed_ns, tmp);
}
static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}
int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	update_perf_cpu_limits();

	return 0;
}
/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
DEFINE_PER_CPU(u64, running_sample_length);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 avg_local_sample_len;
	u64 local_samples_len;

	if (atomic_read(&perf_sample_allowed_ns) == 0)
		return;

	/* decay the counter by 1 average sample */
	local_samples_len = __get_cpu_var(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__get_cpu_var(running_sample_length) = local_samples_len;

	/*
	 * note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES.  Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	if (avg_local_sample_len <= atomic_read(&perf_sample_allowed_ns))
		return;

	if (max_samples_per_tick <= 1)
		return;

	max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	printk_ratelimited(KERN_WARNING
			"perf samples too long (%lld > %d), lowering "
			"kernel.perf_event_max_sample_rate to %d\n",
			avg_local_sample_len,
			atomic_read(&perf_sample_allowed_ns),
			sysctl_perf_event_sample_rate);

	update_perf_cpu_limits();
}
static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}
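/*
 * Lock ordering: cpuctx->ctx.lock is always taken before the (optional)
 * task context's ctx->lock, and both are released in the opposite order.
 */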
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}
e5d1367f
SE
319#ifdef CONFIG_CGROUP_PERF
320
877c6856
LZ
321/*
322 * perf_cgroup_info keeps track of time_enabled for a cgroup.
323 * This is a per-cpu dynamically allocated data structure.
324 */
325struct perf_cgroup_info {
326 u64 time;
327 u64 timestamp;
328};
329
330struct perf_cgroup {
331 struct cgroup_subsys_state css;
86e213e1 332 struct perf_cgroup_info __percpu *info;
877c6856
LZ
333};
334
3f7cce3c
SE
335/*
336 * Must ensure cgroup is pinned (css_get) before calling
337 * this function. In other words, we cannot call this function
338 * if there is no cgroup event for the current CPU context.
339 */
e5d1367f
SE
340static inline struct perf_cgroup *
341perf_cgroup_from_task(struct task_struct *task)
342{
343 return container_of(task_subsys_state(task, perf_subsys_id),
344 struct perf_cgroup, css);
345}
346
347static inline bool
348perf_cgroup_match(struct perf_event *event)
349{
350 struct perf_event_context *ctx = event->ctx;
351 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
352
ef824fa1
TH
353 /* @event doesn't care about cgroup */
354 if (!event->cgrp)
355 return true;
356
357 /* wants specific cgroup scope but @cpuctx isn't associated with any */
358 if (!cpuctx->cgrp)
359 return false;
360
361 /*
362 * Cgroup scoping is recursive. An event enabled for a cgroup is
363 * also enabled for all its descendant cgroups. If @cpuctx's
364 * cgroup is a descendant of @event's (the test covers identity
365 * case), it's a match.
366 */
367 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
368 event->cgrp->css.cgroup);
e5d1367f
SE
369}
370
9c5da09d 371static inline bool perf_tryget_cgroup(struct perf_event *event)
e5d1367f 372{
9c5da09d 373 return css_tryget(&event->cgrp->css);
e5d1367f
SE
374}
375
376static inline void perf_put_cgroup(struct perf_event *event)
377{
378 css_put(&event->cgrp->css);
379}
380
381static inline void perf_detach_cgroup(struct perf_event *event)
382{
383 perf_put_cgroup(event);
384 event->cgrp = NULL;
385}
386
387static inline int is_cgroup_event(struct perf_event *event)
388{
389 return event->cgrp != NULL;
390}
391
392static inline u64 perf_cgroup_event_time(struct perf_event *event)
393{
394 struct perf_cgroup_info *t;
395
396 t = per_cpu_ptr(event->cgrp->info, event->cpu);
397 return t->time;
398}
399
400static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
401{
402 struct perf_cgroup_info *info;
403 u64 now;
404
405 now = perf_clock();
406
407 info = this_cpu_ptr(cgrp->info);
408
409 info->time += now - info->timestamp;
410 info->timestamp = now;
411}
412
413static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
414{
415 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
416 if (cgrp_out)
417 __update_cgrp_time(cgrp_out);
418}
419
420static inline void update_cgrp_time_from_event(struct perf_event *event)
421{
3f7cce3c
SE
422 struct perf_cgroup *cgrp;
423
e5d1367f 424 /*
3f7cce3c
SE
425 * ensure we access cgroup data only when needed and
426 * when we know the cgroup is pinned (css_get)
e5d1367f 427 */
3f7cce3c 428 if (!is_cgroup_event(event))
e5d1367f
SE
429 return;
430
3f7cce3c
SE
431 cgrp = perf_cgroup_from_task(current);
432 /*
433 * Do not update time when cgroup is not active
434 */
435 if (cgrp == event->cgrp)
436 __update_cgrp_time(event->cgrp);
e5d1367f
SE
437}
438
439static inline void
3f7cce3c
SE
440perf_cgroup_set_timestamp(struct task_struct *task,
441 struct perf_event_context *ctx)
e5d1367f
SE
442{
443 struct perf_cgroup *cgrp;
444 struct perf_cgroup_info *info;
445
3f7cce3c
SE
446 /*
447 * ctx->lock held by caller
448 * ensure we do not access cgroup data
449 * unless we have the cgroup pinned (css_get)
450 */
451 if (!task || !ctx->nr_cgroups)
e5d1367f
SE
452 return;
453
454 cgrp = perf_cgroup_from_task(task);
455 info = this_cpu_ptr(cgrp->info);
3f7cce3c 456 info->timestamp = ctx->timestamp;
e5d1367f
SE
457}
458
459#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
460#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
461
462/*
463 * reschedule events based on the cgroup constraint of task.
464 *
465 * mode SWOUT : schedule out everything
466 * mode SWIN : schedule in based on cgroup for next
467 */
468void perf_cgroup_switch(struct task_struct *task, int mode)
469{
470 struct perf_cpu_context *cpuctx;
471 struct pmu *pmu;
472 unsigned long flags;
473
474 /*
 * disable interrupts to avoid getting nr_cgroup
476 * changes via __perf_event_disable(). Also
477 * avoids preemption.
478 */
479 local_irq_save(flags);
480
481 /*
482 * we reschedule only in the presence of cgroup
483 * constrained events.
484 */
485 rcu_read_lock();
486
487 list_for_each_entry_rcu(pmu, &pmus, entry) {
e5d1367f 488 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
95cf59ea
PZ
489 if (cpuctx->unique_pmu != pmu)
490 continue; /* ensure we process each cpuctx once */
e5d1367f 491
e5d1367f
SE
492 /*
493 * perf_cgroup_events says at least one
494 * context on this CPU has cgroup events.
495 *
496 * ctx->nr_cgroups reports the number of cgroup
497 * events for a context.
498 */
499 if (cpuctx->ctx.nr_cgroups > 0) {
facc4307
PZ
500 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
501 perf_pmu_disable(cpuctx->ctx.pmu);
e5d1367f
SE
502
503 if (mode & PERF_CGROUP_SWOUT) {
504 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
505 /*
506 * must not be done before ctxswout due
507 * to event_filter_match() in event_sched_out()
508 */
509 cpuctx->cgrp = NULL;
510 }
511
512 if (mode & PERF_CGROUP_SWIN) {
e566b76e 513 WARN_ON_ONCE(cpuctx->cgrp);
95cf59ea
PZ
514 /*
515 * set cgrp before ctxsw in to allow
516 * event_filter_match() to not have to pass
517 * task around
e5d1367f
SE
518 */
519 cpuctx->cgrp = perf_cgroup_from_task(task);
520 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
521 }
facc4307
PZ
522 perf_pmu_enable(cpuctx->ctx.pmu);
523 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
e5d1367f 524 }
e5d1367f
SE
525 }
526
527 rcu_read_unlock();
528
529 local_irq_restore(flags);
530}
531
a8d757ef
SE
532static inline void perf_cgroup_sched_out(struct task_struct *task,
533 struct task_struct *next)
e5d1367f 534{
a8d757ef
SE
535 struct perf_cgroup *cgrp1;
536 struct perf_cgroup *cgrp2 = NULL;
537
538 /*
539 * we come here when we know perf_cgroup_events > 0
540 */
541 cgrp1 = perf_cgroup_from_task(task);
542
543 /*
544 * next is NULL when called from perf_event_enable_on_exec()
545 * that will systematically cause a cgroup_switch()
546 */
547 if (next)
548 cgrp2 = perf_cgroup_from_task(next);
549
550 /*
551 * only schedule out current cgroup events if we know
552 * that we are switching to a different cgroup. Otherwise,
 * do not touch the cgroup events.
554 */
555 if (cgrp1 != cgrp2)
556 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
e5d1367f
SE
557}
558
a8d757ef
SE
559static inline void perf_cgroup_sched_in(struct task_struct *prev,
560 struct task_struct *task)
e5d1367f 561{
a8d757ef
SE
562 struct perf_cgroup *cgrp1;
563 struct perf_cgroup *cgrp2 = NULL;
564
565 /*
566 * we come here when we know perf_cgroup_events > 0
567 */
568 cgrp1 = perf_cgroup_from_task(task);
569
570 /* prev can never be NULL */
571 cgrp2 = perf_cgroup_from_task(prev);
572
573 /*
574 * only need to schedule in cgroup events if we are changing
575 * cgroup during ctxsw. Cgroup events were not scheduled
576 * out of ctxsw out if that was not the case.
577 */
578 if (cgrp1 != cgrp2)
579 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
e5d1367f
SE
580}
581
582static inline int perf_cgroup_connect(int fd, struct perf_event *event,
583 struct perf_event_attr *attr,
584 struct perf_event *group_leader)
585{
586 struct perf_cgroup *cgrp;
587 struct cgroup_subsys_state *css;
2903ff01
AV
588 struct fd f = fdget(fd);
589 int ret = 0;
e5d1367f 590
2903ff01 591 if (!f.file)
e5d1367f
SE
592 return -EBADF;
593
2903ff01 594 css = cgroup_css_from_dir(f.file, perf_subsys_id);
3db272c0
LZ
595 if (IS_ERR(css)) {
596 ret = PTR_ERR(css);
597 goto out;
598 }
e5d1367f
SE
599
600 cgrp = container_of(css, struct perf_cgroup, css);
601 event->cgrp = cgrp;
602
f75e18cb 603 /* must be done before we fput() the file */
9c5da09d
SQ
604 if (!perf_tryget_cgroup(event)) {
605 event->cgrp = NULL;
606 ret = -ENOENT;
607 goto out;
608 }
f75e18cb 609
e5d1367f
SE
610 /*
611 * all events in a group must monitor
612 * the same cgroup because a task belongs
613 * to only one perf cgroup at a time
614 */
615 if (group_leader && group_leader->cgrp != cgrp) {
616 perf_detach_cgroup(event);
617 ret = -EINVAL;
e5d1367f 618 }
3db272c0 619out:
2903ff01 620 fdput(f);
e5d1367f
SE
621 return ret;
622}
623
624static inline void
625perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
626{
627 struct perf_cgroup_info *t;
628 t = per_cpu_ptr(event->cgrp->info, event->cpu);
629 event->shadow_ctx_time = now - t->timestamp;
630}
631
632static inline void
633perf_cgroup_defer_enabled(struct perf_event *event)
634{
635 /*
636 * when the current task's perf cgroup does not match
637 * the event's, we need to remember to call the
638 * perf_mark_enable() function the first time a task with
639 * a matching perf cgroup is scheduled in.
640 */
641 if (is_cgroup_event(event) && !perf_cgroup_match(event))
642 event->cgrp_defer_enabled = 1;
643}
644
645static inline void
646perf_cgroup_mark_enabled(struct perf_event *event,
647 struct perf_event_context *ctx)
648{
649 struct perf_event *sub;
650 u64 tstamp = perf_event_time(event);
651
652 if (!event->cgrp_defer_enabled)
653 return;
654
655 event->cgrp_defer_enabled = 0;
656
657 event->tstamp_enabled = tstamp - event->total_time_enabled;
658 list_for_each_entry(sub, &event->sibling_list, group_entry) {
659 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
660 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
661 sub->cgrp_defer_enabled = 0;
662 }
663 }
664}
665#else /* !CONFIG_CGROUP_PERF */
666
667static inline bool
668perf_cgroup_match(struct perf_event *event)
669{
670 return true;
671}
672
673static inline void perf_detach_cgroup(struct perf_event *event)
674{}
675
676static inline int is_cgroup_event(struct perf_event *event)
677{
678 return 0;
679}
680
681static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
682{
683 return 0;
684}
685
686static inline void update_cgrp_time_from_event(struct perf_event *event)
687{
688}
689
690static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
691{
692}
693
a8d757ef
SE
694static inline void perf_cgroup_sched_out(struct task_struct *task,
695 struct task_struct *next)
e5d1367f
SE
696{
697}
698
a8d757ef
SE
699static inline void perf_cgroup_sched_in(struct task_struct *prev,
700 struct task_struct *task)
e5d1367f
SE
701{
702}
703
704static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
705 struct perf_event_attr *attr,
706 struct perf_event *group_leader)
707{
708 return -EINVAL;
709}
710
711static inline void
3f7cce3c
SE
712perf_cgroup_set_timestamp(struct task_struct *task,
713 struct perf_event_context *ctx)
e5d1367f
SE
714{
715}
716
717void
718perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
719{
720}
721
722static inline void
723perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
724{
725}
726
727static inline u64 perf_cgroup_event_time(struct perf_event *event)
728{
729 return 0;
730}
731
732static inline void
733perf_cgroup_defer_enabled(struct perf_event *event)
734{
735}
736
737static inline void
738perf_cgroup_mark_enabled(struct perf_event *event,
739 struct perf_event_context *ctx)
740{
741}
742#endif
743
9e630205
SE
744/*
745 * set default to be dependent on timer tick just
746 * like original code
747 */
748#define PERF_CPU_HRTIMER (1000 / HZ)
749/*
 * function must be called with interrupts disabled
751 */
752static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
753{
754 struct perf_cpu_context *cpuctx;
755 enum hrtimer_restart ret = HRTIMER_NORESTART;
756 int rotations = 0;
757
758 WARN_ON(!irqs_disabled());
759
760 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
761
762 rotations = perf_rotate_context(cpuctx);
763
764 /*
765 * arm timer if needed
766 */
767 if (rotations) {
768 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
769 ret = HRTIMER_RESTART;
770 }
771
772 return ret;
773}
774
775/* CPU is going down */
776void perf_cpu_hrtimer_cancel(int cpu)
777{
778 struct perf_cpu_context *cpuctx;
779 struct pmu *pmu;
780 unsigned long flags;
781
782 if (WARN_ON(cpu != smp_processor_id()))
783 return;
784
785 local_irq_save(flags);
786
787 rcu_read_lock();
788
789 list_for_each_entry_rcu(pmu, &pmus, entry) {
790 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
791
792 if (pmu->task_ctx_nr == perf_sw_context)
793 continue;
794
795 hrtimer_cancel(&cpuctx->hrtimer);
796 }
797
798 rcu_read_unlock();
799
800 local_irq_restore(flags);
801}
802
803static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
804{
805 struct hrtimer *hr = &cpuctx->hrtimer;
806 struct pmu *pmu = cpuctx->ctx.pmu;
62b85639 807 int timer;
9e630205
SE
808
809 /* no multiplexing needed for SW PMU */
810 if (pmu->task_ctx_nr == perf_sw_context)
811 return;
812
62b85639
SE
813 /*
814 * check default is sane, if not set then force to
815 * default interval (1/tick)
816 */
817 timer = pmu->hrtimer_interval_ms;
818 if (timer < 1)
819 timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
820
821 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
9e630205
SE
822
823 hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
824 hr->function = perf_cpu_hrtimer_handler;
825}
826
827static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
828{
829 struct hrtimer *hr = &cpuctx->hrtimer;
830 struct pmu *pmu = cpuctx->ctx.pmu;
831
832 /* not for SW PMU */
833 if (pmu->task_ctx_nr == perf_sw_context)
834 return;
835
836 if (hrtimer_active(hr))
837 return;
838
839 if (!hrtimer_callback_running(hr))
840 __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
841 0, HRTIMER_MODE_REL_PINNED, 0);
842}
843
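/*
 * perf_pmu_disable()/perf_pmu_enable() nest: the PMU callback is only
 * invoked when the per-cpu disable count transitions between 0 and 1.
 */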
33696fc0 844void perf_pmu_disable(struct pmu *pmu)
9e35ad38 845{
33696fc0
PZ
846 int *count = this_cpu_ptr(pmu->pmu_disable_count);
847 if (!(*count)++)
848 pmu->pmu_disable(pmu);
9e35ad38 849}
9e35ad38 850
33696fc0 851void perf_pmu_enable(struct pmu *pmu)
9e35ad38 852{
33696fc0
PZ
853 int *count = this_cpu_ptr(pmu->pmu_disable_count);
854 if (!--(*count))
855 pmu->pmu_enable(pmu);
9e35ad38 856}
9e35ad38 857
e9d2b064
PZ
858static DEFINE_PER_CPU(struct list_head, rotation_list);
859
860/*
861 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
862 * because they're strictly cpu affine and rotate_start is called with IRQs
863 * disabled, while rotate_context is called from IRQ context.
864 */
108b02cf 865static void perf_pmu_rotate_start(struct pmu *pmu)
9e35ad38 866{
108b02cf 867 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
e9d2b064 868 struct list_head *head = &__get_cpu_var(rotation_list);
b5ab4cd5 869
e9d2b064 870 WARN_ON(!irqs_disabled());
b5ab4cd5 871
12351ef8
FW
872 if (list_empty(&cpuctx->rotation_list)) {
873 int was_empty = list_empty(head);
e9d2b064 874 list_add(&cpuctx->rotation_list, head);
12351ef8
FW
875 if (was_empty)
876 tick_nohz_full_kick();
877 }
9e35ad38 878}
9e35ad38 879
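/*
 * Context reference counting: get_ctx()/put_ctx() pin a
 * perf_event_context; the final put drops the parent-context and task
 * references and frees the context via RCU.
 */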
cdd6c482 880static void get_ctx(struct perf_event_context *ctx)
a63eaf34 881{
e5289d4a 882 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
a63eaf34
PM
883}
884
cdd6c482 885static void put_ctx(struct perf_event_context *ctx)
a63eaf34 886{
564c2b21
PM
887 if (atomic_dec_and_test(&ctx->refcount)) {
888 if (ctx->parent_ctx)
889 put_ctx(ctx->parent_ctx);
c93f7669
PM
890 if (ctx->task)
891 put_task_struct(ctx->task);
cb796ff3 892 kfree_rcu(ctx, rcu_head);
564c2b21 893 }
a63eaf34
PM
894}
895
cdd6c482 896static void unclone_ctx(struct perf_event_context *ctx)
71a851b4
PZ
897{
898 if (ctx->parent_ctx) {
899 put_ctx(ctx->parent_ctx);
900 ctx->parent_ctx = NULL;
901 }
902}
903
6844c09d
ACM
904static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
905{
906 /*
907 * only top level events have the pid namespace they were created in
908 */
909 if (event->parent)
910 event = event->parent;
911
912 return task_tgid_nr_ns(p, event->ns);
913}
914
915static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
916{
917 /*
918 * only top level events have the pid namespace they were created in
919 */
920 if (event->parent)
921 event = event->parent;
922
923 return task_pid_nr_ns(p, event->ns);
924}
925
7f453c24 926/*
cdd6c482 927 * If we inherit events we want to return the parent event id
7f453c24
PZ
928 * to userspace.
929 */
cdd6c482 930static u64 primary_event_id(struct perf_event *event)
7f453c24 931{
cdd6c482 932 u64 id = event->id;
7f453c24 933
cdd6c482
IM
934 if (event->parent)
935 id = event->parent->id;
7f453c24
PZ
936
937 return id;
938}
939
25346b93 940/*
cdd6c482 941 * Get the perf_event_context for a task and lock it.
25346b93
PM
942 * This has to cope with with the fact that until it is locked,
943 * the context could get moved to another task.
944 */
cdd6c482 945static struct perf_event_context *
8dc85d54 946perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
25346b93 947{
cdd6c482 948 struct perf_event_context *ctx;
25346b93 949
9ed6060d 950retry:
058ebd0e
PZ
951 /*
952 * One of the few rules of preemptible RCU is that one cannot do
953 * rcu_read_unlock() while holding a scheduler (or nested) lock when
954 * part of the read side critical section was preemptible -- see
955 * rcu_read_unlock_special().
956 *
957 * Since ctx->lock nests under rq->lock we must ensure the entire read
958 * side critical section is non-preemptible.
959 */
960 preempt_disable();
961 rcu_read_lock();
8dc85d54 962 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
25346b93
PM
963 if (ctx) {
964 /*
965 * If this context is a clone of another, it might
966 * get swapped for another underneath us by
cdd6c482 967 * perf_event_task_sched_out, though the
25346b93
PM
968 * rcu_read_lock() protects us from any context
969 * getting freed. Lock the context and check if it
970 * got swapped before we could get the lock, and retry
971 * if so. If we locked the right context, then it
972 * can't get swapped on us any more.
973 */
e625cce1 974 raw_spin_lock_irqsave(&ctx->lock, *flags);
8dc85d54 975 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
e625cce1 976 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
058ebd0e
PZ
977 rcu_read_unlock();
978 preempt_enable();
25346b93
PM
979 goto retry;
980 }
b49a9e7e
PZ
981
982 if (!atomic_inc_not_zero(&ctx->refcount)) {
e625cce1 983 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
b49a9e7e
PZ
984 ctx = NULL;
985 }
25346b93
PM
986 }
987 rcu_read_unlock();
058ebd0e 988 preempt_enable();
25346b93
PM
989 return ctx;
990}
991
992/*
993 * Get the context for a task and increment its pin_count so it
994 * can't get swapped to another task. This also increments its
995 * reference count so that the context can't get freed.
996 */
8dc85d54
PZ
997static struct perf_event_context *
998perf_pin_task_context(struct task_struct *task, int ctxn)
25346b93 999{
cdd6c482 1000 struct perf_event_context *ctx;
25346b93
PM
1001 unsigned long flags;
1002
8dc85d54 1003 ctx = perf_lock_task_context(task, ctxn, &flags);
25346b93
PM
1004 if (ctx) {
1005 ++ctx->pin_count;
e625cce1 1006 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1007 }
1008 return ctx;
1009}
1010
cdd6c482 1011static void perf_unpin_context(struct perf_event_context *ctx)
25346b93
PM
1012{
1013 unsigned long flags;
1014
e625cce1 1015 raw_spin_lock_irqsave(&ctx->lock, flags);
25346b93 1016 --ctx->pin_count;
e625cce1 1017 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1018}
1019
f67218c3
PZ
1020/*
1021 * Update the record of the current time in a context.
1022 */
1023static void update_context_time(struct perf_event_context *ctx)
1024{
1025 u64 now = perf_clock();
1026
1027 ctx->time += now - ctx->timestamp;
1028 ctx->timestamp = now;
1029}
1030
4158755d
SE
1031static u64 perf_event_time(struct perf_event *event)
1032{
1033 struct perf_event_context *ctx = event->ctx;
e5d1367f
SE
1034
1035 if (is_cgroup_event(event))
1036 return perf_cgroup_event_time(event);
1037
4158755d
SE
1038 return ctx ? ctx->time : 0;
1039}
1040
f67218c3
PZ
1041/*
1042 * Update the total_time_enabled and total_time_running fields for a event.
b7526f0c 1043 * The caller of this function needs to hold the ctx->lock.
f67218c3
PZ
1044 */
1045static void update_event_times(struct perf_event *event)
1046{
1047 struct perf_event_context *ctx = event->ctx;
1048 u64 run_end;
1049
1050 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1051 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1052 return;
e5d1367f
SE
1053 /*
1054 * in cgroup mode, time_enabled represents
1055 * the time the event was enabled AND active
1056 * tasks were in the monitored cgroup. This is
1057 * independent of the activity of the context as
1058 * there may be a mix of cgroup and non-cgroup events.
1059 *
1060 * That is why we treat cgroup events differently
1061 * here.
1062 */
1063 if (is_cgroup_event(event))
46cd6a7f 1064 run_end = perf_cgroup_event_time(event);
e5d1367f
SE
1065 else if (ctx->is_active)
1066 run_end = ctx->time;
acd1d7c1
PZ
1067 else
1068 run_end = event->tstamp_stopped;
1069
1070 event->total_time_enabled = run_end - event->tstamp_enabled;
f67218c3
PZ
1071
1072 if (event->state == PERF_EVENT_STATE_INACTIVE)
1073 run_end = event->tstamp_stopped;
1074 else
4158755d 1075 run_end = perf_event_time(event);
f67218c3
PZ
1076
1077 event->total_time_running = run_end - event->tstamp_running;
e5d1367f 1078
f67218c3
PZ
1079}
1080
96c21a46
PZ
1081/*
1082 * Update total_time_enabled and total_time_running for all events in a group.
1083 */
1084static void update_group_times(struct perf_event *leader)
1085{
1086 struct perf_event *event;
1087
1088 update_event_times(leader);
1089 list_for_each_entry(event, &leader->sibling_list, group_entry)
1090 update_event_times(event);
1091}
1092
889ff015
FW
1093static struct list_head *
1094ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1095{
1096 if (event->attr.pinned)
1097 return &ctx->pinned_groups;
1098 else
1099 return &ctx->flexible_groups;
1100}
1101
fccc714b 1102/*
cdd6c482 1103 * Add a event from the lists for its context.
fccc714b
PZ
1104 * Must be called with ctx->mutex and ctx->lock held.
1105 */
04289bb9 1106static void
cdd6c482 1107list_add_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1108{
8a49542c
PZ
1109 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1110 event->attach_state |= PERF_ATTACH_CONTEXT;
04289bb9
IM
1111
1112 /*
8a49542c
PZ
1113 * If we're a stand alone event or group leader, we go to the context
1114 * list, group events are kept attached to the group so that
1115 * perf_group_detach can, at all times, locate all siblings.
04289bb9 1116 */
8a49542c 1117 if (event->group_leader == event) {
889ff015
FW
1118 struct list_head *list;
1119
d6f962b5
FW
1120 if (is_software_event(event))
1121 event->group_flags |= PERF_GROUP_SOFTWARE;
1122
889ff015
FW
1123 list = ctx_group_list(event, ctx);
1124 list_add_tail(&event->group_entry, list);
5c148194 1125 }
592903cd 1126
08309379 1127 if (is_cgroup_event(event))
e5d1367f 1128 ctx->nr_cgroups++;
e5d1367f 1129
d010b332
SE
1130 if (has_branch_stack(event))
1131 ctx->nr_branch_stack++;
1132
cdd6c482 1133 list_add_rcu(&event->event_entry, &ctx->event_list);
b5ab4cd5 1134 if (!ctx->nr_events)
108b02cf 1135 perf_pmu_rotate_start(ctx->pmu);
cdd6c482
IM
1136 ctx->nr_events++;
1137 if (event->attr.inherit_stat)
bfbd3381 1138 ctx->nr_stat++;
04289bb9
IM
1139}
1140
0231bb53
JO
1141/*
1142 * Initialize event state based on the perf_event_attr::disabled.
1143 */
1144static inline void perf_event__state_init(struct perf_event *event)
1145{
1146 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1147 PERF_EVENT_STATE_INACTIVE;
1148}
1149
c320c7b7
ACM
1150/*
1151 * Called at perf_event creation and when events are attached/detached from a
1152 * group.
1153 */
1154static void perf_event__read_size(struct perf_event *event)
1155{
1156 int entry = sizeof(u64); /* value */
1157 int size = 0;
1158 int nr = 1;
1159
1160 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1161 size += sizeof(u64);
1162
1163 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1164 size += sizeof(u64);
1165
1166 if (event->attr.read_format & PERF_FORMAT_ID)
1167 entry += sizeof(u64);
1168
1169 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1170 nr += event->group_leader->nr_siblings;
1171 size += sizeof(u64);
1172 }
1173
1174 size += entry * nr;
1175 event->read_size = size;
1176}
1177
1178static void perf_event__header_size(struct perf_event *event)
1179{
1180 struct perf_sample_data *data;
1181 u64 sample_type = event->attr.sample_type;
1182 u16 size = 0;
1183
1184 perf_event__read_size(event);
1185
1186 if (sample_type & PERF_SAMPLE_IP)
1187 size += sizeof(data->ip);
1188
6844c09d
ACM
1189 if (sample_type & PERF_SAMPLE_ADDR)
1190 size += sizeof(data->addr);
1191
1192 if (sample_type & PERF_SAMPLE_PERIOD)
1193 size += sizeof(data->period);
1194
c3feedf2
AK
1195 if (sample_type & PERF_SAMPLE_WEIGHT)
1196 size += sizeof(data->weight);
1197
6844c09d
ACM
1198 if (sample_type & PERF_SAMPLE_READ)
1199 size += event->read_size;
1200
d6be9ad6
SE
1201 if (sample_type & PERF_SAMPLE_DATA_SRC)
1202 size += sizeof(data->data_src.val);
1203
6844c09d
ACM
1204 event->header_size = size;
1205}
1206
1207static void perf_event__id_header_size(struct perf_event *event)
1208{
1209 struct perf_sample_data *data;
1210 u64 sample_type = event->attr.sample_type;
1211 u16 size = 0;
1212
c320c7b7
ACM
1213 if (sample_type & PERF_SAMPLE_TID)
1214 size += sizeof(data->tid_entry);
1215
1216 if (sample_type & PERF_SAMPLE_TIME)
1217 size += sizeof(data->time);
1218
c320c7b7
ACM
1219 if (sample_type & PERF_SAMPLE_ID)
1220 size += sizeof(data->id);
1221
1222 if (sample_type & PERF_SAMPLE_STREAM_ID)
1223 size += sizeof(data->stream_id);
1224
1225 if (sample_type & PERF_SAMPLE_CPU)
1226 size += sizeof(data->cpu_entry);
1227
6844c09d 1228 event->id_header_size = size;
c320c7b7
ACM
1229}
1230
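/*
 * Attach @event to its group leader's sibling list (clearing the
 * leader's software-only group flag if needed) and refresh the cached
 * header sizes of every event in the group.
 */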
1231static void perf_group_attach(struct perf_event *event)
1232{
c320c7b7 1233 struct perf_event *group_leader = event->group_leader, *pos;
8a49542c 1234
74c3337c
PZ
1235 /*
1236 * We can have double attach due to group movement in perf_event_open.
1237 */
1238 if (event->attach_state & PERF_ATTACH_GROUP)
1239 return;
1240
8a49542c
PZ
1241 event->attach_state |= PERF_ATTACH_GROUP;
1242
1243 if (group_leader == event)
1244 return;
1245
1246 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1247 !is_software_event(event))
1248 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1249
1250 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1251 group_leader->nr_siblings++;
c320c7b7
ACM
1252
1253 perf_event__header_size(group_leader);
1254
1255 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1256 perf_event__header_size(pos);
8a49542c
PZ
1257}
1258
a63eaf34 1259/*
cdd6c482 1260 * Remove a event from the lists for its context.
fccc714b 1261 * Must be called with ctx->mutex and ctx->lock held.
a63eaf34 1262 */
04289bb9 1263static void
cdd6c482 1264list_del_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1265{
68cacd29 1266 struct perf_cpu_context *cpuctx;
8a49542c
PZ
1267 /*
1268 * We can have double detach due to exit/hot-unplug + close.
1269 */
1270 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
a63eaf34 1271 return;
8a49542c
PZ
1272
1273 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1274
68cacd29 1275 if (is_cgroup_event(event)) {
e5d1367f 1276 ctx->nr_cgroups--;
68cacd29
SE
1277 cpuctx = __get_cpu_context(ctx);
1278 /*
1279 * if there are no more cgroup events
 * then clear cgrp to avoid stale pointer
1281 * in update_cgrp_time_from_cpuctx()
1282 */
1283 if (!ctx->nr_cgroups)
1284 cpuctx->cgrp = NULL;
1285 }
e5d1367f 1286
d010b332
SE
1287 if (has_branch_stack(event))
1288 ctx->nr_branch_stack--;
1289
cdd6c482
IM
1290 ctx->nr_events--;
1291 if (event->attr.inherit_stat)
bfbd3381 1292 ctx->nr_stat--;
8bc20959 1293
cdd6c482 1294 list_del_rcu(&event->event_entry);
04289bb9 1295
8a49542c
PZ
1296 if (event->group_leader == event)
1297 list_del_init(&event->group_entry);
5c148194 1298
96c21a46 1299 update_group_times(event);
b2e74a26
SE
1300
1301 /*
1302 * If event was in error state, then keep it
1303 * that way, otherwise bogus counts will be
1304 * returned on read(). The only way to get out
1305 * of error state is by explicit re-enabling
1306 * of the event
1307 */
1308 if (event->state > PERF_EVENT_STATE_OFF)
1309 event->state = PERF_EVENT_STATE_OFF;
050735b0
PZ
1310}
1311
8a49542c 1312static void perf_group_detach(struct perf_event *event)
050735b0
PZ
1313{
1314 struct perf_event *sibling, *tmp;
8a49542c
PZ
1315 struct list_head *list = NULL;
1316
1317 /*
1318 * We can have double detach due to exit/hot-unplug + close.
1319 */
1320 if (!(event->attach_state & PERF_ATTACH_GROUP))
1321 return;
1322
1323 event->attach_state &= ~PERF_ATTACH_GROUP;
1324
1325 /*
1326 * If this is a sibling, remove it from its group.
1327 */
1328 if (event->group_leader != event) {
1329 list_del_init(&event->group_entry);
1330 event->group_leader->nr_siblings--;
c320c7b7 1331 goto out;
8a49542c
PZ
1332 }
1333
1334 if (!list_empty(&event->group_entry))
1335 list = &event->group_entry;
2e2af50b 1336
04289bb9 1337 /*
cdd6c482
IM
1338 * If this was a group event with sibling events then
1339 * upgrade the siblings to singleton events by adding them
8a49542c 1340 * to whatever list we are on.
04289bb9 1341 */
cdd6c482 1342 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
8a49542c
PZ
1343 if (list)
1344 list_move_tail(&sibling->group_entry, list);
04289bb9 1345 sibling->group_leader = sibling;
d6f962b5
FW
1346
1347 /* Inherit group flags from the previous leader */
1348 sibling->group_flags = event->group_flags;
04289bb9 1349 }
c320c7b7
ACM
1350
1351out:
1352 perf_event__header_size(event->group_leader);
1353
1354 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1355 perf_event__header_size(tmp);
04289bb9
IM
1356}
1357
fa66f07a
SE
1358static inline int
1359event_filter_match(struct perf_event *event)
1360{
e5d1367f
SE
1361 return (event->cpu == -1 || event->cpu == smp_processor_id())
1362 && perf_cgroup_match(event);
fa66f07a
SE
1363}
1364
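/*
 * Take one event off the PMU: update its time stamps, call pmu->del()
 * and drop the context's active/frequency counts.
 */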
1365static void
1366event_sched_out(struct perf_event *event,
3b6f9e5c 1367 struct perf_cpu_context *cpuctx,
cdd6c482 1368 struct perf_event_context *ctx)
3b6f9e5c 1369{
4158755d 1370 u64 tstamp = perf_event_time(event);
fa66f07a
SE
1371 u64 delta;
1372 /*
1373 * An event which could not be activated because of
1374 * filter mismatch still needs to have its timings
 * maintained, otherwise bogus information is returned
1376 * via read() for time_enabled, time_running:
1377 */
1378 if (event->state == PERF_EVENT_STATE_INACTIVE
1379 && !event_filter_match(event)) {
e5d1367f 1380 delta = tstamp - event->tstamp_stopped;
fa66f07a 1381 event->tstamp_running += delta;
4158755d 1382 event->tstamp_stopped = tstamp;
fa66f07a
SE
1383 }
1384
cdd6c482 1385 if (event->state != PERF_EVENT_STATE_ACTIVE)
9ffcfa6f 1386 return;
3b6f9e5c 1387
cdd6c482
IM
1388 event->state = PERF_EVENT_STATE_INACTIVE;
1389 if (event->pending_disable) {
1390 event->pending_disable = 0;
1391 event->state = PERF_EVENT_STATE_OFF;
970892a9 1392 }
4158755d 1393 event->tstamp_stopped = tstamp;
a4eaf7f1 1394 event->pmu->del(event, 0);
cdd6c482 1395 event->oncpu = -1;
3b6f9e5c 1396
cdd6c482 1397 if (!is_software_event(event))
3b6f9e5c
PM
1398 cpuctx->active_oncpu--;
1399 ctx->nr_active--;
0f5a2601
PZ
1400 if (event->attr.freq && event->attr.sample_freq)
1401 ctx->nr_freq--;
cdd6c482 1402 if (event->attr.exclusive || !cpuctx->active_oncpu)
3b6f9e5c
PM
1403 cpuctx->exclusive = 0;
1404}
1405
d859e29f 1406static void
cdd6c482 1407group_sched_out(struct perf_event *group_event,
d859e29f 1408 struct perf_cpu_context *cpuctx,
cdd6c482 1409 struct perf_event_context *ctx)
d859e29f 1410{
cdd6c482 1411 struct perf_event *event;
fa66f07a 1412 int state = group_event->state;
d859e29f 1413
cdd6c482 1414 event_sched_out(group_event, cpuctx, ctx);
d859e29f
PM
1415
1416 /*
1417 * Schedule out siblings (if any):
1418 */
cdd6c482
IM
1419 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1420 event_sched_out(event, cpuctx, ctx);
d859e29f 1421
fa66f07a 1422 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
d859e29f
PM
1423 cpuctx->exclusive = 0;
1424}
1425
0793a61d 1426/*
cdd6c482 1427 * Cross CPU call to remove a performance event
0793a61d 1428 *
cdd6c482 1429 * We disable the event on the hardware level first. After that we
0793a61d
TG
1430 * remove it from the context list.
1431 */
fe4b04fa 1432static int __perf_remove_from_context(void *info)
0793a61d 1433{
cdd6c482
IM
1434 struct perf_event *event = info;
1435 struct perf_event_context *ctx = event->ctx;
108b02cf 1436 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
0793a61d 1437
e625cce1 1438 raw_spin_lock(&ctx->lock);
cdd6c482 1439 event_sched_out(event, cpuctx, ctx);
cdd6c482 1440 list_del_event(event, ctx);
64ce3126
PZ
1441 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1442 ctx->is_active = 0;
1443 cpuctx->task_ctx = NULL;
1444 }
e625cce1 1445 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
1446
1447 return 0;
0793a61d
TG
1448}
1449
1450
1451/*
cdd6c482 1452 * Remove the event from a task's (or a CPU's) list of events.
0793a61d 1453 *
cdd6c482 1454 * CPU events are removed with a smp call. For task events we only
0793a61d 1455 * call when the task is on a CPU.
c93f7669 1456 *
cdd6c482
IM
1457 * If event->ctx is a cloned context, callers must make sure that
1458 * every task struct that event->ctx->task could possibly point to
c93f7669
PM
1459 * remains valid. This is OK when called from perf_release since
1460 * that only calls us on the top-level context, which can't be a clone.
cdd6c482 1461 * When called from perf_event_exit_task, it's OK because the
c93f7669 1462 * context has been detached from its task.
0793a61d 1463 */
fe4b04fa 1464static void perf_remove_from_context(struct perf_event *event)
0793a61d 1465{
cdd6c482 1466 struct perf_event_context *ctx = event->ctx;
0793a61d
TG
1467 struct task_struct *task = ctx->task;
1468
fe4b04fa
PZ
1469 lockdep_assert_held(&ctx->mutex);
1470
0793a61d
TG
1471 if (!task) {
1472 /*
cdd6c482 1473 * Per cpu events are removed via an smp call and
af901ca1 1474 * the removal is always successful.
0793a61d 1475 */
fe4b04fa 1476 cpu_function_call(event->cpu, __perf_remove_from_context, event);
0793a61d
TG
1477 return;
1478 }
1479
1480retry:
fe4b04fa
PZ
1481 if (!task_function_call(task, __perf_remove_from_context, event))
1482 return;
0793a61d 1483
e625cce1 1484 raw_spin_lock_irq(&ctx->lock);
0793a61d 1485 /*
fe4b04fa
PZ
1486 * If we failed to find a running task, but find the context active now
1487 * that we've acquired the ctx->lock, retry.
0793a61d 1488 */
fe4b04fa 1489 if (ctx->is_active) {
e625cce1 1490 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1491 goto retry;
1492 }
1493
1494 /*
fe4b04fa
PZ
 * Since the task isn't running, it's safe to remove the event; us
1496 * holding the ctx->lock ensures the task won't get scheduled in.
0793a61d 1497 */
fe4b04fa 1498 list_del_event(event, ctx);
e625cce1 1499 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1500}
1501
d859e29f 1502/*
cdd6c482 1503 * Cross CPU call to disable a performance event
d859e29f 1504 */
500ad2d8 1505int __perf_event_disable(void *info)
d859e29f 1506{
cdd6c482 1507 struct perf_event *event = info;
cdd6c482 1508 struct perf_event_context *ctx = event->ctx;
108b02cf 1509 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
d859e29f
PM
1510
1511 /*
cdd6c482
IM
1512 * If this is a per-task event, need to check whether this
1513 * event's task is the current task on this cpu.
fe4b04fa
PZ
1514 *
1515 * Can trigger due to concurrent perf_event_context_sched_out()
1516 * flipping contexts around.
d859e29f 1517 */
665c2142 1518 if (ctx->task && cpuctx->task_ctx != ctx)
fe4b04fa 1519 return -EINVAL;
d859e29f 1520
e625cce1 1521 raw_spin_lock(&ctx->lock);
d859e29f
PM
1522
1523 /*
cdd6c482 1524 * If the event is on, turn it off.
d859e29f
PM
1525 * If it is in error state, leave it in error state.
1526 */
cdd6c482 1527 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
4af4998b 1528 update_context_time(ctx);
e5d1367f 1529 update_cgrp_time_from_event(event);
cdd6c482
IM
1530 update_group_times(event);
1531 if (event == event->group_leader)
1532 group_sched_out(event, cpuctx, ctx);
d859e29f 1533 else
cdd6c482
IM
1534 event_sched_out(event, cpuctx, ctx);
1535 event->state = PERF_EVENT_STATE_OFF;
d859e29f
PM
1536 }
1537
e625cce1 1538 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
1539
1540 return 0;
d859e29f
PM
1541}
1542
1543/*
cdd6c482 1544 * Disable a event.
c93f7669 1545 *
cdd6c482
IM
1546 * If event->ctx is a cloned context, callers must make sure that
1547 * every task struct that event->ctx->task could possibly point to
c93f7669 1548 * remains valid. This condition is satisifed when called through
cdd6c482
IM
1549 * perf_event_for_each_child or perf_event_for_each because they
1550 * hold the top-level event's child_mutex, so any descendant that
1551 * goes to exit will block in sync_child_event.
1552 * When called from perf_pending_event it's OK because event->ctx
c93f7669 1553 * is the current context on this CPU and preemption is disabled,
cdd6c482 1554 * hence we can't get into perf_event_task_sched_out for this context.
d859e29f 1555 */
44234adc 1556void perf_event_disable(struct perf_event *event)
d859e29f 1557{
cdd6c482 1558 struct perf_event_context *ctx = event->ctx;
d859e29f
PM
1559 struct task_struct *task = ctx->task;
1560
1561 if (!task) {
1562 /*
cdd6c482 1563 * Disable the event on the cpu that it's on
d859e29f 1564 */
fe4b04fa 1565 cpu_function_call(event->cpu, __perf_event_disable, event);
d859e29f
PM
1566 return;
1567 }
1568
9ed6060d 1569retry:
fe4b04fa
PZ
1570 if (!task_function_call(task, __perf_event_disable, event))
1571 return;
d859e29f 1572
e625cce1 1573 raw_spin_lock_irq(&ctx->lock);
d859e29f 1574 /*
cdd6c482 1575 * If the event is still active, we need to retry the cross-call.
d859e29f 1576 */
cdd6c482 1577 if (event->state == PERF_EVENT_STATE_ACTIVE) {
e625cce1 1578 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa
PZ
1579 /*
1580 * Reload the task pointer, it might have been changed by
1581 * a concurrent perf_event_context_sched_out().
1582 */
1583 task = ctx->task;
d859e29f
PM
1584 goto retry;
1585 }
1586
1587 /*
1588 * Since we have the lock this context can't be scheduled
1589 * in, so we can change the state safely.
1590 */
cdd6c482
IM
1591 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1592 update_group_times(event);
1593 event->state = PERF_EVENT_STATE_OFF;
53cfbf59 1594 }
e625cce1 1595 raw_spin_unlock_irq(&ctx->lock);
d859e29f 1596}
dcfce4a0 1597EXPORT_SYMBOL_GPL(perf_event_disable);
d859e29f 1598
e5d1367f
SE
1599static void perf_set_shadow_time(struct perf_event *event,
1600 struct perf_event_context *ctx,
1601 u64 tstamp)
1602{
1603 /*
1604 * use the correct time source for the time snapshot
1605 *
1606 * We could get by without this by leveraging the
1607 * fact that to get to this function, the caller
1608 * has most likely already called update_context_time()
1609 * and update_cgrp_time_xx() and thus both timestamp
1610 * are identical (or very close). Given that tstamp is,
1611 * already adjusted for cgroup, we could say that:
1612 * tstamp - ctx->timestamp
1613 * is equivalent to
1614 * tstamp - cgrp->timestamp.
1615 *
1616 * Then, in perf_output_read(), the calculation would
1617 * work with no changes because:
1618 * - event is guaranteed scheduled in
1619 * - no scheduled out in between
1620 * - thus the timestamp would be the same
1621 *
1622 * But this is a bit hairy.
1623 *
1624 * So instead, we have an explicit cgroup call to remain
1625 * within the time time source all along. We believe it
1626 * is cleaner and simpler to understand.
1627 */
1628 if (is_cgroup_event(event))
1629 perf_cgroup_set_shadow_time(event, tstamp);
1630 else
1631 event->shadow_ctx_time = tstamp - ctx->timestamp;
1632}
1633
4fe757dd
PZ
1634#define MAX_INTERRUPTS (~0ULL)
1635
1636static void perf_log_throttle(struct perf_event *event, int enable);
1637
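/*
 * Put one event on the PMU: mark it ACTIVE, unthrottle it if it hit
 * MAX_INTERRUPTS, and call pmu->add(); on failure the event goes back
 * to INACTIVE and -EAGAIN is returned.
 */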
235c7fc7 1638static int
9ffcfa6f 1639event_sched_in(struct perf_event *event,
235c7fc7 1640 struct perf_cpu_context *cpuctx,
6e37738a 1641 struct perf_event_context *ctx)
235c7fc7 1642{
4158755d
SE
1643 u64 tstamp = perf_event_time(event);
1644
cdd6c482 1645 if (event->state <= PERF_EVENT_STATE_OFF)
235c7fc7
IM
1646 return 0;
1647
cdd6c482 1648 event->state = PERF_EVENT_STATE_ACTIVE;
6e37738a 1649 event->oncpu = smp_processor_id();
4fe757dd
PZ
1650
1651 /*
1652 * Unthrottle events, since we scheduled we might have missed several
1653 * ticks already, also for a heavily scheduling task there is little
1654 * guarantee it'll get a tick in a timely manner.
1655 */
1656 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1657 perf_log_throttle(event, 1);
1658 event->hw.interrupts = 0;
1659 }
1660
235c7fc7
IM
1661 /*
1662 * The new state must be visible before we turn it on in the hardware:
1663 */
1664 smp_wmb();
1665
a4eaf7f1 1666 if (event->pmu->add(event, PERF_EF_START)) {
cdd6c482
IM
1667 event->state = PERF_EVENT_STATE_INACTIVE;
1668 event->oncpu = -1;
235c7fc7
IM
1669 return -EAGAIN;
1670 }
1671
4158755d 1672 event->tstamp_running += tstamp - event->tstamp_stopped;
9ffcfa6f 1673
e5d1367f 1674 perf_set_shadow_time(event, ctx, tstamp);
eed01528 1675
cdd6c482 1676 if (!is_software_event(event))
3b6f9e5c 1677 cpuctx->active_oncpu++;
235c7fc7 1678 ctx->nr_active++;
0f5a2601
PZ
1679 if (event->attr.freq && event->attr.sample_freq)
1680 ctx->nr_freq++;
235c7fc7 1681
cdd6c482 1682 if (event->attr.exclusive)
3b6f9e5c
PM
1683 cpuctx->exclusive = 1;
1684
235c7fc7
IM
1685 return 0;
1686}
1687
6751b71e 1688static int
cdd6c482 1689group_sched_in(struct perf_event *group_event,
6751b71e 1690 struct perf_cpu_context *cpuctx,
6e37738a 1691 struct perf_event_context *ctx)
6751b71e 1692{
6bde9b6c 1693 struct perf_event *event, *partial_group = NULL;
51b0fe39 1694 struct pmu *pmu = group_event->pmu;
d7842da4
SE
1695 u64 now = ctx->time;
1696 bool simulate = false;
6751b71e 1697
cdd6c482 1698 if (group_event->state == PERF_EVENT_STATE_OFF)
6751b71e
PM
1699 return 0;
1700
ad5133b7 1701 pmu->start_txn(pmu);
6bde9b6c 1702
9ffcfa6f 1703 if (event_sched_in(group_event, cpuctx, ctx)) {
ad5133b7 1704 pmu->cancel_txn(pmu);
9e630205 1705 perf_cpu_hrtimer_restart(cpuctx);
6751b71e 1706 return -EAGAIN;
90151c35 1707 }
6751b71e
PM
1708
1709 /*
1710 * Schedule in siblings as one group (if any):
1711 */
cdd6c482 1712 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
9ffcfa6f 1713 if (event_sched_in(event, cpuctx, ctx)) {
cdd6c482 1714 partial_group = event;
6751b71e
PM
1715 goto group_error;
1716 }
1717 }
1718
9ffcfa6f 1719 if (!pmu->commit_txn(pmu))
6e85158c 1720 return 0;
9ffcfa6f 1721
6751b71e
PM
1722group_error:
1723 /*
1724 * Groups can be scheduled in as one unit only, so undo any
1725 * partial group before returning:
d7842da4
SE
1726 * The events up to the failed event are scheduled out normally,
1727 * tstamp_stopped will be updated.
1728 *
1729 * The failed events and the remaining siblings need to have
1730 * their timings updated as if they had gone thru event_sched_in()
1731 * and event_sched_out(). This is required to get consistent timings
1732 * across the group. This also takes care of the case where the group
1733 * could never be scheduled by ensuring tstamp_stopped is set to mark
1734 * the time the event was actually stopped, such that time delta
1735 * calculation in update_event_times() is correct.
6751b71e 1736 */
cdd6c482
IM
1737 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1738 if (event == partial_group)
d7842da4
SE
1739 simulate = true;
1740
1741 if (simulate) {
1742 event->tstamp_running += now - event->tstamp_stopped;
1743 event->tstamp_stopped = now;
1744 } else {
1745 event_sched_out(event, cpuctx, ctx);
1746 }
6751b71e 1747 }
9ffcfa6f 1748 event_sched_out(group_event, cpuctx, ctx);
6751b71e 1749
ad5133b7 1750 pmu->cancel_txn(pmu);
90151c35 1751
9e630205
SE
1752 perf_cpu_hrtimer_restart(cpuctx);
1753
6751b71e
PM
1754 return -EAGAIN;
1755}
1756
3b6f9e5c 1757/*
cdd6c482 1758 * Work out whether we can put this event group on the CPU now.
3b6f9e5c 1759 */
cdd6c482 1760static int group_can_go_on(struct perf_event *event,
3b6f9e5c
PM
1761 struct perf_cpu_context *cpuctx,
1762 int can_add_hw)
1763{
1764 /*
cdd6c482 1765 * Groups consisting entirely of software events can always go on.
3b6f9e5c 1766 */
d6f962b5 1767 if (event->group_flags & PERF_GROUP_SOFTWARE)
3b6f9e5c
PM
1768 return 1;
1769 /*
1770 * If an exclusive group is already on, no other hardware
cdd6c482 1771 * events can go on.
3b6f9e5c
PM
1772 */
1773 if (cpuctx->exclusive)
1774 return 0;
1775 /*
1776 * If this group is exclusive and there are already
cdd6c482 1777 * events on the CPU, it can't go on.
3b6f9e5c 1778 */
cdd6c482 1779 if (event->attr.exclusive && cpuctx->active_oncpu)
3b6f9e5c
PM
1780 return 0;
1781 /*
1782 * Otherwise, try to add it if all previous groups were able
1783 * to go on.
1784 */
1785 return can_add_hw;
1786}
1787
cdd6c482
IM
1788static void add_event_to_ctx(struct perf_event *event,
1789 struct perf_event_context *ctx)
53cfbf59 1790{
4158755d
SE
1791 u64 tstamp = perf_event_time(event);
1792
cdd6c482 1793 list_add_event(event, ctx);
8a49542c 1794 perf_group_attach(event);
4158755d
SE
1795 event->tstamp_enabled = tstamp;
1796 event->tstamp_running = tstamp;
1797 event->tstamp_stopped = tstamp;
53cfbf59
PM
1798}
1799
2c29ef0f
PZ
1800static void task_ctx_sched_out(struct perf_event_context *ctx);
1801static void
1802ctx_sched_in(struct perf_event_context *ctx,
1803 struct perf_cpu_context *cpuctx,
1804 enum event_type_t event_type,
1805 struct task_struct *task);
fe4b04fa 1806
dce5855b
PZ
1807static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1808 struct perf_event_context *ctx,
1809 struct task_struct *task)
1810{
1811 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1812 if (ctx)
1813 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1814 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1815 if (ctx)
1816 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1817}
1818
0793a61d 1819/*
cdd6c482 1820 * Cross CPU call to install and enable a performance event
682076ae
PZ
1821 *
1822 * Must be called with ctx->mutex held
0793a61d 1823 */
fe4b04fa 1824static int __perf_install_in_context(void *info)
0793a61d 1825{
cdd6c482
IM
1826 struct perf_event *event = info;
1827 struct perf_event_context *ctx = event->ctx;
108b02cf 1828 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2c29ef0f
PZ
1829 struct perf_event_context *task_ctx = cpuctx->task_ctx;
1830 struct task_struct *task = current;
1831
b58f6b0d 1832 perf_ctx_lock(cpuctx, task_ctx);
2c29ef0f 1833 perf_pmu_disable(cpuctx->ctx.pmu);
0793a61d
TG
1834
1835 /*
2c29ef0f 1836 * If there was an active task_ctx, schedule it out.
0793a61d 1837 */
b58f6b0d 1838 if (task_ctx)
2c29ef0f 1839 task_ctx_sched_out(task_ctx);
b58f6b0d
PZ
1840
1841 /*
1842 * If the context we're installing events in is not the
1843 * active task_ctx, flip them.
1844 */
1845 if (ctx->task && task_ctx != ctx) {
1846 if (task_ctx)
1847 raw_spin_unlock(&task_ctx->lock);
1848 raw_spin_lock(&ctx->lock);
1849 task_ctx = ctx;
1850 }
1851
1852 if (task_ctx) {
1853 cpuctx->task_ctx = task_ctx;
2c29ef0f
PZ
1854 task = task_ctx->task;
1855 }
b58f6b0d 1856
2c29ef0f 1857 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
0793a61d 1858
4af4998b 1859 update_context_time(ctx);
e5d1367f
SE
1860 /*
1861 * update cgrp time only if current cgrp
1862 * matches event->cgrp. Must be done before
1863 * calling add_event_to_ctx()
1864 */
1865 update_cgrp_time_from_event(event);
0793a61d 1866
cdd6c482 1867 add_event_to_ctx(event, ctx);
0793a61d 1868
d859e29f 1869 /*
2c29ef0f 1870 * Schedule everything back in
d859e29f 1871 */
dce5855b 1872 perf_event_sched_in(cpuctx, task_ctx, task);
2c29ef0f
PZ
1873
1874 perf_pmu_enable(cpuctx->ctx.pmu);
1875 perf_ctx_unlock(cpuctx, task_ctx);
fe4b04fa
PZ
1876
1877 return 0;
0793a61d
TG
1878}
1879
1880/*
cdd6c482 1881 * Attach a performance event to a context
0793a61d 1882 *
cdd6c482
IM
1883 * First we add the event to the list with the hardware enable bit
1884 * in event->hw_config cleared.
0793a61d 1885 *
cdd6c482 1886 * If the event is attached to a task which is on a CPU we use a smp
0793a61d
TG
1887 * call to enable it in the task context. The task might have been
1888 * scheduled away, but we check this in the smp call again.
1889 */
1890static void
cdd6c482
IM
1891perf_install_in_context(struct perf_event_context *ctx,
1892 struct perf_event *event,
0793a61d
TG
1893 int cpu)
1894{
1895 struct task_struct *task = ctx->task;
1896
fe4b04fa
PZ
1897 lockdep_assert_held(&ctx->mutex);
1898
c3f00c70 1899 event->ctx = ctx;
0cda4c02
YZ
1900 if (event->cpu != -1)
1901 event->cpu = cpu;
c3f00c70 1902
0793a61d
TG
1903 if (!task) {
1904 /*
cdd6c482 1905 * Per cpu events are installed via an smp call and
af901ca1 1906 * the install is always successful.
0793a61d 1907 */
fe4b04fa 1908 cpu_function_call(cpu, __perf_install_in_context, event);
0793a61d
TG
1909 return;
1910 }
1911
0793a61d 1912retry:
fe4b04fa
PZ
1913 if (!task_function_call(task, __perf_install_in_context, event))
1914 return;
0793a61d 1915
e625cce1 1916 raw_spin_lock_irq(&ctx->lock);
0793a61d 1917 /*
fe4b04fa
PZ
1918 * If we failed to find a running task but the context is active now
1919 * that we've acquired the ctx->lock, retry.
0793a61d 1920 */
fe4b04fa 1921 if (ctx->is_active) {
e625cce1 1922 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1923 goto retry;
1924 }
1925
1926 /*
fe4b04fa
PZ
1927 * Since the task isn't running, it's safe to add the event; holding
1928 * the ctx->lock ensures the task won't get scheduled in.
0793a61d 1929 */
fe4b04fa 1930 add_event_to_ctx(event, ctx);
e625cce1 1931 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
1932}
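/*
 * Illustrative sketch (hypothetical caller, not code from this file): the
 * usual pattern is to obtain a context with find_get_context() and then
 * install the event under ctx->mutex, roughly:
 *
 *	ctx = find_get_context(pmu, task, event->cpu);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	mutex_lock(&ctx->mutex);
 *	perf_install_in_context(ctx, event, event->cpu);
 *	mutex_unlock(&ctx->mutex);
 *
 * The pin count taken by find_get_context() is dropped again afterwards
 * (see perf_unpin_context()).
 */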
1933
fa289bec 1934/*
cdd6c482 1935 * Put an event into inactive state and update time fields.
fa289bec
PM
1936 * Enabling the leader of a group effectively enables all
1937 * the group members that aren't explicitly disabled, so we
1938 * have to update their ->tstamp_enabled also.
1939 * Note: this works for group members as well as group leaders
1940 * since the non-leader members' sibling_lists will be empty.
1941 */
1d9b482e 1942static void __perf_event_mark_enabled(struct perf_event *event)
fa289bec 1943{
cdd6c482 1944 struct perf_event *sub;
4158755d 1945 u64 tstamp = perf_event_time(event);
fa289bec 1946
cdd6c482 1947 event->state = PERF_EVENT_STATE_INACTIVE;
4158755d 1948 event->tstamp_enabled = tstamp - event->total_time_enabled;
9ed6060d 1949 list_for_each_entry(sub, &event->sibling_list, group_entry) {
4158755d
SE
1950 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1951 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
9ed6060d 1952 }
fa289bec
PM
1953}
1954
d859e29f 1955/*
cdd6c482 1956 * Cross CPU call to enable a performance event
d859e29f 1957 */
fe4b04fa 1958static int __perf_event_enable(void *info)
04289bb9 1959{
cdd6c482 1960 struct perf_event *event = info;
cdd6c482
IM
1961 struct perf_event_context *ctx = event->ctx;
1962 struct perf_event *leader = event->group_leader;
108b02cf 1963 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
d859e29f 1964 int err;
04289bb9 1965
06f41796
JO
1966 /*
 1967 * There's a time window between the 'ctx->is_active' check
 1968 * in the perf_event_enable() function and this place, having:
1969 * - IRQs on
1970 * - ctx->lock unlocked
1971 *
1972 * where the task could be killed and 'ctx' deactivated
1973 * by perf_event_exit_task.
1974 */
1975 if (!ctx->is_active)
fe4b04fa 1976 return -EINVAL;
3cbed429 1977
e625cce1 1978 raw_spin_lock(&ctx->lock);
4af4998b 1979 update_context_time(ctx);
d859e29f 1980
cdd6c482 1981 if (event->state >= PERF_EVENT_STATE_INACTIVE)
d859e29f 1982 goto unlock;
e5d1367f
SE
1983
1984 /*
1985 * set current task's cgroup time reference point
1986 */
3f7cce3c 1987 perf_cgroup_set_timestamp(current, ctx);
e5d1367f 1988
1d9b482e 1989 __perf_event_mark_enabled(event);
04289bb9 1990
e5d1367f
SE
1991 if (!event_filter_match(event)) {
1992 if (is_cgroup_event(event))
1993 perf_cgroup_defer_enabled(event);
f4c4176f 1994 goto unlock;
e5d1367f 1995 }
f4c4176f 1996
04289bb9 1997 /*
cdd6c482 1998 * If the event is in a group and isn't the group leader,
d859e29f 1999 * then don't put it on unless the group is on.
04289bb9 2000 */
cdd6c482 2001 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
d859e29f 2002 goto unlock;
3b6f9e5c 2003
cdd6c482 2004 if (!group_can_go_on(event, cpuctx, 1)) {
d859e29f 2005 err = -EEXIST;
e758a33d 2006 } else {
cdd6c482 2007 if (event == leader)
6e37738a 2008 err = group_sched_in(event, cpuctx, ctx);
e758a33d 2009 else
6e37738a 2010 err = event_sched_in(event, cpuctx, ctx);
e758a33d 2011 }
d859e29f
PM
2012
2013 if (err) {
2014 /*
cdd6c482 2015 * If this event can't go on and it's part of a
d859e29f
PM
2016 * group, then the whole group has to come off.
2017 */
9e630205 2018 if (leader != event) {
d859e29f 2019 group_sched_out(leader, cpuctx, ctx);
9e630205
SE
2020 perf_cpu_hrtimer_restart(cpuctx);
2021 }
0d48696f 2022 if (leader->attr.pinned) {
53cfbf59 2023 update_group_times(leader);
cdd6c482 2024 leader->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2025 }
d859e29f
PM
2026 }
2027
9ed6060d 2028unlock:
e625cce1 2029 raw_spin_unlock(&ctx->lock);
fe4b04fa
PZ
2030
2031 return 0;
d859e29f
PM
2032}
2033
2034/*
cdd6c482 2035 * Enable an event.
c93f7669 2036 *
cdd6c482
IM
2037 * If event->ctx is a cloned context, callers must make sure that
2038 * every task struct that event->ctx->task could possibly point to
c93f7669 2039 * remains valid. This condition is satisfied when called through
cdd6c482
IM
2040 * perf_event_for_each_child or perf_event_for_each as described
2041 * for perf_event_disable.
d859e29f 2042 */
44234adc 2043void perf_event_enable(struct perf_event *event)
d859e29f 2044{
cdd6c482 2045 struct perf_event_context *ctx = event->ctx;
d859e29f
PM
2046 struct task_struct *task = ctx->task;
2047
2048 if (!task) {
2049 /*
cdd6c482 2050 * Enable the event on the cpu that it's on
d859e29f 2051 */
fe4b04fa 2052 cpu_function_call(event->cpu, __perf_event_enable, event);
d859e29f
PM
2053 return;
2054 }
2055
e625cce1 2056 raw_spin_lock_irq(&ctx->lock);
cdd6c482 2057 if (event->state >= PERF_EVENT_STATE_INACTIVE)
d859e29f
PM
2058 goto out;
2059
2060 /*
cdd6c482
IM
2061 * If the event is in error state, clear that first.
2062 * That way, if we see the event in error state below, we
d859e29f
PM
2063 * know that it has gone back into error state, as distinct
2064 * from the task having been scheduled away before the
2065 * cross-call arrived.
2066 */
cdd6c482
IM
2067 if (event->state == PERF_EVENT_STATE_ERROR)
2068 event->state = PERF_EVENT_STATE_OFF;
d859e29f 2069
9ed6060d 2070retry:
fe4b04fa 2071 if (!ctx->is_active) {
1d9b482e 2072 __perf_event_mark_enabled(event);
fe4b04fa
PZ
2073 goto out;
2074 }
2075
e625cce1 2076 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa
PZ
2077
2078 if (!task_function_call(task, __perf_event_enable, event))
2079 return;
d859e29f 2080
e625cce1 2081 raw_spin_lock_irq(&ctx->lock);
d859e29f
PM
2082
2083 /*
cdd6c482 2084 * If the context is active and the event is still off,
d859e29f
PM
2085 * we need to retry the cross-call.
2086 */
fe4b04fa
PZ
2087 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
2088 /*
2089 * task could have been flipped by a concurrent
2090 * perf_event_context_sched_out()
2091 */
2092 task = ctx->task;
d859e29f 2093 goto retry;
fe4b04fa 2094 }
fa289bec 2095
9ed6060d 2096out:
e625cce1 2097 raw_spin_unlock_irq(&ctx->lock);
d859e29f 2098}
dcfce4a0 2099EXPORT_SYMBOL_GPL(perf_event_enable);
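/*
 * Example (user space, illustrative): the enable/disable paths above back
 * the PERF_EVENT_IOC_* ioctls on a perf event file descriptor:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/perf_event.h>
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... run the workload being measured ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 */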
d859e29f 2100
26ca5c11 2101int perf_event_refresh(struct perf_event *event, int refresh)
79f14641 2102{
2023b359 2103 /*
cdd6c482 2104 * not supported on inherited events
2023b359 2105 */
2e939d1d 2106 if (event->attr.inherit || !is_sampling_event(event))
2023b359
PZ
2107 return -EINVAL;
2108
cdd6c482
IM
2109 atomic_add(refresh, &event->event_limit);
2110 perf_event_enable(event);
2023b359
PZ
2111
2112 return 0;
79f14641 2113}
26ca5c11 2114EXPORT_SYMBOL_GPL(perf_event_refresh);
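/*
 * Example (user space, illustrative): PERF_EVENT_IOC_REFRESH, which lands
 * in perf_event_refresh() above, enables the event for 'arg' further
 * overflow notifications before it is disabled again:
 *
 *	ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);	// one more overflow, then off
 */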
79f14641 2115
5b0311e1
FW
2116static void ctx_sched_out(struct perf_event_context *ctx,
2117 struct perf_cpu_context *cpuctx,
2118 enum event_type_t event_type)
235c7fc7 2119{
cdd6c482 2120 struct perf_event *event;
db24d33e 2121 int is_active = ctx->is_active;
235c7fc7 2122
db24d33e 2123 ctx->is_active &= ~event_type;
cdd6c482 2124 if (likely(!ctx->nr_events))
facc4307
PZ
2125 return;
2126
4af4998b 2127 update_context_time(ctx);
e5d1367f 2128 update_cgrp_time_from_cpuctx(cpuctx);
5b0311e1 2129 if (!ctx->nr_active)
facc4307 2130 return;
5b0311e1 2131
075e0b00 2132 perf_pmu_disable(ctx->pmu);
db24d33e 2133 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
889ff015
FW
2134 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2135 group_sched_out(event, cpuctx, ctx);
9ed6060d 2136 }
889ff015 2137
db24d33e 2138 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
889ff015 2139 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
8c9ed8e1 2140 group_sched_out(event, cpuctx, ctx);
9ed6060d 2141 }
1b9a644f 2142 perf_pmu_enable(ctx->pmu);
235c7fc7
IM
2143}
2144
564c2b21
PM
2145/*
2146 * Test whether two contexts are equivalent, i.e. whether they
2147 * have both been cloned from the same version of the same context
cdd6c482
IM
2148 * and they both have the same number of enabled events.
2149 * If the number of enabled events is the same, then the set
2150 * of enabled events should be the same, because these are both
2151 * inherited contexts, therefore we can't access individual events
564c2b21 2152 * in them directly with an fd; we can only enable/disable all
cdd6c482 2153 * events via prctl, or enable/disable all events in a family
564c2b21
PM
2154 * via ioctl, which will have the same effect on both contexts.
2155 */
cdd6c482
IM
2156static int context_equiv(struct perf_event_context *ctx1,
2157 struct perf_event_context *ctx2)
564c2b21
PM
2158{
2159 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
ad3a37de 2160 && ctx1->parent_gen == ctx2->parent_gen
25346b93 2161 && !ctx1->pin_count && !ctx2->pin_count;
564c2b21
PM
2162}
2163
cdd6c482
IM
2164static void __perf_event_sync_stat(struct perf_event *event,
2165 struct perf_event *next_event)
bfbd3381
PZ
2166{
2167 u64 value;
2168
cdd6c482 2169 if (!event->attr.inherit_stat)
bfbd3381
PZ
2170 return;
2171
2172 /*
cdd6c482 2173 * Update the event value, we cannot use perf_event_read()
bfbd3381
PZ
2174 * because we're in the middle of a context switch and have IRQs
2175 * disabled, which upsets smp_call_function_single(), however
cdd6c482 2176 * we know the event must be on the current CPU, therefore we
bfbd3381
PZ
2177 * don't need to use it.
2178 */
cdd6c482
IM
2179 switch (event->state) {
2180 case PERF_EVENT_STATE_ACTIVE:
3dbebf15
PZ
2181 event->pmu->read(event);
2182 /* fall-through */
bfbd3381 2183
cdd6c482
IM
2184 case PERF_EVENT_STATE_INACTIVE:
2185 update_event_times(event);
bfbd3381
PZ
2186 break;
2187
2188 default:
2189 break;
2190 }
2191
2192 /*
cdd6c482 2193 * In order to keep per-task stats reliable we need to flip the event
bfbd3381
PZ
2194 * values when we flip the contexts.
2195 */
e7850595
PZ
2196 value = local64_read(&next_event->count);
2197 value = local64_xchg(&event->count, value);
2198 local64_set(&next_event->count, value);
bfbd3381 2199
cdd6c482
IM
2200 swap(event->total_time_enabled, next_event->total_time_enabled);
2201 swap(event->total_time_running, next_event->total_time_running);
19d2e755 2202
bfbd3381 2203 /*
19d2e755 2204 * Since we swizzled the values, update the user visible data too.
bfbd3381 2205 */
cdd6c482
IM
2206 perf_event_update_userpage(event);
2207 perf_event_update_userpage(next_event);
bfbd3381
PZ
2208}
2209
2210#define list_next_entry(pos, member) \
2211 list_entry(pos->member.next, typeof(*pos), member)
2212
cdd6c482
IM
2213static void perf_event_sync_stat(struct perf_event_context *ctx,
2214 struct perf_event_context *next_ctx)
bfbd3381 2215{
cdd6c482 2216 struct perf_event *event, *next_event;
bfbd3381
PZ
2217
2218 if (!ctx->nr_stat)
2219 return;
2220
02ffdbc8
PZ
2221 update_context_time(ctx);
2222
cdd6c482
IM
2223 event = list_first_entry(&ctx->event_list,
2224 struct perf_event, event_entry);
bfbd3381 2225
cdd6c482
IM
2226 next_event = list_first_entry(&next_ctx->event_list,
2227 struct perf_event, event_entry);
bfbd3381 2228
cdd6c482
IM
2229 while (&event->event_entry != &ctx->event_list &&
2230 &next_event->event_entry != &next_ctx->event_list) {
bfbd3381 2231
cdd6c482 2232 __perf_event_sync_stat(event, next_event);
bfbd3381 2233
cdd6c482
IM
2234 event = list_next_entry(event, event_entry);
2235 next_event = list_next_entry(next_event, event_entry);
bfbd3381
PZ
2236 }
2237}
2238
fe4b04fa
PZ
2239static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2240 struct task_struct *next)
0793a61d 2241{
8dc85d54 2242 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482
IM
2243 struct perf_event_context *next_ctx;
2244 struct perf_event_context *parent;
108b02cf 2245 struct perf_cpu_context *cpuctx;
c93f7669 2246 int do_switch = 1;
0793a61d 2247
108b02cf
PZ
2248 if (likely(!ctx))
2249 return;
10989fb2 2250
108b02cf
PZ
2251 cpuctx = __get_cpu_context(ctx);
2252 if (!cpuctx->task_ctx)
0793a61d
TG
2253 return;
2254
c93f7669
PM
2255 rcu_read_lock();
2256 parent = rcu_dereference(ctx->parent_ctx);
8dc85d54 2257 next_ctx = next->perf_event_ctxp[ctxn];
c93f7669
PM
2258 if (parent && next_ctx &&
2259 rcu_dereference(next_ctx->parent_ctx) == parent) {
2260 /*
2261 * Looks like the two contexts are clones, so we might be
2262 * able to optimize the context switch. We lock both
2263 * contexts and check that they are clones under the
2264 * lock (including re-checking that neither has been
2265 * uncloned in the meantime). It doesn't matter which
2266 * order we take the locks because no other cpu could
2267 * be trying to lock both of these tasks.
2268 */
e625cce1
TG
2269 raw_spin_lock(&ctx->lock);
2270 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f7669 2271 if (context_equiv(ctx, next_ctx)) {
665c2142
PZ
2272 /*
2273 * XXX do we need a memory barrier of sorts
cdd6c482 2274 * wrt rcu_dereference() of perf_event_ctxp
665c2142 2275 */
8dc85d54
PZ
2276 task->perf_event_ctxp[ctxn] = next_ctx;
2277 next->perf_event_ctxp[ctxn] = ctx;
c93f7669
PM
2278 ctx->task = next;
2279 next_ctx->task = task;
2280 do_switch = 0;
bfbd3381 2281
cdd6c482 2282 perf_event_sync_stat(ctx, next_ctx);
c93f7669 2283 }
e625cce1
TG
2284 raw_spin_unlock(&next_ctx->lock);
2285 raw_spin_unlock(&ctx->lock);
564c2b21 2286 }
c93f7669 2287 rcu_read_unlock();
564c2b21 2288
c93f7669 2289 if (do_switch) {
facc4307 2290 raw_spin_lock(&ctx->lock);
5b0311e1 2291 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
c93f7669 2292 cpuctx->task_ctx = NULL;
facc4307 2293 raw_spin_unlock(&ctx->lock);
c93f7669 2294 }
0793a61d
TG
2295}
2296
8dc85d54
PZ
2297#define for_each_task_context_nr(ctxn) \
2298 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2299
2300/*
2301 * Called from scheduler to remove the events of the current task,
2302 * with interrupts disabled.
2303 *
2304 * We stop each event and update the event value in event->count.
2305 *
2306 * This does not protect us against NMI, but disable()
2307 * sets the disabled bit in the control field of event _before_
2308 * accessing the event control register. If an NMI hits, then it will
2309 * not restart the event.
2310 */
ab0cce56
JO
2311void __perf_event_task_sched_out(struct task_struct *task,
2312 struct task_struct *next)
8dc85d54
PZ
2313{
2314 int ctxn;
2315
8dc85d54
PZ
2316 for_each_task_context_nr(ctxn)
2317 perf_event_context_sched_out(task, ctxn, next);
e5d1367f
SE
2318
2319 /*
2320 * if cgroup events exist on this CPU, then we need
2321 * to check if we have to switch out PMU state.
2322 * cgroup events are in system-wide mode only
2323 */
2324 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
a8d757ef 2325 perf_cgroup_sched_out(task, next);
8dc85d54
PZ
2326}
2327
04dc2dbb 2328static void task_ctx_sched_out(struct perf_event_context *ctx)
a08b159f 2329{
108b02cf 2330 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
a08b159f 2331
a63eaf34
PM
2332 if (!cpuctx->task_ctx)
2333 return;
012b84da
IM
2334
2335 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2336 return;
2337
04dc2dbb 2338 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
a08b159f
PM
2339 cpuctx->task_ctx = NULL;
2340}
2341
5b0311e1
FW
2342/*
2343 * Called with IRQs disabled
2344 */
2345static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2346 enum event_type_t event_type)
2347{
2348 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb9
IM
2349}
2350
235c7fc7 2351static void
5b0311e1 2352ctx_pinned_sched_in(struct perf_event_context *ctx,
6e37738a 2353 struct perf_cpu_context *cpuctx)
0793a61d 2354{
cdd6c482 2355 struct perf_event *event;
0793a61d 2356
889ff015
FW
2357 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2358 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2359 continue;
5632ab12 2360 if (!event_filter_match(event))
3b6f9e5c
PM
2361 continue;
2362
e5d1367f
SE
2363 /* may need to reset tstamp_enabled */
2364 if (is_cgroup_event(event))
2365 perf_cgroup_mark_enabled(event, ctx);
2366
8c9ed8e1 2367 if (group_can_go_on(event, cpuctx, 1))
6e37738a 2368 group_sched_in(event, cpuctx, ctx);
3b6f9e5c
PM
2369
2370 /*
2371 * If this pinned group hasn't been scheduled,
2372 * put it in error state.
2373 */
cdd6c482
IM
2374 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2375 update_group_times(event);
2376 event->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2377 }
3b6f9e5c 2378 }
5b0311e1
FW
2379}
2380
2381static void
2382ctx_flexible_sched_in(struct perf_event_context *ctx,
6e37738a 2383 struct perf_cpu_context *cpuctx)
5b0311e1
FW
2384{
2385 struct perf_event *event;
2386 int can_add_hw = 1;
3b6f9e5c 2387
889ff015
FW
2388 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2389 /* Ignore events in OFF or ERROR state */
2390 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2391 continue;
04289bb9
IM
2392 /*
2393 * Listen to the 'cpu' scheduling filter constraint
cdd6c482 2394 * of events:
04289bb9 2395 */
5632ab12 2396 if (!event_filter_match(event))
0793a61d
TG
2397 continue;
2398
e5d1367f
SE
2399 /* may need to reset tstamp_enabled */
2400 if (is_cgroup_event(event))
2401 perf_cgroup_mark_enabled(event, ctx);
2402
9ed6060d 2403 if (group_can_go_on(event, cpuctx, can_add_hw)) {
6e37738a 2404 if (group_sched_in(event, cpuctx, ctx))
dd0e6ba2 2405 can_add_hw = 0;
9ed6060d 2406 }
0793a61d 2407 }
5b0311e1
FW
2408}
2409
2410static void
2411ctx_sched_in(struct perf_event_context *ctx,
2412 struct perf_cpu_context *cpuctx,
e5d1367f
SE
2413 enum event_type_t event_type,
2414 struct task_struct *task)
5b0311e1 2415{
e5d1367f 2416 u64 now;
db24d33e 2417 int is_active = ctx->is_active;
e5d1367f 2418
db24d33e 2419 ctx->is_active |= event_type;
5b0311e1 2420 if (likely(!ctx->nr_events))
facc4307 2421 return;
5b0311e1 2422
e5d1367f
SE
2423 now = perf_clock();
2424 ctx->timestamp = now;
3f7cce3c 2425 perf_cgroup_set_timestamp(task, ctx);
5b0311e1
FW
2426 /*
2427 * First go through the list and put on any pinned groups
2428 * in order to give them the best chance of going on.
2429 */
db24d33e 2430 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
6e37738a 2431 ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1
FW
2432
2433 /* Then walk through the lower prio flexible groups */
db24d33e 2434 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
6e37738a 2435 ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7
IM
2436}
2437
329c0e01 2438static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
2439 enum event_type_t event_type,
2440 struct task_struct *task)
329c0e01
FW
2441{
2442 struct perf_event_context *ctx = &cpuctx->ctx;
2443
e5d1367f 2444 ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e01
FW
2445}
2446
e5d1367f
SE
2447static void perf_event_context_sched_in(struct perf_event_context *ctx,
2448 struct task_struct *task)
235c7fc7 2449{
108b02cf 2450 struct perf_cpu_context *cpuctx;
235c7fc7 2451
108b02cf 2452 cpuctx = __get_cpu_context(ctx);
329c0e01
FW
2453 if (cpuctx->task_ctx == ctx)
2454 return;
2455
facc4307 2456 perf_ctx_lock(cpuctx, ctx);
1b9a644f 2457 perf_pmu_disable(ctx->pmu);
329c0e01
FW
2458 /*
2459 * We want to keep the following priority order:
2460 * cpu pinned (that don't need to move), task pinned,
2461 * cpu flexible, task flexible.
2462 */
2463 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2464
1d5f003f
GN
2465 if (ctx->nr_events)
2466 cpuctx->task_ctx = ctx;
9b33fa6b 2467
86b47c25
GN
2468 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2469
facc4307
PZ
2470 perf_pmu_enable(ctx->pmu);
2471 perf_ctx_unlock(cpuctx, ctx);
2472
b5ab4cd5
PZ
2473 /*
2474 * Since these rotations are per-cpu, we need to ensure the
2475 * cpu-context we got scheduled on is actually rotating.
2476 */
108b02cf 2477 perf_pmu_rotate_start(ctx->pmu);
235c7fc7
IM
2478}
2479
d010b332
SE
2480/*
2481 * When sampling the branch stack in system-wide mode, it may be necessary
2482 * to flush the stack on context switch. This happens when the branch
2483 * stack does not tag its entries with the pid of the current task.
2484 * Otherwise it becomes impossible to associate a branch entry with a
2485 * task. This ambiguity is more likely to appear when the branch stack
2486 * supports priv level filtering and the user sets it to monitor only
2487 * at the user level (which could be a useful measurement in system-wide
2488 * mode). In that case, the risk is high of having a branch stack with
2489 * branches from multiple tasks. Flushing may mean dropping the existing
2490 * entries or stashing them somewhere in the PMU specific code layer.
2491 *
2492 * This function provides the context switch callback to the lower code
2493 * layer. It is invoked ONLY when there is at least one system-wide context
2494 * with at least one active event using taken branch sampling.
2495 */
2496static void perf_branch_stack_sched_in(struct task_struct *prev,
2497 struct task_struct *task)
2498{
2499 struct perf_cpu_context *cpuctx;
2500 struct pmu *pmu;
2501 unsigned long flags;
2502
2503 /* no need to flush branch stack if not changing task */
2504 if (prev == task)
2505 return;
2506
2507 local_irq_save(flags);
2508
2509 rcu_read_lock();
2510
2511 list_for_each_entry_rcu(pmu, &pmus, entry) {
2512 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2513
2514 /*
2515 * check if the context has at least one
2516 * event using PERF_SAMPLE_BRANCH_STACK
2517 */
2518 if (cpuctx->ctx.nr_branch_stack > 0
2519 && pmu->flush_branch_stack) {
2520
2521 pmu = cpuctx->ctx.pmu;
2522
2523 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2524
2525 perf_pmu_disable(pmu);
2526
2527 pmu->flush_branch_stack();
2528
2529 perf_pmu_enable(pmu);
2530
2531 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2532 }
2533 }
2534
2535 rcu_read_unlock();
2536
2537 local_irq_restore(flags);
2538}
2539
8dc85d54
PZ
2540/*
2541 * Called from scheduler to add the events of the current task
2542 * with interrupts disabled.
2543 *
2544 * We restore the event value and then enable it.
2545 *
2546 * This does not protect us against NMI, but enable()
2547 * sets the enabled bit in the control field of event _before_
2548 * accessing the event control register. If an NMI hits, then it will
2549 * keep the event running.
2550 */
ab0cce56
JO
2551void __perf_event_task_sched_in(struct task_struct *prev,
2552 struct task_struct *task)
8dc85d54
PZ
2553{
2554 struct perf_event_context *ctx;
2555 int ctxn;
2556
2557 for_each_task_context_nr(ctxn) {
2558 ctx = task->perf_event_ctxp[ctxn];
2559 if (likely(!ctx))
2560 continue;
2561
e5d1367f 2562 perf_event_context_sched_in(ctx, task);
8dc85d54 2563 }
e5d1367f
SE
2564 /*
2565 * if cgroup events exist on this CPU, then we need
2566 * to check if we have to switch in PMU state.
2567 * cgroup events are in system-wide mode only
2568 */
2569 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
a8d757ef 2570 perf_cgroup_sched_in(prev, task);
d010b332
SE
2571
2572 /* check for system-wide branch_stack events */
2573 if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
2574 perf_branch_stack_sched_in(prev, task);
235c7fc7
IM
2575}
2576
abd50713
PZ
2577static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2578{
2579 u64 frequency = event->attr.sample_freq;
2580 u64 sec = NSEC_PER_SEC;
2581 u64 divisor, dividend;
2582
2583 int count_fls, nsec_fls, frequency_fls, sec_fls;
2584
2585 count_fls = fls64(count);
2586 nsec_fls = fls64(nsec);
2587 frequency_fls = fls64(frequency);
2588 sec_fls = 30;
2589
2590 /*
2591 * We got @count in @nsec, with a target of sample_freq HZ
2592 * the target period becomes:
2593 *
2594 * @count * 10^9
2595 * period = -------------------
2596 * @nsec * sample_freq
2597 *
2598 */
2599
2600 /*
2601 * Reduce accuracy by one bit such that @a and @b converge
2602 * to a similar magnitude.
2603 */
fe4b04fa 2604#define REDUCE_FLS(a, b) \
abd50713
PZ
2605do { \
2606 if (a##_fls > b##_fls) { \
2607 a >>= 1; \
2608 a##_fls--; \
2609 } else { \
2610 b >>= 1; \
2611 b##_fls--; \
2612 } \
2613} while (0)
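/*
 * Illustrative example (numbers are made up): with a = 0x300 (a_fls = 10)
 * and b = 0x3 (b_fls = 2), one REDUCE_FLS(a, b) halves 'a' to 0x180 and
 * drops a_fls to 9; repeated application walks the larger magnitude down
 * towards the smaller one, one bit per step.
 */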
2614
2615 /*
2616 * Reduce accuracy until either term fits in a u64, then proceed with
2617 * the other, so that finally we can do a u64/u64 division.
2618 */
2619 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2620 REDUCE_FLS(nsec, frequency);
2621 REDUCE_FLS(sec, count);
2622 }
2623
2624 if (count_fls + sec_fls > 64) {
2625 divisor = nsec * frequency;
2626
2627 while (count_fls + sec_fls > 64) {
2628 REDUCE_FLS(count, sec);
2629 divisor >>= 1;
2630 }
2631
2632 dividend = count * sec;
2633 } else {
2634 dividend = count * sec;
2635
2636 while (nsec_fls + frequency_fls > 64) {
2637 REDUCE_FLS(nsec, frequency);
2638 dividend >>= 1;
2639 }
2640
2641 divisor = nsec * frequency;
2642 }
2643
f6ab91ad
PZ
2644 if (!divisor)
2645 return dividend;
2646
abd50713
PZ
2647 return div64_u64(dividend, divisor);
2648}
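/*
 * Worked example (illustrative): for count = 1,000,000 events observed in
 * nsec = 10,000,000 ns (10 ms) with sample_freq = 1000 Hz, the target
 * period is count * 10^9 / (nsec * sample_freq) = 10^15 / 10^10 = 100,000
 * events per sample; the REDUCE_FLS() dance above only matters when the
 * intermediate products would not fit in 64 bits.
 */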
2649
e050e3f0
SE
2650static DEFINE_PER_CPU(int, perf_throttled_count);
2651static DEFINE_PER_CPU(u64, perf_throttled_seq);
2652
f39d47ff 2653static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
bd2b5b12 2654{
cdd6c482 2655 struct hw_perf_event *hwc = &event->hw;
f6ab91ad 2656 s64 period, sample_period;
bd2b5b12
PZ
2657 s64 delta;
2658
abd50713 2659 period = perf_calculate_period(event, nsec, count);
bd2b5b12
PZ
2660
2661 delta = (s64)(period - hwc->sample_period);
2662 delta = (delta + 7) / 8; /* low pass filter */
2663
2664 sample_period = hwc->sample_period + delta;
2665
2666 if (!sample_period)
2667 sample_period = 1;
2668
bd2b5b12 2669 hwc->sample_period = sample_period;
abd50713 2670
e7850595 2671 if (local64_read(&hwc->period_left) > 8*sample_period) {
f39d47ff
SE
2672 if (disable)
2673 event->pmu->stop(event, PERF_EF_UPDATE);
2674
e7850595 2675 local64_set(&hwc->period_left, 0);
f39d47ff
SE
2676
2677 if (disable)
2678 event->pmu->start(event, PERF_EF_RELOAD);
abd50713 2679 }
bd2b5b12
PZ
2680}
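/*
 * Illustrative numbers (made up): if hwc->sample_period is 10000 and
 * perf_calculate_period() suggests 18000, delta is 8000 and the low pass
 * filter applies only (8000 + 7) / 8 = 1000 of it, moving sample_period to
 * 11000; large corrections are thus spread over several ticks.
 */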
2681
e050e3f0
SE
2682/*
2683 * combine freq adjustment with unthrottling to avoid two passes over the
2684 * events. At the same time, make sure that having freq events does not change
2685 * the rate of unthrottling as that would introduce bias.
2686 */
2687static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2688 int needs_unthr)
60db5e09 2689{
cdd6c482
IM
2690 struct perf_event *event;
2691 struct hw_perf_event *hwc;
e050e3f0 2692 u64 now, period = TICK_NSEC;
abd50713 2693 s64 delta;
60db5e09 2694
e050e3f0
SE
2695 /*
2696 * only need to iterate over all events iff:
2697 * - the context has events in frequency mode (needs freq adjust)
2698 * - there are events to unthrottle on this cpu
2699 */
2700 if (!(ctx->nr_freq || needs_unthr))
0f5a2601
PZ
2701 return;
2702
e050e3f0 2703 raw_spin_lock(&ctx->lock);
f39d47ff 2704 perf_pmu_disable(ctx->pmu);
e050e3f0 2705
03541f8b 2706 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482 2707 if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09
PZ
2708 continue;
2709
5632ab12 2710 if (!event_filter_match(event))
5d27c23d
PZ
2711 continue;
2712
cdd6c482 2713 hwc = &event->hw;
6a24ed6c 2714
e050e3f0
SE
2715 if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
2716 hwc->interrupts = 0;
cdd6c482 2717 perf_log_throttle(event, 1);
a4eaf7f1 2718 event->pmu->start(event, 0);
a78ac325
PZ
2719 }
2720
cdd6c482 2721 if (!event->attr.freq || !event->attr.sample_freq)
60db5e09
PZ
2722 continue;
2723
e050e3f0
SE
2724 /*
2725 * stop the event and update event->count
2726 */
2727 event->pmu->stop(event, PERF_EF_UPDATE);
2728
e7850595 2729 now = local64_read(&event->count);
abd50713
PZ
2730 delta = now - hwc->freq_count_stamp;
2731 hwc->freq_count_stamp = now;
60db5e09 2732
e050e3f0
SE
2733 /*
2734 * restart the event
2735 * reload only if value has changed
f39d47ff
SE
2736 * we have stopped the event so tell that
2737 * to perf_adjust_period() to avoid stopping it
2738 * twice.
e050e3f0 2739 */
abd50713 2740 if (delta > 0)
f39d47ff 2741 perf_adjust_period(event, period, delta, false);
e050e3f0
SE
2742
2743 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
60db5e09 2744 }
e050e3f0 2745
f39d47ff 2746 perf_pmu_enable(ctx->pmu);
e050e3f0 2747 raw_spin_unlock(&ctx->lock);
60db5e09
PZ
2748}
2749
235c7fc7 2750/*
cdd6c482 2751 * Round-robin a context's events:
235c7fc7 2752 */
cdd6c482 2753static void rotate_ctx(struct perf_event_context *ctx)
0793a61d 2754{
dddd3379
TG
2755 /*
2756 * Rotate the first entry last of non-pinned groups. Rotation might be
2757 * disabled by the inheritance code.
2758 */
2759 if (!ctx->rotate_disable)
2760 list_rotate_left(&ctx->flexible_groups);
235c7fc7
IM
2761}
2762
b5ab4cd5 2763/*
e9d2b064
PZ
2764 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2765 * because they're strictly cpu affine and rotate_start is called with IRQs
2766 * disabled, while rotate_context is called from IRQ context.
b5ab4cd5 2767 */
9e630205 2768static int perf_rotate_context(struct perf_cpu_context *cpuctx)
235c7fc7 2769{
8dc85d54 2770 struct perf_event_context *ctx = NULL;
e050e3f0 2771 int rotate = 0, remove = 1;
7fc23a53 2772
b5ab4cd5 2773 if (cpuctx->ctx.nr_events) {
e9d2b064 2774 remove = 0;
b5ab4cd5
PZ
2775 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2776 rotate = 1;
2777 }
235c7fc7 2778
8dc85d54 2779 ctx = cpuctx->task_ctx;
b5ab4cd5 2780 if (ctx && ctx->nr_events) {
e9d2b064 2781 remove = 0;
b5ab4cd5
PZ
2782 if (ctx->nr_events != ctx->nr_active)
2783 rotate = 1;
2784 }
9717e6cd 2785
e050e3f0 2786 if (!rotate)
0f5a2601
PZ
2787 goto done;
2788
facc4307 2789 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644f 2790 perf_pmu_disable(cpuctx->ctx.pmu);
60db5e09 2791
e050e3f0
SE
2792 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2793 if (ctx)
2794 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
0793a61d 2795
e050e3f0
SE
2796 rotate_ctx(&cpuctx->ctx);
2797 if (ctx)
2798 rotate_ctx(ctx);
235c7fc7 2799
e050e3f0 2800 perf_event_sched_in(cpuctx, ctx, current);
235c7fc7 2801
0f5a2601
PZ
2802 perf_pmu_enable(cpuctx->ctx.pmu);
2803 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
b5ab4cd5 2804done:
e9d2b064
PZ
2805 if (remove)
2806 list_del_init(&cpuctx->rotation_list);
9e630205
SE
2807
2808 return rotate;
e9d2b064
PZ
2809}
2810
026249ef
FW
2811#ifdef CONFIG_NO_HZ_FULL
2812bool perf_event_can_stop_tick(void)
2813{
2814 return list_empty(&__get_cpu_var(rotation_list));
2818}
2819#endif
2820
e9d2b064
PZ
2821void perf_event_task_tick(void)
2822{
2823 struct list_head *head = &__get_cpu_var(rotation_list);
2824 struct perf_cpu_context *cpuctx, *tmp;
e050e3f0
SE
2825 struct perf_event_context *ctx;
2826 int throttled;
b5ab4cd5 2827
e9d2b064
PZ
2828 WARN_ON(!irqs_disabled());
2829
e050e3f0
SE
2830 __this_cpu_inc(perf_throttled_seq);
2831 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2832
e9d2b064 2833 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
e050e3f0
SE
2834 ctx = &cpuctx->ctx;
2835 perf_adjust_freq_unthr_context(ctx, throttled);
2836
2837 ctx = cpuctx->task_ctx;
2838 if (ctx)
2839 perf_adjust_freq_unthr_context(ctx, throttled);
e9d2b064 2840 }
0793a61d
TG
2841}
2842
889ff015
FW
2843static int event_enable_on_exec(struct perf_event *event,
2844 struct perf_event_context *ctx)
2845{
2846 if (!event->attr.enable_on_exec)
2847 return 0;
2848
2849 event->attr.enable_on_exec = 0;
2850 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2851 return 0;
2852
1d9b482e 2853 __perf_event_mark_enabled(event);
889ff015
FW
2854
2855 return 1;
2856}
2857
57e7986e 2858/*
cdd6c482 2859 * Enable all of a task's events that have been marked enable-on-exec.
57e7986e
PM
2860 * This expects task == current.
2861 */
8dc85d54 2862static void perf_event_enable_on_exec(struct perf_event_context *ctx)
57e7986e 2863{
cdd6c482 2864 struct perf_event *event;
57e7986e
PM
2865 unsigned long flags;
2866 int enabled = 0;
889ff015 2867 int ret;
57e7986e
PM
2868
2869 local_irq_save(flags);
cdd6c482 2870 if (!ctx || !ctx->nr_events)
57e7986e
PM
2871 goto out;
2872
e566b76e
SE
2873 /*
2874 * We must ctxsw out cgroup events to avoid conflict
2875 * when invoking perf_task_event_sched_in() later on
2876 * in this function. Otherwise we end up trying to
2877 * ctxsw in cgroup events which are already scheduled
2878 * in.
2879 */
a8d757ef 2880 perf_cgroup_sched_out(current, NULL);
57e7986e 2881
e625cce1 2882 raw_spin_lock(&ctx->lock);
04dc2dbb 2883 task_ctx_sched_out(ctx);
57e7986e 2884
b79387ef 2885 list_for_each_entry(event, &ctx->event_list, event_entry) {
889ff015
FW
2886 ret = event_enable_on_exec(event, ctx);
2887 if (ret)
2888 enabled = 1;
57e7986e
PM
2889 }
2890
2891 /*
cdd6c482 2892 * Unclone this context if we enabled any event.
57e7986e 2893 */
71a851b4
PZ
2894 if (enabled)
2895 unclone_ctx(ctx);
57e7986e 2896
e625cce1 2897 raw_spin_unlock(&ctx->lock);
57e7986e 2898
e566b76e
SE
2899 /*
2900 * Also calls ctxswin for cgroup events, if any:
2901 */
e5d1367f 2902 perf_event_context_sched_in(ctx, ctx->task);
9ed6060d 2903out:
57e7986e
PM
2904 local_irq_restore(flags);
2905}
2906
0793a61d 2907/*
cdd6c482 2908 * Cross CPU call to read the hardware event
0793a61d 2909 */
cdd6c482 2910static void __perf_event_read(void *info)
0793a61d 2911{
cdd6c482
IM
2912 struct perf_event *event = info;
2913 struct perf_event_context *ctx = event->ctx;
108b02cf 2914 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
621a01ea 2915
e1ac3614
PM
2916 /*
2917 * If this is a task context, we need to check whether it is
2918 * the current task context of this cpu. If not it has been
2919 * scheduled out before the smp call arrived. In that case
cdd6c482
IM
2920 * event->count would have been updated to a recent sample
2921 * when the event was scheduled out.
e1ac3614
PM
2922 */
2923 if (ctx->task && cpuctx->task_ctx != ctx)
2924 return;
2925
e625cce1 2926 raw_spin_lock(&ctx->lock);
e5d1367f 2927 if (ctx->is_active) {
542e72fc 2928 update_context_time(ctx);
e5d1367f
SE
2929 update_cgrp_time_from_event(event);
2930 }
cdd6c482 2931 update_event_times(event);
542e72fc
PZ
2932 if (event->state == PERF_EVENT_STATE_ACTIVE)
2933 event->pmu->read(event);
e625cce1 2934 raw_spin_unlock(&ctx->lock);
0793a61d
TG
2935}
2936
b5e58793
PZ
2937static inline u64 perf_event_count(struct perf_event *event)
2938{
e7850595 2939 return local64_read(&event->count) + atomic64_read(&event->child_count);
b5e58793
PZ
2940}
2941
cdd6c482 2942static u64 perf_event_read(struct perf_event *event)
0793a61d
TG
2943{
2944 /*
cdd6c482
IM
2945 * If event is enabled and currently active on a CPU, update the
2946 * value in the event structure:
0793a61d 2947 */
cdd6c482
IM
2948 if (event->state == PERF_EVENT_STATE_ACTIVE) {
2949 smp_call_function_single(event->oncpu,
2950 __perf_event_read, event, 1);
2951 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9
PZ
2952 struct perf_event_context *ctx = event->ctx;
2953 unsigned long flags;
2954
e625cce1 2955 raw_spin_lock_irqsave(&ctx->lock, flags);
c530ccd9
SE
2956 /*
2957 * may read while context is not active
2958 * (e.g., thread is blocked), in that case
2959 * we cannot update context time
2960 */
e5d1367f 2961 if (ctx->is_active) {
c530ccd9 2962 update_context_time(ctx);
e5d1367f
SE
2963 update_cgrp_time_from_event(event);
2964 }
cdd6c482 2965 update_event_times(event);
e625cce1 2966 raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d
TG
2967 }
2968
b5e58793 2969 return perf_event_count(event);
0793a61d
TG
2970}
2971
a63eaf34 2972/*
cdd6c482 2973 * Initialize the perf_event context in a task_struct:
a63eaf34 2974 */
eb184479 2975static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34 2976{
e625cce1 2977 raw_spin_lock_init(&ctx->lock);
a63eaf34 2978 mutex_init(&ctx->mutex);
889ff015
FW
2979 INIT_LIST_HEAD(&ctx->pinned_groups);
2980 INIT_LIST_HEAD(&ctx->flexible_groups);
a63eaf34
PM
2981 INIT_LIST_HEAD(&ctx->event_list);
2982 atomic_set(&ctx->refcount, 1);
eb184479
PZ
2983}
2984
2985static struct perf_event_context *
2986alloc_perf_context(struct pmu *pmu, struct task_struct *task)
2987{
2988 struct perf_event_context *ctx;
2989
2990 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2991 if (!ctx)
2992 return NULL;
2993
2994 __perf_event_init_context(ctx);
2995 if (task) {
2996 ctx->task = task;
2997 get_task_struct(task);
0793a61d 2998 }
eb184479
PZ
2999 ctx->pmu = pmu;
3000
3001 return ctx;
a63eaf34
PM
3002}
3003
2ebd4ffb
MH
3004static struct task_struct *
3005find_lively_task_by_vpid(pid_t vpid)
3006{
3007 struct task_struct *task;
3008 int err;
0793a61d
TG
3009
3010 rcu_read_lock();
2ebd4ffb 3011 if (!vpid)
0793a61d
TG
3012 task = current;
3013 else
2ebd4ffb 3014 task = find_task_by_vpid(vpid);
0793a61d
TG
3015 if (task)
3016 get_task_struct(task);
3017 rcu_read_unlock();
3018
3019 if (!task)
3020 return ERR_PTR(-ESRCH);
3021
0793a61d 3022 /* Reuse ptrace permission checks for now. */
c93f7669
PM
3023 err = -EACCES;
3024 if (!ptrace_may_access(task, PTRACE_MODE_READ))
3025 goto errout;
3026
2ebd4ffb
MH
3027 return task;
3028errout:
3029 put_task_struct(task);
3030 return ERR_PTR(err);
3031
3032}
3033
fe4b04fa
PZ
3034/*
3035 * Returns a matching context with refcount and pincount.
3036 */
108b02cf 3037static struct perf_event_context *
38a81da2 3038find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
0793a61d 3039{
cdd6c482 3040 struct perf_event_context *ctx;
22a4f650 3041 struct perf_cpu_context *cpuctx;
25346b93 3042 unsigned long flags;
8dc85d54 3043 int ctxn, err;
0793a61d 3044
22a4ec72 3045 if (!task) {
cdd6c482 3046 /* Must be root to operate on a CPU event: */
0764771d 3047 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
0793a61d
TG
3048 return ERR_PTR(-EACCES);
3049
0793a61d 3050 /*
cdd6c482 3051 * We could be clever and allow attaching an event to an
0793a61d
TG
3052 * offline CPU and activate it when the CPU comes up, but
3053 * that's for later.
3054 */
f6325e30 3055 if (!cpu_online(cpu))
0793a61d
TG
3056 return ERR_PTR(-ENODEV);
3057
108b02cf 3058 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d 3059 ctx = &cpuctx->ctx;
c93f7669 3060 get_ctx(ctx);
fe4b04fa 3061 ++ctx->pin_count;
0793a61d 3062
0793a61d
TG
3063 return ctx;
3064 }
3065
8dc85d54
PZ
3066 err = -EINVAL;
3067 ctxn = pmu->task_ctx_nr;
3068 if (ctxn < 0)
3069 goto errout;
3070
9ed6060d 3071retry:
8dc85d54 3072 ctx = perf_lock_task_context(task, ctxn, &flags);
c93f7669 3073 if (ctx) {
71a851b4 3074 unclone_ctx(ctx);
fe4b04fa 3075 ++ctx->pin_count;
e625cce1 3076 raw_spin_unlock_irqrestore(&ctx->lock, flags);
9137fb28 3077 } else {
eb184479 3078 ctx = alloc_perf_context(pmu, task);
c93f7669
PM
3079 err = -ENOMEM;
3080 if (!ctx)
3081 goto errout;
eb184479 3082
dbe08d82
ON
3083 err = 0;
3084 mutex_lock(&task->perf_event_mutex);
3085 /*
 3086 * If it has already passed perf_event_exit_task(),
 3087 * we must see PF_EXITING; it takes this mutex too.
3088 */
3089 if (task->flags & PF_EXITING)
3090 err = -ESRCH;
3091 else if (task->perf_event_ctxp[ctxn])
3092 err = -EAGAIN;
fe4b04fa 3093 else {
9137fb28 3094 get_ctx(ctx);
fe4b04fa 3095 ++ctx->pin_count;
dbe08d82 3096 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa 3097 }
dbe08d82
ON
3098 mutex_unlock(&task->perf_event_mutex);
3099
3100 if (unlikely(err)) {
9137fb28 3101 put_ctx(ctx);
dbe08d82
ON
3102
3103 if (err == -EAGAIN)
3104 goto retry;
3105 goto errout;
a63eaf34
PM
3106 }
3107 }
3108
0793a61d 3109 return ctx;
c93f7669 3110
9ed6060d 3111errout:
c93f7669 3112 return ERR_PTR(err);
0793a61d
TG
3113}
3114
6fb2915d
LZ
3115static void perf_event_free_filter(struct perf_event *event);
3116
cdd6c482 3117static void free_event_rcu(struct rcu_head *head)
592903cd 3118{
cdd6c482 3119 struct perf_event *event;
592903cd 3120
cdd6c482
IM
3121 event = container_of(head, struct perf_event, rcu_head);
3122 if (event->ns)
3123 put_pid_ns(event->ns);
6fb2915d 3124 perf_event_free_filter(event);
cdd6c482 3125 kfree(event);
592903cd
PZ
3126}
3127
76369139 3128static void ring_buffer_put(struct ring_buffer *rb);
9bb5d40c 3129static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
925d519a 3130
cdd6c482 3131static void free_event(struct perf_event *event)
f1600952 3132{
e360adbe 3133 irq_work_sync(&event->pending);
925d519a 3134
cdd6c482 3135 if (!event->parent) {
82cd6def 3136 if (event->attach_state & PERF_ATTACH_TASK)
c5905afb 3137 static_key_slow_dec_deferred(&perf_sched_events);
3af9e859 3138 if (event->attr.mmap || event->attr.mmap_data)
cdd6c482
IM
3139 atomic_dec(&nr_mmap_events);
3140 if (event->attr.comm)
3141 atomic_dec(&nr_comm_events);
3142 if (event->attr.task)
3143 atomic_dec(&nr_task_events);
927c7a9e
FW
3144 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3145 put_callchain_buffers();
08309379
PZ
3146 if (is_cgroup_event(event)) {
3147 atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
c5905afb 3148 static_key_slow_dec_deferred(&perf_sched_events);
08309379 3149 }
d010b332
SE
3150
3151 if (has_branch_stack(event)) {
3152 static_key_slow_dec_deferred(&perf_sched_events);
3153 /* is system-wide event */
9bb5d40c 3154 if (!(event->attach_state & PERF_ATTACH_TASK)) {
d010b332
SE
3155 atomic_dec(&per_cpu(perf_branch_stack_events,
3156 event->cpu));
9bb5d40c 3157 }
d010b332 3158 }
f344011c 3159 }
9ee318a7 3160
76369139 3161 if (event->rb) {
9bb5d40c
PZ
3162 struct ring_buffer *rb;
3163
3164 /*
3165 * Can happen when we close an event with re-directed output.
3166 *
3167 * Since we have a 0 refcount, perf_mmap_close() will skip
3168 * over us; possibly making our ring_buffer_put() the last.
3169 */
3170 mutex_lock(&event->mmap_mutex);
3171 rb = event->rb;
3172 if (rb) {
3173 rcu_assign_pointer(event->rb, NULL);
3174 ring_buffer_detach(event, rb);
3175 ring_buffer_put(rb); /* could be last */
3176 }
3177 mutex_unlock(&event->mmap_mutex);
a4be7c27
PZ
3178 }
3179
e5d1367f
SE
3180 if (is_cgroup_event(event))
3181 perf_detach_cgroup(event);
3182
cdd6c482
IM
3183 if (event->destroy)
3184 event->destroy(event);
e077df4f 3185
0c67b408
PZ
3186 if (event->ctx)
3187 put_ctx(event->ctx);
3188
cdd6c482 3189 call_rcu(&event->rcu_head, free_event_rcu);
f1600952
PZ
3190}
3191
a66a3052 3192int perf_event_release_kernel(struct perf_event *event)
0793a61d 3193{
cdd6c482 3194 struct perf_event_context *ctx = event->ctx;
0793a61d 3195
ad3a37de 3196 WARN_ON_ONCE(ctx->parent_ctx);
a0507c84
PZ
3197 /*
3198 * There are two ways this annotation is useful:
3199 *
3200 * 1) there is a lock recursion from perf_event_exit_task
3201 * see the comment there.
3202 *
3203 * 2) there is a lock-inversion with mmap_sem through
3204 * perf_event_read_group(), which takes faults while
3205 * holding ctx->mutex, however this is called after
3206 * the last filedesc died, so there is no possibility
3207 * to trigger the AB-BA case.
3208 */
3209 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
050735b0 3210 raw_spin_lock_irq(&ctx->lock);
8a49542c 3211 perf_group_detach(event);
050735b0 3212 raw_spin_unlock_irq(&ctx->lock);
e03a9a55 3213 perf_remove_from_context(event);
d859e29f 3214 mutex_unlock(&ctx->mutex);
0793a61d 3215
cdd6c482 3216 free_event(event);
0793a61d
TG
3217
3218 return 0;
3219}
a66a3052 3220EXPORT_SYMBOL_GPL(perf_event_release_kernel);
0793a61d 3221
a66a3052
PZ
3222/*
3223 * Called when the last reference to the file is gone.
3224 */
a6fa941d 3225static void put_event(struct perf_event *event)
fb0459d7 3226{
8882135b 3227 struct task_struct *owner;
fb0459d7 3228
a6fa941d
AV
3229 if (!atomic_long_dec_and_test(&event->refcount))
3230 return;
fb0459d7 3231
8882135b
PZ
3232 rcu_read_lock();
3233 owner = ACCESS_ONCE(event->owner);
3234 /*
3235 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
3236 * !owner it means the list deletion is complete and we can indeed
3237 * free this event, otherwise we need to serialize on
3238 * owner->perf_event_mutex.
3239 */
3240 smp_read_barrier_depends();
3241 if (owner) {
3242 /*
3243 * Since delayed_put_task_struct() also drops the last
3244 * task reference we can safely take a new reference
3245 * while holding the rcu_read_lock().
3246 */
3247 get_task_struct(owner);
3248 }
3249 rcu_read_unlock();
3250
3251 if (owner) {
3252 mutex_lock(&owner->perf_event_mutex);
3253 /*
 3254 * We have to re-check the event->owner field; if it is cleared
 3255 * we raced with perf_event_exit_task(). Acquiring the mutex
 3256 * ensures they're done, and we can proceed with freeing the
 3257 * event.
3258 */
3259 if (event->owner)
3260 list_del_init(&event->owner_entry);
3261 mutex_unlock(&owner->perf_event_mutex);
3262 put_task_struct(owner);
3263 }
3264
a6fa941d
AV
3265 perf_event_release_kernel(event);
3266}
3267
3268static int perf_release(struct inode *inode, struct file *file)
3269{
3270 put_event(file->private_data);
3271 return 0;
fb0459d7 3272}
fb0459d7 3273
59ed446f 3274u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c0994 3275{
cdd6c482 3276 struct perf_event *child;
e53c0994
PZ
3277 u64 total = 0;
3278
59ed446f
PZ
3279 *enabled = 0;
3280 *running = 0;
3281
6f10581a 3282 mutex_lock(&event->child_mutex);
cdd6c482 3283 total += perf_event_read(event);
59ed446f
PZ
3284 *enabled += event->total_time_enabled +
3285 atomic64_read(&event->child_total_time_enabled);
3286 *running += event->total_time_running +
3287 atomic64_read(&event->child_total_time_running);
3288
3289 list_for_each_entry(child, &event->child_list, child_list) {
cdd6c482 3290 total += perf_event_read(child);
59ed446f
PZ
3291 *enabled += child->total_time_enabled;
3292 *running += child->total_time_running;
3293 }
6f10581a 3294 mutex_unlock(&event->child_mutex);
e53c0994
PZ
3295
3296 return total;
3297}
fb0459d7 3298EXPORT_SYMBOL_GPL(perf_event_read_value);
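/*
 * Illustrative in-kernel usage (a sketch, not a caller from this file):
 *
 *	u64 enabled, running;
 *	u64 count = perf_event_read_value(event, &enabled, &running);
 *
 * 'count' sums the event and all of its children; 'enabled' and 'running'
 * allow the caller to scale the count when the event was multiplexed.
 */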
e53c0994 3299
cdd6c482 3300static int perf_event_read_group(struct perf_event *event,
3dab77fb
PZ
3301 u64 read_format, char __user *buf)
3302{
cdd6c482 3303 struct perf_event *leader = event->group_leader, *sub;
6f10581a
PZ
3304 int n = 0, size = 0, ret = -EFAULT;
3305 struct perf_event_context *ctx = leader->ctx;
abf4868b 3306 u64 values[5];
59ed446f 3307 u64 count, enabled, running;
abf4868b 3308
6f10581a 3309 mutex_lock(&ctx->mutex);
59ed446f 3310 count = perf_event_read_value(leader, &enabled, &running);
3dab77fb
PZ
3311
3312 values[n++] = 1 + leader->nr_siblings;
59ed446f
PZ
3313 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3314 values[n++] = enabled;
3315 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3316 values[n++] = running;
abf4868b
PZ
3317 values[n++] = count;
3318 if (read_format & PERF_FORMAT_ID)
3319 values[n++] = primary_event_id(leader);
3dab77fb
PZ
3320
3321 size = n * sizeof(u64);
3322
3323 if (copy_to_user(buf, values, size))
6f10581a 3324 goto unlock;
3dab77fb 3325
6f10581a 3326 ret = size;
3dab77fb 3327
65abc865 3328 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
abf4868b 3329 n = 0;
3dab77fb 3330
59ed446f 3331 values[n++] = perf_event_read_value(sub, &enabled, &running);
abf4868b
PZ
3332 if (read_format & PERF_FORMAT_ID)
3333 values[n++] = primary_event_id(sub);
3334
3335 size = n * sizeof(u64);
3336
184d3da8 3337 if (copy_to_user(buf + ret, values, size)) {
6f10581a
PZ
3338 ret = -EFAULT;
3339 goto unlock;
3340 }
abf4868b
PZ
3341
3342 ret += size;
3dab77fb 3343 }
6f10581a
PZ
3344unlock:
3345 mutex_unlock(&ctx->mutex);
3dab77fb 3346
abf4868b 3347 return ret;
3dab77fb
PZ
3348}
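/*
 * For reference, the buffer assembled above follows the PERF_FORMAT_GROUP
 * read layout:
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; }		// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }		// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value; { u64 id; } }	// leader, then each sibling;
 *	}				// id present only with PERF_FORMAT_ID
 */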
3349
cdd6c482 3350static int perf_event_read_one(struct perf_event *event,
3dab77fb
PZ
3351 u64 read_format, char __user *buf)
3352{
59ed446f 3353 u64 enabled, running;
3dab77fb
PZ
3354 u64 values[4];
3355 int n = 0;
3356
59ed446f
PZ
3357 values[n++] = perf_event_read_value(event, &enabled, &running);
3358 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3359 values[n++] = enabled;
3360 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3361 values[n++] = running;
3dab77fb 3362 if (read_format & PERF_FORMAT_ID)
cdd6c482 3363 values[n++] = primary_event_id(event);
3dab77fb
PZ
3364
3365 if (copy_to_user(buf, values, n * sizeof(u64)))
3366 return -EFAULT;
3367
3368 return n * sizeof(u64);
3369}
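/*
 * For reference, the non-group layout assembled above is:
 *
 *	{ u64 value;
 *	  { u64 time_enabled; }	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 id; }		// if PERF_FORMAT_ID
 *	}
 */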
3370
0793a61d 3371/*
cdd6c482 3372 * Read the performance event - simple non blocking version for now
0793a61d
TG
3373 */
3374static ssize_t
cdd6c482 3375perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
0793a61d 3376{
cdd6c482 3377 u64 read_format = event->attr.read_format;
3dab77fb 3378 int ret;
0793a61d 3379
3b6f9e5c 3380 /*
cdd6c482 3381 * Return end-of-file for a read on an event that is in
3b6f9e5c
PM
3382 * error state (i.e. because it was pinned but it couldn't be
3383 * scheduled on to the CPU at some point).
3384 */
cdd6c482 3385 if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5c
PM
3386 return 0;
3387
c320c7b7 3388 if (count < event->read_size)
3dab77fb
PZ
3389 return -ENOSPC;
3390
cdd6c482 3391 WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb 3392 if (read_format & PERF_FORMAT_GROUP)
cdd6c482 3393 ret = perf_event_read_group(event, read_format, buf);
3dab77fb 3394 else
cdd6c482 3395 ret = perf_event_read_one(event, read_format, buf);
0793a61d 3396
3dab77fb 3397 return ret;
0793a61d
TG
3398}
3399
0793a61d
TG
3400static ssize_t
3401perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3402{
cdd6c482 3403 struct perf_event *event = file->private_data;
0793a61d 3404
cdd6c482 3405 return perf_read_hw(event, buf, count);
0793a61d
TG
3406}
3407
3408static unsigned int perf_poll(struct file *file, poll_table *wait)
3409{
cdd6c482 3410 struct perf_event *event = file->private_data;
76369139 3411 struct ring_buffer *rb;
c33a0bc4 3412 unsigned int events = POLLHUP;
c7138f37 3413
10c6db11 3414 /*
9bb5d40c
PZ
3415 * Pin the event->rb by taking event->mmap_mutex; otherwise
3416 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
10c6db11
PZ
3417 */
3418 mutex_lock(&event->mmap_mutex);
9bb5d40c
PZ
3419 rb = event->rb;
3420 if (rb)
76369139 3421 events = atomic_xchg(&rb->poll, 0);
10c6db11
PZ
3422 mutex_unlock(&event->mmap_mutex);
3423
cdd6c482 3424 poll_wait(file, &event->waitq, wait);
0793a61d 3425
0793a61d
TG
3426 return events;
3427}
3428
cdd6c482 3429static void perf_event_reset(struct perf_event *event)
6de6a7b9 3430{
cdd6c482 3431 (void)perf_event_read(event);
e7850595 3432 local64_set(&event->count, 0);
cdd6c482 3433 perf_event_update_userpage(event);
3df5edad
PZ
3434}
3435
c93f7669 3436/*
cdd6c482
IM
3437 * Holding the top-level event's child_mutex means that any
3438 * descendant process that has inherited this event will block
3439 * in sync_child_event if it goes to exit, thus satisfying the
3440 * task existence requirements of perf_event_enable/disable.
c93f7669 3441 */
cdd6c482
IM
3442static void perf_event_for_each_child(struct perf_event *event,
3443 void (*func)(struct perf_event *))
3df5edad 3444{
cdd6c482 3445 struct perf_event *child;
3df5edad 3446
cdd6c482
IM
3447 WARN_ON_ONCE(event->ctx->parent_ctx);
3448 mutex_lock(&event->child_mutex);
3449 func(event);
3450 list_for_each_entry(child, &event->child_list, child_list)
3df5edad 3451 func(child);
cdd6c482 3452 mutex_unlock(&event->child_mutex);
3df5edad
PZ
3453}
3454
cdd6c482
IM
3455static void perf_event_for_each(struct perf_event *event,
3456 void (*func)(struct perf_event *))
3df5edad 3457{
cdd6c482
IM
3458 struct perf_event_context *ctx = event->ctx;
3459 struct perf_event *sibling;
3df5edad 3460
75f937f2
PZ
3461 WARN_ON_ONCE(ctx->parent_ctx);
3462 mutex_lock(&ctx->mutex);
cdd6c482 3463 event = event->group_leader;
75f937f2 3464
cdd6c482 3465 perf_event_for_each_child(event, func);
cdd6c482 3466 list_for_each_entry(sibling, &event->sibling_list, group_entry)
724b6daa 3467 perf_event_for_each_child(sibling, func);
75f937f2 3468 mutex_unlock(&ctx->mutex);
6de6a7b9
PZ
3469}
3470
cdd6c482 3471static int perf_event_period(struct perf_event *event, u64 __user *arg)
08247e31 3472{
cdd6c482 3473 struct perf_event_context *ctx = event->ctx;
08247e31
PZ
3474 int ret = 0;
3475 u64 value;
3476
6c7e550f 3477 if (!is_sampling_event(event))
08247e31
PZ
3478 return -EINVAL;
3479
ad0cf347 3480 if (copy_from_user(&value, arg, sizeof(value)))
08247e31
PZ
3481 return -EFAULT;
3482
3483 if (!value)
3484 return -EINVAL;
3485
e625cce1 3486 raw_spin_lock_irq(&ctx->lock);
cdd6c482
IM
3487 if (event->attr.freq) {
3488 if (value > sysctl_perf_event_sample_rate) {
08247e31
PZ
3489 ret = -EINVAL;
3490 goto unlock;
3491 }
3492
cdd6c482 3493 event->attr.sample_freq = value;
08247e31 3494 } else {
cdd6c482
IM
3495 event->attr.sample_period = value;
3496 event->hw.sample_period = value;
08247e31
PZ
3497 }
3498unlock:
e625cce1 3499 raw_spin_unlock_irq(&ctx->lock);
08247e31
PZ
3500
3501 return ret;
3502}
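/*
 * Example (user space, illustrative): PERF_EVENT_IOC_PERIOD, handled by
 * perf_event_period() above, takes a pointer to the new value; for an
 * attr.freq event it is interpreted as a sample frequency instead:
 *
 *	u64 new_period = 200000;
 *	ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period);
 */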
3503
ac9721f3
PZ
3504static const struct file_operations perf_fops;
3505
2903ff01 3506static inline int perf_fget_light(int fd, struct fd *p)
ac9721f3 3507{
2903ff01
AV
3508 struct fd f = fdget(fd);
3509 if (!f.file)
3510 return -EBADF;
ac9721f3 3511
2903ff01
AV
3512 if (f.file->f_op != &perf_fops) {
3513 fdput(f);
3514 return -EBADF;
ac9721f3 3515 }
2903ff01
AV
3516 *p = f;
3517 return 0;
ac9721f3
PZ
3518}
3519
3520static int perf_event_set_output(struct perf_event *event,
3521 struct perf_event *output_event);
6fb2915d 3522static int perf_event_set_filter(struct perf_event *event, void __user *arg);
a4be7c27 3523
d859e29f
PM
3524static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3525{
cdd6c482
IM
3526 struct perf_event *event = file->private_data;
3527 void (*func)(struct perf_event *);
3df5edad 3528 u32 flags = arg;
d859e29f
PM
3529
3530 switch (cmd) {
cdd6c482
IM
3531 case PERF_EVENT_IOC_ENABLE:
3532 func = perf_event_enable;
d859e29f 3533 break;
cdd6c482
IM
3534 case PERF_EVENT_IOC_DISABLE:
3535 func = perf_event_disable;
79f14641 3536 break;
cdd6c482
IM
3537 case PERF_EVENT_IOC_RESET:
3538 func = perf_event_reset;
6de6a7b9 3539 break;
3df5edad 3540
cdd6c482
IM
3541 case PERF_EVENT_IOC_REFRESH:
3542 return perf_event_refresh(event, arg);
08247e31 3543
cdd6c482
IM
3544 case PERF_EVENT_IOC_PERIOD:
3545 return perf_event_period(event, (u64 __user *)arg);
08247e31 3546
cdd6c482 3547 case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3 3548 {
ac9721f3 3549 int ret;
ac9721f3 3550 if (arg != -1) {
2903ff01
AV
3551 struct perf_event *output_event;
3552 struct fd output;
3553 ret = perf_fget_light(arg, &output);
3554 if (ret)
3555 return ret;
3556 output_event = output.file->private_data;
3557 ret = perf_event_set_output(event, output_event);
3558 fdput(output);
3559 } else {
3560 ret = perf_event_set_output(event, NULL);
ac9721f3 3561 }
ac9721f3
PZ
3562 return ret;
3563 }
a4be7c27 3564
6fb2915d
LZ
3565 case PERF_EVENT_IOC_SET_FILTER:
3566 return perf_event_set_filter(event, (void __user *)arg);
3567
d859e29f 3568 default:
3df5edad 3569 return -ENOTTY;
d859e29f 3570 }
3df5edad
PZ
3571
3572 if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482 3573 perf_event_for_each(event, func);
3df5edad 3574 else
cdd6c482 3575 perf_event_for_each_child(event, func);
3df5edad
PZ
3576
3577 return 0;
d859e29f
PM
3578}
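
The PERF_IOC_FLAG_GROUP handling at the bottom of the switch means enable, disable and reset can be applied to a whole event group in one call. A hedged userspace sketch, assuming group_fd is the group leader returned by perf_event_open() (enable_whole_group() is an illustrative helper):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int enable_whole_group(int group_fd)
{
	/* With a zero argument only this event (and its inherited children)
	 * is enabled; PERF_IOC_FLAG_GROUP extends the operation to all
	 * siblings of the group leader. */
	return ioctl(group_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
}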
3579
cdd6c482 3580int perf_event_task_enable(void)
771d7cde 3581{
cdd6c482 3582 struct perf_event *event;
771d7cde 3583
cdd6c482
IM
3584 mutex_lock(&current->perf_event_mutex);
3585 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3586 perf_event_for_each_child(event, perf_event_enable);
3587 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
3588
3589 return 0;
3590}
3591
cdd6c482 3592int perf_event_task_disable(void)
771d7cde 3593{
cdd6c482 3594 struct perf_event *event;
771d7cde 3595
cdd6c482
IM
3596 mutex_lock(&current->perf_event_mutex);
3597 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3598 perf_event_for_each_child(event, perf_event_disable);
3599 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
3600
3601 return 0;
3602}
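
These two helpers are what the PR_TASK_PERF_EVENTS_ENABLE / PR_TASK_PERF_EVENTS_DISABLE prctl() options call into, so a task can pause and resume every counter it has created without keeping the file descriptors at hand. A sketch under that assumption (run_measured() is an illustrative name):

#include <sys/prctl.h>

static void run_measured(void (*interesting_work)(void))
{
	prctl(PR_TASK_PERF_EVENTS_DISABLE);	/* stop counting setup work */
	/* ... uninteresting setup ... */
	prctl(PR_TASK_PERF_EVENTS_ENABLE);	/* count only this region */
	interesting_work();
	prctl(PR_TASK_PERF_EVENTS_DISABLE);
}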
3603
cdd6c482 3604static int perf_event_index(struct perf_event *event)
194002b2 3605{
a4eaf7f1
PZ
3606 if (event->hw.state & PERF_HES_STOPPED)
3607 return 0;
3608
cdd6c482 3609 if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b2
PZ
3610 return 0;
3611
35edc2a5 3612 return event->pmu->event_idx(event);
194002b2
PZ
3613}
3614
c4794295 3615static void calc_timer_values(struct perf_event *event,
e3f3541c 3616 u64 *now,
7f310a5d
EM
3617 u64 *enabled,
3618 u64 *running)
c4794295 3619{
e3f3541c 3620 u64 ctx_time;
c4794295 3621
e3f3541c
PZ
3622 *now = perf_clock();
3623 ctx_time = event->shadow_ctx_time + *now;
c4794295
EM
3624 *enabled = ctx_time - event->tstamp_enabled;
3625 *running = ctx_time - event->tstamp_running;
3626}
3627
c7206205 3628void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
e3f3541c
PZ
3629{
3630}
3631
38ff667b
PZ
3632/*
3633 * Callers need to ensure there can be no nesting of this function, otherwise
3634 * the seqlock logic goes bad. We cannot serialize this because the arch
3635 * code calls this from NMI context.
3636 */
cdd6c482 3637void perf_event_update_userpage(struct perf_event *event)
37d81828 3638{
cdd6c482 3639 struct perf_event_mmap_page *userpg;
76369139 3640 struct ring_buffer *rb;
e3f3541c 3641 u64 enabled, running, now;
38ff667b
PZ
3642
3643 rcu_read_lock();
0d641208
EM
3644 /*
3645 * compute total_time_enabled, total_time_running
3646 * based on snapshot values taken when the event
3647 * was last scheduled in.
3648 *
3649 * we cannot simply call update_context_time()
3650 * because of locking issues, as we can be called in
3651 * NMI context
3652 */
e3f3541c 3653 calc_timer_values(event, &now, &enabled, &running);
76369139
FW
3654 rb = rcu_dereference(event->rb);
3655 if (!rb)
38ff667b
PZ
3656 goto unlock;
3657
76369139 3658 userpg = rb->user_page;
37d81828 3659
7b732a75
PZ
3660 /*
3661 * Disable preemption so as to not let the corresponding user-space
3662 * spin too long if we get preempted.
3663 */
3664 preempt_disable();
37d81828 3665 ++userpg->lock;
92f22a38 3666 barrier();
cdd6c482 3667 userpg->index = perf_event_index(event);
b5e58793 3668 userpg->offset = perf_event_count(event);
365a4038 3669 if (userpg->index)
e7850595 3670 userpg->offset -= local64_read(&event->hw.prev_count);
7b732a75 3671
0d641208 3672 userpg->time_enabled = enabled +
cdd6c482 3673 atomic64_read(&event->child_total_time_enabled);
7f8b4e4e 3674
0d641208 3675 userpg->time_running = running +
cdd6c482 3676 atomic64_read(&event->child_total_time_running);
7f8b4e4e 3677
c7206205 3678 arch_perf_update_userpage(userpg, now);
e3f3541c 3679
92f22a38 3680 barrier();
37d81828 3681 ++userpg->lock;
7b732a75 3682 preempt_enable();
38ff667b 3683unlock:
7b732a75 3684 rcu_read_unlock();
37d81828
PM
3685}
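
Because the update above is bracketed by two increments of userpg->lock (with preemption disabled in between), a userspace reader can get a consistent snapshot by re-reading until it observes the same sequence number before and after. A sketch of that reader, assuming pc points at the first (metadata) page of the event's mmap()ed buffer; read_user_page() is an illustrative helper:

#include <linux/perf_event.h>

static void read_user_page(volatile struct perf_event_mmap_page *pc,
			   __u64 *count, __u64 *enabled, __u64 *running)
{
	__u32 seq;

	do {
		seq = pc->lock;
		__sync_synchronize();		/* pairs with the barrier()s above */

		*count   = pc->offset;		/* plus a PMU read if pc->index != 0 */
		*enabled = pc->time_enabled;
		*running = pc->time_running;

		__sync_synchronize();
	} while (pc->lock != seq);
}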
3686
906010b2
PZ
3687static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3688{
3689 struct perf_event *event = vma->vm_file->private_data;
76369139 3690 struct ring_buffer *rb;
906010b2
PZ
3691 int ret = VM_FAULT_SIGBUS;
3692
3693 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3694 if (vmf->pgoff == 0)
3695 ret = 0;
3696 return ret;
3697 }
3698
3699 rcu_read_lock();
76369139
FW
3700 rb = rcu_dereference(event->rb);
3701 if (!rb)
906010b2
PZ
3702 goto unlock;
3703
3704 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3705 goto unlock;
3706
76369139 3707 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b2
PZ
3708 if (!vmf->page)
3709 goto unlock;
3710
3711 get_page(vmf->page);
3712 vmf->page->mapping = vma->vm_file->f_mapping;
3713 vmf->page->index = vmf->pgoff;
3714
3715 ret = 0;
3716unlock:
3717 rcu_read_unlock();
3718
3719 return ret;
3720}
3721
10c6db11
PZ
3722static void ring_buffer_attach(struct perf_event *event,
3723 struct ring_buffer *rb)
3724{
3725 unsigned long flags;
3726
3727 if (!list_empty(&event->rb_entry))
3728 return;
3729
3730 spin_lock_irqsave(&rb->event_lock, flags);
9bb5d40c
PZ
3731 if (list_empty(&event->rb_entry))
3732 list_add(&event->rb_entry, &rb->event_list);
10c6db11
PZ
3733 spin_unlock_irqrestore(&rb->event_lock, flags);
3734}
3735
9bb5d40c 3736static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
10c6db11
PZ
3737{
3738 unsigned long flags;
3739
3740 if (list_empty(&event->rb_entry))
3741 return;
3742
3743 spin_lock_irqsave(&rb->event_lock, flags);
3744 list_del_init(&event->rb_entry);
3745 wake_up_all(&event->waitq);
3746 spin_unlock_irqrestore(&rb->event_lock, flags);
3747}
3748
3749static void ring_buffer_wakeup(struct perf_event *event)
3750{
3751 struct ring_buffer *rb;
3752
3753 rcu_read_lock();
3754 rb = rcu_dereference(event->rb);
9bb5d40c
PZ
3755 if (rb) {
3756 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3757 wake_up_all(&event->waitq);
3758 }
10c6db11
PZ
3759 rcu_read_unlock();
3760}
3761
76369139 3762static void rb_free_rcu(struct rcu_head *rcu_head)
906010b2 3763{
76369139 3764 struct ring_buffer *rb;
906010b2 3765
76369139
FW
3766 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3767 rb_free(rb);
7b732a75
PZ
3768}
3769
76369139 3770static struct ring_buffer *ring_buffer_get(struct perf_event *event)
7b732a75 3771{
76369139 3772 struct ring_buffer *rb;
7b732a75 3773
ac9721f3 3774 rcu_read_lock();
76369139
FW
3775 rb = rcu_dereference(event->rb);
3776 if (rb) {
3777 if (!atomic_inc_not_zero(&rb->refcount))
3778 rb = NULL;
ac9721f3
PZ
3779 }
3780 rcu_read_unlock();
3781
76369139 3782 return rb;
ac9721f3
PZ
3783}
3784
76369139 3785static void ring_buffer_put(struct ring_buffer *rb)
ac9721f3 3786{
76369139 3787 if (!atomic_dec_and_test(&rb->refcount))
ac9721f3 3788 return;
7b732a75 3789
9bb5d40c 3790 WARN_ON_ONCE(!list_empty(&rb->event_list));
10c6db11 3791
76369139 3792 call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a75
PZ
3793}
3794
3795static void perf_mmap_open(struct vm_area_struct *vma)
3796{
cdd6c482 3797 struct perf_event *event = vma->vm_file->private_data;
7b732a75 3798
cdd6c482 3799 atomic_inc(&event->mmap_count);
9bb5d40c 3800 atomic_inc(&event->rb->mmap_count);
7b732a75
PZ
3801}
3802
9bb5d40c
PZ
3803/*
3804 * A buffer can be mmap()ed multiple times; either directly through the same
3805 * event, or through other events by use of perf_event_set_output().
3806 *
3807 * In order to undo the VM accounting done by perf_mmap() we need to destroy
3808 * the buffer here, where we still have a VM context. This means we need
3809 * to detach all events redirecting to us.
3810 */
7b732a75
PZ
3811static void perf_mmap_close(struct vm_area_struct *vma)
3812{
cdd6c482 3813 struct perf_event *event = vma->vm_file->private_data;
7b732a75 3814
9bb5d40c
PZ
3815 struct ring_buffer *rb = event->rb;
3816 struct user_struct *mmap_user = rb->mmap_user;
3817 int mmap_locked = rb->mmap_locked;
3818 unsigned long size = perf_data_size(rb);
789f90fc 3819
9bb5d40c
PZ
3820 atomic_dec(&rb->mmap_count);
3821
3822 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
3823 return;
3824
3825 /* Detach current event from the buffer. */
3826 rcu_assign_pointer(event->rb, NULL);
3827 ring_buffer_detach(event, rb);
3828 mutex_unlock(&event->mmap_mutex);
3829
3830 /* If there's still other mmap()s of this buffer, we're done. */
3831 if (atomic_read(&rb->mmap_count)) {
3832 ring_buffer_put(rb); /* can't be last */
3833 return;
3834 }
ac9721f3 3835
9bb5d40c
PZ
3836 /*
3837 * No other mmap()s, detach from all other events that might redirect
3838 * into the now unreachable buffer. Somewhat complicated by the
3839 * fact that rb::event_lock otherwise nests inside mmap_mutex.
3840 */
3841again:
3842 rcu_read_lock();
3843 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
3844 if (!atomic_long_inc_not_zero(&event->refcount)) {
3845 /*
3846 * This event is en-route to free_event() which will
3847 * detach it and remove it from the list.
3848 */
3849 continue;
3850 }
3851 rcu_read_unlock();
789f90fc 3852
9bb5d40c
PZ
3853 mutex_lock(&event->mmap_mutex);
3854 /*
3855 * Check we didn't race with perf_event_set_output() which can
3856 * swizzle the rb from under us while we were waiting to
3857 * acquire mmap_mutex.
3858 *
3859 * If we find a different rb, ignore this event; the next
3860 * iteration will no longer find it on the list. We still have
3861 * to restart the iteration to make sure we're not now
3862 * iterating the wrong list.
3863 */
3864 if (event->rb == rb) {
3865 rcu_assign_pointer(event->rb, NULL);
3866 ring_buffer_detach(event, rb);
3867 ring_buffer_put(rb); /* can't be last, we still have one */
26cb63ad 3868 }
cdd6c482 3869 mutex_unlock(&event->mmap_mutex);
9bb5d40c 3870 put_event(event);
ac9721f3 3871
9bb5d40c
PZ
3872 /*
3873 * Restart the iteration; either we're on the wrong list or
3874 * destroyed its integrity by doing a deletion.
3875 */
3876 goto again;
7b732a75 3877 }
9bb5d40c
PZ
3878 rcu_read_unlock();
3879
3880 /*
3881 * It could be there's still a few 0-ref events on the list; they'll
3882 * get cleaned up by free_event() -- they'll also still have their
3883 * ref on the rb and will free it whenever they are done with it.
3884 *
3885 * Aside from that, this buffer is 'fully' detached and unmapped,
3886 * undo the VM accounting.
3887 */
3888
3889 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
3890 vma->vm_mm->pinned_vm -= mmap_locked;
3891 free_uid(mmap_user);
3892
3893 ring_buffer_put(rb); /* could be last */
37d81828
PM
3894}
3895
f0f37e2f 3896static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea8
PZ
3897 .open = perf_mmap_open,
3898 .close = perf_mmap_close,
3899 .fault = perf_mmap_fault,
3900 .page_mkwrite = perf_mmap_fault,
37d81828
PM
3901};
3902
3903static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3904{
cdd6c482 3905 struct perf_event *event = file->private_data;
22a4f650 3906 unsigned long user_locked, user_lock_limit;
789f90fc 3907 struct user_struct *user = current_user();
22a4f650 3908 unsigned long locked, lock_limit;
76369139 3909 struct ring_buffer *rb;
7b732a75
PZ
3910 unsigned long vma_size;
3911 unsigned long nr_pages;
789f90fc 3912 long user_extra, extra;
d57e34fd 3913 int ret = 0, flags = 0;
37d81828 3914
c7920614
PZ
3915 /*
3916 * Don't allow mmap() of inherited per-task counters. This would
3917 * create a performance issue due to all children writing to the
76369139 3918 * same rb.
c7920614
PZ
3919 */
3920 if (event->cpu == -1 && event->attr.inherit)
3921 return -EINVAL;
3922
43a21ea8 3923 if (!(vma->vm_flags & VM_SHARED))
37d81828 3924 return -EINVAL;
7b732a75
PZ
3925
3926 vma_size = vma->vm_end - vma->vm_start;
3927 nr_pages = (vma_size / PAGE_SIZE) - 1;
3928
7730d865 3929 /*
76369139 3930 * If we have rb pages ensure they're a power-of-two number, so we
7730d865
PZ
3931 * can do bitmasks instead of modulo.
3932 */
3933 if (nr_pages != 0 && !is_power_of_2(nr_pages))
37d81828
PM
3934 return -EINVAL;
3935
7b732a75 3936 if (vma_size != PAGE_SIZE * (1 + nr_pages))
37d81828
PM
3937 return -EINVAL;
3938
7b732a75
PZ
3939 if (vma->vm_pgoff != 0)
3940 return -EINVAL;
37d81828 3941
cdd6c482 3942 WARN_ON_ONCE(event->ctx->parent_ctx);
9bb5d40c 3943again:
cdd6c482 3944 mutex_lock(&event->mmap_mutex);
76369139 3945 if (event->rb) {
9bb5d40c 3946 if (event->rb->nr_pages != nr_pages) {
ebb3c4c4 3947 ret = -EINVAL;
9bb5d40c
PZ
3948 goto unlock;
3949 }
3950
3951 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
3952 /*
3953 * Raced against perf_mmap_close() through
3954 * perf_event_set_output(). Try again, hope for better
3955 * luck.
3956 */
3957 mutex_unlock(&event->mmap_mutex);
3958 goto again;
3959 }
3960
ebb3c4c4
PZ
3961 goto unlock;
3962 }
3963
789f90fc 3964 user_extra = nr_pages + 1;
cdd6c482 3965 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
a3862d3f
IM
3966
3967 /*
3968 * Increase the limit linearly with more CPUs:
3969 */
3970 user_lock_limit *= num_online_cpus();
3971
789f90fc 3972 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
c5078f78 3973
789f90fc
PZ
3974 extra = 0;
3975 if (user_locked > user_lock_limit)
3976 extra = user_locked - user_lock_limit;
7b732a75 3977
78d7d407 3978 lock_limit = rlimit(RLIMIT_MEMLOCK);
7b732a75 3979 lock_limit >>= PAGE_SHIFT;
bc3e53f6 3980 locked = vma->vm_mm->pinned_vm + extra;
7b732a75 3981
459ec28a
IM
3982 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3983 !capable(CAP_IPC_LOCK)) {
ebb3c4c4
PZ
3984 ret = -EPERM;
3985 goto unlock;
3986 }
7b732a75 3987
76369139 3988 WARN_ON(event->rb);
906010b2 3989
d57e34fd 3990 if (vma->vm_flags & VM_WRITE)
76369139 3991 flags |= RING_BUFFER_WRITABLE;
d57e34fd 3992
4ec8363d
VW
3993 rb = rb_alloc(nr_pages,
3994 event->attr.watermark ? event->attr.wakeup_watermark : 0,
3995 event->cpu, flags);
3996
76369139 3997 if (!rb) {
ac9721f3 3998 ret = -ENOMEM;
ebb3c4c4 3999 goto unlock;
ac9721f3 4000 }
26cb63ad 4001
9bb5d40c 4002 atomic_set(&rb->mmap_count, 1);
26cb63ad
PZ
4003 rb->mmap_locked = extra;
4004 rb->mmap_user = get_current_user();
43a21ea8 4005
ac9721f3 4006 atomic_long_add(user_extra, &user->locked_vm);
26cb63ad
PZ
4007 vma->vm_mm->pinned_vm += extra;
4008
9bb5d40c 4009 ring_buffer_attach(event, rb);
26cb63ad 4010 rcu_assign_pointer(event->rb, rb);
ac9721f3 4011
9a0f05cb
PZ
4012 perf_event_update_userpage(event);
4013
ebb3c4c4 4014unlock:
ac9721f3
PZ
4015 if (!ret)
4016 atomic_inc(&event->mmap_count);
cdd6c482 4017 mutex_unlock(&event->mmap_mutex);
37d81828 4018
9bb5d40c
PZ
4019 /*
4020 * Since pinned accounting is per vm we cannot allow fork() to copy our
4021 * vma.
4022 */
26cb63ad 4023 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
37d81828 4024 vma->vm_ops = &perf_mmap_vmops;
7b732a75
PZ
4025
4026 return ret;
37d81828
PM
4027}
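
The size checks above mean the only valid mapping is one metadata page, optionally followed by a power-of-two number of data pages, mapped shared at offset 0. A userspace sketch, with map_ring() as an illustrative helper and fd assumed to come from perf_event_open():

#include <unistd.h>
#include <sys/mman.h>

static void *map_ring(int fd, unsigned int order)
{
	/* 1 control page + 2^order data pages, MAP_SHARED, offset 0. */
	size_t len = (size_t)(1 + (1UL << order)) * sysconf(_SC_PAGESIZE);

	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}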
4028
3c446b3d
PZ
4029static int perf_fasync(int fd, struct file *filp, int on)
4030{
496ad9aa 4031 struct inode *inode = file_inode(filp);
cdd6c482 4032 struct perf_event *event = filp->private_data;
3c446b3d
PZ
4033 int retval;
4034
4035 mutex_lock(&inode->i_mutex);
cdd6c482 4036 retval = fasync_helper(fd, filp, on, &event->fasync);
3c446b3d
PZ
4037 mutex_unlock(&inode->i_mutex);
4038
4039 if (retval < 0)
4040 return retval;
4041
4042 return 0;
4043}
4044
0793a61d 4045static const struct file_operations perf_fops = {
3326c1ce 4046 .llseek = no_llseek,
0793a61d
TG
4047 .release = perf_release,
4048 .read = perf_read,
4049 .poll = perf_poll,
d859e29f
PM
4050 .unlocked_ioctl = perf_ioctl,
4051 .compat_ioctl = perf_ioctl,
37d81828 4052 .mmap = perf_mmap,
3c446b3d 4053 .fasync = perf_fasync,
0793a61d
TG
4054};
4055
925d519a 4056/*
cdd6c482 4057 * Perf event wakeup
925d519a
PZ
4058 *
4059 * If there's data, ensure we set the poll() state and publish everything
4060 * to user-space before waking everybody up.
4061 */
4062
cdd6c482 4063void perf_event_wakeup(struct perf_event *event)
925d519a 4064{
10c6db11 4065 ring_buffer_wakeup(event);
4c9e2542 4066
cdd6c482
IM
4067 if (event->pending_kill) {
4068 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
4069 event->pending_kill = 0;
4c9e2542 4070 }
925d519a
PZ
4071}
4072
e360adbe 4073static void perf_pending_event(struct irq_work *entry)
79f14641 4074{
cdd6c482
IM
4075 struct perf_event *event = container_of(entry,
4076 struct perf_event, pending);
79f14641 4077
cdd6c482
IM
4078 if (event->pending_disable) {
4079 event->pending_disable = 0;
4080 __perf_event_disable(event);
79f14641
PZ
4081 }
4082
cdd6c482
IM
4083 if (event->pending_wakeup) {
4084 event->pending_wakeup = 0;
4085 perf_event_wakeup(event);
79f14641
PZ
4086 }
4087}
4088
39447b38
ZY
4089/*
4090 * We assume there is only KVM supporting the callbacks.
4091 * Later on, we might change it to a list if there is
4092 * another virtualization implementation supporting the callbacks.
4093 */
4094struct perf_guest_info_callbacks *perf_guest_cbs;
4095
4096int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4097{
4098 perf_guest_cbs = cbs;
4099 return 0;
4100}
4101EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
4102
4103int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4104{
4105 perf_guest_cbs = NULL;
4106 return 0;
4107}
4108EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
4109
4018994f
JO
4110static void
4111perf_output_sample_regs(struct perf_output_handle *handle,
4112 struct pt_regs *regs, u64 mask)
4113{
4114 int bit;
4115
4116 for_each_set_bit(bit, (const unsigned long *) &mask,
4117 sizeof(mask) * BITS_PER_BYTE) {
4118 u64 val;
4119
4120 val = perf_reg_value(regs, bit);
4121 perf_output_put(handle, val);
4122 }
4123}
4124
4125static void perf_sample_regs_user(struct perf_regs_user *regs_user,
4126 struct pt_regs *regs)
4127{
4128 if (!user_mode(regs)) {
4129 if (current->mm)
4130 regs = task_pt_regs(current);
4131 else
4132 regs = NULL;
4133 }
4134
4135 if (regs) {
4136 regs_user->regs = regs;
4137 regs_user->abi = perf_reg_abi(current);
4138 }
4139}
4140
c5ebcedb
JO
4141/*
4142 * Get remaining task size from user stack pointer.
4143 *
4144 * It'd be better to take the stack vma map and limit this more
4145 * precisely, but there's no way to get it safely under interrupt,
4146 * so we use TASK_SIZE as the limit.
4147 */
4148static u64 perf_ustack_task_size(struct pt_regs *regs)
4149{
4150 unsigned long addr = perf_user_stack_pointer(regs);
4151
4152 if (!addr || addr >= TASK_SIZE)
4153 return 0;
4154
4155 return TASK_SIZE - addr;
4156}
4157
4158static u16
4159perf_sample_ustack_size(u16 stack_size, u16 header_size,
4160 struct pt_regs *regs)
4161{
4162 u64 task_size;
4163
4164 /* No regs, no stack pointer, no dump. */
4165 if (!regs)
4166 return 0;
4167
4168 /*
4169 * Check whether the requested stack size fits within:
4170 * - TASK_SIZE
4171 * If it doesn't, we limit the dump size to TASK_SIZE.
4172 *
4173 * - the remaining sample size
4174 * If it doesn't, we trim the stack size so the dump
4175 * fits into the remaining sample size.
4176 */
4177
4178 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
4179 stack_size = min(stack_size, (u16) task_size);
4180
4181 /* Current header size plus static size and dynamic size. */
4182 header_size += 2 * sizeof(u64);
4183
4184 /* Do we fit in with the current stack dump size? */
4185 if ((u16) (header_size + stack_size) < header_size) {
4186 /*
4187 * If we overflow the maximum size for the sample,
4188 * we customize the stack dump size to fit in.
4189 */
4190 stack_size = USHRT_MAX - header_size - sizeof(u64);
4191 stack_size = round_up(stack_size, sizeof(u64));
4192 }
4193
4194 return stack_size;
4195}
4196
4197static void
4198perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
4199 struct pt_regs *regs)
4200{
4201 /* Case of a kernel thread, nothing to dump */
4202 if (!regs) {
4203 u64 size = 0;
4204 perf_output_put(handle, size);
4205 } else {
4206 unsigned long sp;
4207 unsigned int rem;
4208 u64 dyn_size;
4209
4210 /*
4211 * We dump:
4212 * static size
4213 * - the size requested by the user or the best one we can
4214 * fit into the sample max size
4215 * data
4216 * - user stack dump data
4217 * dynamic size
4218 * - the actual dumped size
4219 */
4220
4221 /* Static size. */
4222 perf_output_put(handle, dump_size);
4223
4224 /* Data. */
4225 sp = perf_user_stack_pointer(regs);
4226 rem = __output_copy_user(handle, (void *) sp, dump_size);
4227 dyn_size = dump_size - rem;
4228
4229 perf_output_skip(handle, rem);
4230
4231 /* Dynamic size. */
4232 perf_output_put(handle, dyn_size);
4233 }
4234}
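
So, on the reader side, the PERF_SAMPLE_STACK_USER portion of a sample is either a single zero u64 (kernel thread) or the static size, that many raw stack bytes, and finally the dynamic size. A hedged parsing sketch; parse_stack_user() is an illustrative helper, not part of any uapi header:

#include <stdint.h>

static const uint64_t *parse_stack_user(const uint64_t *p,
					const void **stack, uint64_t *dyn_size)
{
	uint64_t size = *p++;			/* static dump size */

	if (!size) {				/* nothing was dumped */
		*stack = NULL;
		*dyn_size = 0;
		return p;
	}

	*stack = p;				/* "size" bytes of raw stack data */
	p = (const uint64_t *)((const char *)p + size);
	*dyn_size = *p++;			/* bytes actually copied */
	return p;
}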
4235
c980d109
ACM
4236static void __perf_event_header__init_id(struct perf_event_header *header,
4237 struct perf_sample_data *data,
4238 struct perf_event *event)
6844c09d
ACM
4239{
4240 u64 sample_type = event->attr.sample_type;
4241
4242 data->type = sample_type;
4243 header->size += event->id_header_size;
4244
4245 if (sample_type & PERF_SAMPLE_TID) {
4246 /* namespace issues */
4247 data->tid_entry.pid = perf_event_pid(event, current);
4248 data->tid_entry.tid = perf_event_tid(event, current);
4249 }
4250
4251 if (sample_type & PERF_SAMPLE_TIME)
4252 data->time = perf_clock();
4253
4254 if (sample_type & PERF_SAMPLE_ID)
4255 data->id = primary_event_id(event);
4256
4257 if (sample_type & PERF_SAMPLE_STREAM_ID)
4258 data->stream_id = event->id;
4259
4260 if (sample_type & PERF_SAMPLE_CPU) {
4261 data->cpu_entry.cpu = raw_smp_processor_id();
4262 data->cpu_entry.reserved = 0;
4263 }
4264}
4265
76369139
FW
4266void perf_event_header__init_id(struct perf_event_header *header,
4267 struct perf_sample_data *data,
4268 struct perf_event *event)
c980d109
ACM
4269{
4270 if (event->attr.sample_id_all)
4271 __perf_event_header__init_id(header, data, event);
4272}
4273
4274static void __perf_event__output_id_sample(struct perf_output_handle *handle,
4275 struct perf_sample_data *data)
4276{
4277 u64 sample_type = data->type;
4278
4279 if (sample_type & PERF_SAMPLE_TID)
4280 perf_output_put(handle, data->tid_entry);
4281
4282 if (sample_type & PERF_SAMPLE_TIME)
4283 perf_output_put(handle, data->time);
4284
4285 if (sample_type & PERF_SAMPLE_ID)
4286 perf_output_put(handle, data->id);
4287
4288 if (sample_type & PERF_SAMPLE_STREAM_ID)
4289 perf_output_put(handle, data->stream_id);
4290
4291 if (sample_type & PERF_SAMPLE_CPU)
4292 perf_output_put(handle, data->cpu_entry);
4293}
4294
76369139
FW
4295void perf_event__output_id_sample(struct perf_event *event,
4296 struct perf_output_handle *handle,
4297 struct perf_sample_data *sample)
c980d109
ACM
4298{
4299 if (event->attr.sample_id_all)
4300 __perf_event__output_id_sample(handle, sample);
4301}
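
When attr.sample_id_all is set, these helpers append the same identification fields to non-sample records (MMAP, COMM, FORK/EXIT, READ, THROTTLE) in the order written above. Sketched as a flat struct, assuming all five bits were requested; fields whose PERF_SAMPLE_* bit is not set are simply absent (the struct name is illustrative):

#include <stdint.h>

struct sample_id_trailer {
	uint32_t pid, tid;	/* PERF_SAMPLE_TID */
	uint64_t time;		/* PERF_SAMPLE_TIME */
	uint64_t id;		/* PERF_SAMPLE_ID */
	uint64_t stream_id;	/* PERF_SAMPLE_STREAM_ID */
	uint32_t cpu, res;	/* PERF_SAMPLE_CPU */
};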
4302
3dab77fb 4303static void perf_output_read_one(struct perf_output_handle *handle,
eed01528
SE
4304 struct perf_event *event,
4305 u64 enabled, u64 running)
3dab77fb 4306{
cdd6c482 4307 u64 read_format = event->attr.read_format;
3dab77fb
PZ
4308 u64 values[4];
4309 int n = 0;
4310
b5e58793 4311 values[n++] = perf_event_count(event);
3dab77fb 4312 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
eed01528 4313 values[n++] = enabled +
cdd6c482 4314 atomic64_read(&event->child_total_time_enabled);
3dab77fb
PZ
4315 }
4316 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
eed01528 4317 values[n++] = running +
cdd6c482 4318 atomic64_read(&event->child_total_time_running);
3dab77fb
PZ
4319 }
4320 if (read_format & PERF_FORMAT_ID)
cdd6c482 4321 values[n++] = primary_event_id(event);
3dab77fb 4322
76369139 4323 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
4324}
4325
4326/*
cdd6c482 4327 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3dab77fb
PZ
4328 */
4329static void perf_output_read_group(struct perf_output_handle *handle,
eed01528
SE
4330 struct perf_event *event,
4331 u64 enabled, u64 running)
3dab77fb 4332{
cdd6c482
IM
4333 struct perf_event *leader = event->group_leader, *sub;
4334 u64 read_format = event->attr.read_format;
3dab77fb
PZ
4335 u64 values[5];
4336 int n = 0;
4337
4338 values[n++] = 1 + leader->nr_siblings;
4339
4340 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
eed01528 4341 values[n++] = enabled;
3dab77fb
PZ
4342
4343 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
eed01528 4344 values[n++] = running;
3dab77fb 4345
cdd6c482 4346 if (leader != event)
3dab77fb
PZ
4347 leader->pmu->read(leader);
4348
b5e58793 4349 values[n++] = perf_event_count(leader);
3dab77fb 4350 if (read_format & PERF_FORMAT_ID)
cdd6c482 4351 values[n++] = primary_event_id(leader);
3dab77fb 4352
76369139 4353 __output_copy(handle, values, n * sizeof(u64));
3dab77fb 4354
65abc865 4355 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3dab77fb
PZ
4356 n = 0;
4357
cdd6c482 4358 if (sub != event)
3dab77fb
PZ
4359 sub->pmu->read(sub);
4360
b5e58793 4361 values[n++] = perf_event_count(sub);
3dab77fb 4362 if (read_format & PERF_FORMAT_ID)
cdd6c482 4363 values[n++] = primary_event_id(sub);
3dab77fb 4364
76369139 4365 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
4366 }
4367}
4368
eed01528
SE
4369#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
4370 PERF_FORMAT_TOTAL_TIME_RUNNING)
4371
3dab77fb 4372static void perf_output_read(struct perf_output_handle *handle,
cdd6c482 4373 struct perf_event *event)
3dab77fb 4374{
e3f3541c 4375 u64 enabled = 0, running = 0, now;
eed01528
SE
4376 u64 read_format = event->attr.read_format;
4377
4378 /*
4379 * compute total_time_enabled, total_time_running
4380 * based on snapshot values taken when the event
4381 * was last scheduled in.
4382 *
4383 * we cannot simply call update_context_time()
4384 * because of locking issues, as we are called in
4385 * NMI context
4386 */
c4794295 4387 if (read_format & PERF_FORMAT_TOTAL_TIMES)
e3f3541c 4388 calc_timer_values(event, &now, &enabled, &running);
eed01528 4389
cdd6c482 4390 if (event->attr.read_format & PERF_FORMAT_GROUP)
eed01528 4391 perf_output_read_group(handle, event, enabled, running);
3dab77fb 4392 else
eed01528 4393 perf_output_read_one(handle, event, enabled, running);
3dab77fb
PZ
4394}
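
The same layouts are what a read() on the event file descriptor returns. Sketched as structs for the common case where both PERF_FORMAT_TOTAL_TIME_* flags and PERF_FORMAT_ID are requested (the struct names are illustrative, not from a uapi header):

#include <stdint.h>

struct read_one {			/* without PERF_FORMAT_GROUP */
	uint64_t value;
	uint64_t time_enabled;
	uint64_t time_running;
	uint64_t id;
};

struct read_group {			/* with PERF_FORMAT_GROUP */
	uint64_t nr;			/* number of entries below */
	uint64_t time_enabled;
	uint64_t time_running;
	struct {
		uint64_t value;
		uint64_t id;
	} values[];			/* nr entries, leader first */
};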
4395
5622f295
MM
4396void perf_output_sample(struct perf_output_handle *handle,
4397 struct perf_event_header *header,
4398 struct perf_sample_data *data,
cdd6c482 4399 struct perf_event *event)
5622f295
MM
4400{
4401 u64 sample_type = data->type;
4402
4403 perf_output_put(handle, *header);
4404
4405 if (sample_type & PERF_SAMPLE_IP)
4406 perf_output_put(handle, data->ip);
4407
4408 if (sample_type & PERF_SAMPLE_TID)
4409 perf_output_put(handle, data->tid_entry);
4410
4411 if (sample_type & PERF_SAMPLE_TIME)
4412 perf_output_put(handle, data->time);
4413
4414 if (sample_type & PERF_SAMPLE_ADDR)
4415 perf_output_put(handle, data->addr);
4416
4417 if (sample_type & PERF_SAMPLE_ID)
4418 perf_output_put(handle, data->id);
4419
4420 if (sample_type & PERF_SAMPLE_STREAM_ID)
4421 perf_output_put(handle, data->stream_id);
4422
4423 if (sample_type & PERF_SAMPLE_CPU)
4424 perf_output_put(handle, data->cpu_entry);
4425
4426 if (sample_type & PERF_SAMPLE_PERIOD)
4427 perf_output_put(handle, data->period);
4428
4429 if (sample_type & PERF_SAMPLE_READ)
cdd6c482 4430 perf_output_read(handle, event);
5622f295
MM
4431
4432 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4433 if (data->callchain) {
4434 int size = 1;
4435
4436 if (data->callchain)
4437 size += data->callchain->nr;
4438
4439 size *= sizeof(u64);
4440
76369139 4441 __output_copy(handle, data->callchain, size);
5622f295
MM
4442 } else {
4443 u64 nr = 0;
4444 perf_output_put(handle, nr);
4445 }
4446 }
4447
4448 if (sample_type & PERF_SAMPLE_RAW) {
4449 if (data->raw) {
4450 perf_output_put(handle, data->raw->size);
76369139
FW
4451 __output_copy(handle, data->raw->data,
4452 data->raw->size);
5622f295
MM
4453 } else {
4454 struct {
4455 u32 size;
4456 u32 data;
4457 } raw = {
4458 .size = sizeof(u32),
4459 .data = 0,
4460 };
4461 perf_output_put(handle, raw);
4462 }
4463 }
a7ac67ea 4464
bce38cd5
SE
4465 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4466 if (data->br_stack) {
4467 size_t size;
4468
4469 size = data->br_stack->nr
4470 * sizeof(struct perf_branch_entry);
4471
4472 perf_output_put(handle, data->br_stack->nr);
4473 perf_output_copy(handle, data->br_stack->entries, size);
4474 } else {
4475 /*
4476 * we always store at least the value of nr
4477 */
4478 u64 nr = 0;
4479 perf_output_put(handle, nr);
4480 }
4481 }
4018994f
JO
4482
4483 if (sample_type & PERF_SAMPLE_REGS_USER) {
4484 u64 abi = data->regs_user.abi;
4485
4486 /*
4487 * If there are no regs to dump, notice it through
4488 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
4489 */
4490 perf_output_put(handle, abi);
4491
4492 if (abi) {
4493 u64 mask = event->attr.sample_regs_user;
4494 perf_output_sample_regs(handle,
4495 data->regs_user.regs,
4496 mask);
4497 }
4498 }
c5ebcedb 4499
a5cdd40c 4500 if (sample_type & PERF_SAMPLE_STACK_USER) {
c5ebcedb
JO
4501 perf_output_sample_ustack(handle,
4502 data->stack_user_size,
4503 data->regs_user.regs);
a5cdd40c 4504 }
c3feedf2
AK
4505
4506 if (sample_type & PERF_SAMPLE_WEIGHT)
4507 perf_output_put(handle, data->weight);
d6be9ad6
SE
4508
4509 if (sample_type & PERF_SAMPLE_DATA_SRC)
4510 perf_output_put(handle, data->data_src.val);
a5cdd40c
PZ
4511
4512 if (!event->attr.watermark) {
4513 int wakeup_events = event->attr.wakeup_events;
4514
4515 if (wakeup_events) {
4516 struct ring_buffer *rb = handle->rb;
4517 int events = local_inc_return(&rb->events);
4518
4519 if (events >= wakeup_events) {
4520 local_sub(wakeup_events, &rb->events);
4521 local_inc(&rb->wakeup);
4522 }
4523 }
4524 }
5622f295
MM
4525}
4526
4527void perf_prepare_sample(struct perf_event_header *header,
4528 struct perf_sample_data *data,
cdd6c482 4529 struct perf_event *event,
5622f295 4530 struct pt_regs *regs)
7b732a75 4531{
cdd6c482 4532 u64 sample_type = event->attr.sample_type;
7b732a75 4533
cdd6c482 4534 header->type = PERF_RECORD_SAMPLE;
c320c7b7 4535 header->size = sizeof(*header) + event->header_size;
5622f295
MM
4536
4537 header->misc = 0;
4538 header->misc |= perf_misc_flags(regs);
6fab0192 4539
c980d109 4540 __perf_event_header__init_id(header, data, event);
6844c09d 4541
c320c7b7 4542 if (sample_type & PERF_SAMPLE_IP)
5622f295
MM
4543 data->ip = perf_instruction_pointer(regs);
4544
b23f3325 4545 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5622f295 4546 int size = 1;
394ee076 4547
e6dab5ff 4548 data->callchain = perf_callchain(event, regs);
5622f295
MM
4549
4550 if (data->callchain)
4551 size += data->callchain->nr;
4552
4553 header->size += size * sizeof(u64);
394ee076
PZ
4554 }
4555
3a43ce68 4556 if (sample_type & PERF_SAMPLE_RAW) {
a044560c
PZ
4557 int size = sizeof(u32);
4558
4559 if (data->raw)
4560 size += data->raw->size;
4561 else
4562 size += sizeof(u32);
4563
4564 WARN_ON_ONCE(size & (sizeof(u64)-1));
5622f295 4565 header->size += size;
7f453c24 4566 }
bce38cd5
SE
4567
4568 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4569 int size = sizeof(u64); /* nr */
4570 if (data->br_stack) {
4571 size += data->br_stack->nr
4572 * sizeof(struct perf_branch_entry);
4573 }
4574 header->size += size;
4575 }
4018994f
JO
4576
4577 if (sample_type & PERF_SAMPLE_REGS_USER) {
4578 /* regs dump ABI info */
4579 int size = sizeof(u64);
4580
4581 perf_sample_regs_user(&data->regs_user, regs);
4582
4583 if (data->regs_user.regs) {
4584 u64 mask = event->attr.sample_regs_user;
4585 size += hweight64(mask) * sizeof(u64);
4586 }
4587
4588 header->size += size;
4589 }
c5ebcedb
JO
4590
4591 if (sample_type & PERF_SAMPLE_STACK_USER) {
4592 /*
4593 * The PERF_SAMPLE_STACK_USER bit either needs to always be
4594 * processed last, or an additional check must be added
4595 * whenever a new sample type is introduced, because the stack
4596 * dump could eat up the rest of the sample size.
4597 */
4598 struct perf_regs_user *uregs = &data->regs_user;
4599 u16 stack_size = event->attr.sample_stack_user;
4600 u16 size = sizeof(u64);
4601
4602 if (!uregs->abi)
4603 perf_sample_regs_user(uregs, regs);
4604
4605 stack_size = perf_sample_ustack_size(stack_size, header->size,
4606 uregs->regs);
4607
4608 /*
4609 * If there is something to dump, add space for the dump
4610 * itself and for the field that tells the dynamic size,
4611 * which is how much was actually dumped.
4612 */
4613 if (stack_size)
4614 size += sizeof(u64) + stack_size;
4615
4616 data->stack_user_size = stack_size;
4617 header->size += size;
4618 }
5622f295 4619}
7f453c24 4620
a8b0ca17 4621static void perf_event_output(struct perf_event *event,
5622f295
MM
4622 struct perf_sample_data *data,
4623 struct pt_regs *regs)
4624{
4625 struct perf_output_handle handle;
4626 struct perf_event_header header;
689802b2 4627
927c7a9e
FW
4628 /* protect the callchain buffers */
4629 rcu_read_lock();
4630
cdd6c482 4631 perf_prepare_sample(&header, data, event, regs);
5c148194 4632
a7ac67ea 4633 if (perf_output_begin(&handle, event, header.size))
927c7a9e 4634 goto exit;
0322cd6e 4635
cdd6c482 4636 perf_output_sample(&handle, &header, data, event);
f413cdb8 4637
8a057d84 4638 perf_output_end(&handle);
927c7a9e
FW
4639
4640exit:
4641 rcu_read_unlock();
0322cd6e
PZ
4642}
4643
38b200d6 4644/*
cdd6c482 4645 * read event_id
38b200d6
PZ
4646 */
4647
4648struct perf_read_event {
4649 struct perf_event_header header;
4650
4651 u32 pid;
4652 u32 tid;
38b200d6
PZ
4653};
4654
4655static void
cdd6c482 4656perf_event_read_event(struct perf_event *event,
38b200d6
PZ
4657 struct task_struct *task)
4658{
4659 struct perf_output_handle handle;
c980d109 4660 struct perf_sample_data sample;
dfc65094 4661 struct perf_read_event read_event = {
38b200d6 4662 .header = {
cdd6c482 4663 .type = PERF_RECORD_READ,
38b200d6 4664 .misc = 0,
c320c7b7 4665 .size = sizeof(read_event) + event->read_size,
38b200d6 4666 },
cdd6c482
IM
4667 .pid = perf_event_pid(event, task),
4668 .tid = perf_event_tid(event, task),
38b200d6 4669 };
3dab77fb 4670 int ret;
38b200d6 4671
c980d109 4672 perf_event_header__init_id(&read_event.header, &sample, event);
a7ac67ea 4673 ret = perf_output_begin(&handle, event, read_event.header.size);
38b200d6
PZ
4674 if (ret)
4675 return;
4676
dfc65094 4677 perf_output_put(&handle, read_event);
cdd6c482 4678 perf_output_read(&handle, event);
c980d109 4679 perf_event__output_id_sample(event, &handle, &sample);
3dab77fb 4680
38b200d6
PZ
4681 perf_output_end(&handle);
4682}
4683
52d857a8
JO
4684typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
4685
4686static void
4687perf_event_aux_ctx(struct perf_event_context *ctx,
52d857a8
JO
4688 perf_event_aux_output_cb output,
4689 void *data)
4690{
4691 struct perf_event *event;
4692
4693 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4694 if (event->state < PERF_EVENT_STATE_INACTIVE)
4695 continue;
4696 if (!event_filter_match(event))
4697 continue;
67516844 4698 output(event, data);
52d857a8
JO
4699 }
4700}
4701
4702static void
67516844 4703perf_event_aux(perf_event_aux_output_cb output, void *data,
52d857a8
JO
4704 struct perf_event_context *task_ctx)
4705{
4706 struct perf_cpu_context *cpuctx;
4707 struct perf_event_context *ctx;
4708 struct pmu *pmu;
4709 int ctxn;
4710
4711 rcu_read_lock();
4712 list_for_each_entry_rcu(pmu, &pmus, entry) {
4713 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4714 if (cpuctx->unique_pmu != pmu)
4715 goto next;
67516844 4716 perf_event_aux_ctx(&cpuctx->ctx, output, data);
52d857a8
JO
4717 if (task_ctx)
4718 goto next;
4719 ctxn = pmu->task_ctx_nr;
4720 if (ctxn < 0)
4721 goto next;
4722 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4723 if (ctx)
67516844 4724 perf_event_aux_ctx(ctx, output, data);
52d857a8
JO
4725next:
4726 put_cpu_ptr(pmu->pmu_cpu_context);
4727 }
4728
4729 if (task_ctx) {
4730 preempt_disable();
67516844 4731 perf_event_aux_ctx(task_ctx, output, data);
52d857a8
JO
4732 preempt_enable();
4733 }
4734 rcu_read_unlock();
4735}
4736
60313ebe 4737/*
9f498cc5
PZ
4738 * task tracking -- fork/exit
4739 *
3af9e859 4740 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
60313ebe
PZ
4741 */
4742
9f498cc5 4743struct perf_task_event {
3a80b4a3 4744 struct task_struct *task;
cdd6c482 4745 struct perf_event_context *task_ctx;
60313ebe
PZ
4746
4747 struct {
4748 struct perf_event_header header;
4749
4750 u32 pid;
4751 u32 ppid;
9f498cc5
PZ
4752 u32 tid;
4753 u32 ptid;
393b2ad8 4754 u64 time;
cdd6c482 4755 } event_id;
60313ebe
PZ
4756};
4757
67516844
JO
4758static int perf_event_task_match(struct perf_event *event)
4759{
4760 return event->attr.comm || event->attr.mmap ||
4761 event->attr.mmap_data || event->attr.task;
4762}
4763
cdd6c482 4764static void perf_event_task_output(struct perf_event *event,
52d857a8 4765 void *data)
60313ebe 4766{
52d857a8 4767 struct perf_task_event *task_event = data;
60313ebe 4768 struct perf_output_handle handle;
c980d109 4769 struct perf_sample_data sample;
9f498cc5 4770 struct task_struct *task = task_event->task;
c980d109 4771 int ret, size = task_event->event_id.header.size;
8bb39f9a 4772
67516844
JO
4773 if (!perf_event_task_match(event))
4774 return;
4775
c980d109 4776 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
60313ebe 4777
c980d109 4778 ret = perf_output_begin(&handle, event,
a7ac67ea 4779 task_event->event_id.header.size);
ef60777c 4780 if (ret)
c980d109 4781 goto out;
60313ebe 4782
cdd6c482
IM
4783 task_event->event_id.pid = perf_event_pid(event, task);
4784 task_event->event_id.ppid = perf_event_pid(event, current);
60313ebe 4785
cdd6c482
IM
4786 task_event->event_id.tid = perf_event_tid(event, task);
4787 task_event->event_id.ptid = perf_event_tid(event, current);
9f498cc5 4788
cdd6c482 4789 perf_output_put(&handle, task_event->event_id);
393b2ad8 4790
c980d109
ACM
4791 perf_event__output_id_sample(event, &handle, &sample);
4792
60313ebe 4793 perf_output_end(&handle);
c980d109
ACM
4794out:
4795 task_event->event_id.header.size = size;
60313ebe
PZ
4796}
4797
cdd6c482
IM
4798static void perf_event_task(struct task_struct *task,
4799 struct perf_event_context *task_ctx,
3a80b4a3 4800 int new)
60313ebe 4801{
9f498cc5 4802 struct perf_task_event task_event;
60313ebe 4803
cdd6c482
IM
4804 if (!atomic_read(&nr_comm_events) &&
4805 !atomic_read(&nr_mmap_events) &&
4806 !atomic_read(&nr_task_events))
60313ebe
PZ
4807 return;
4808
9f498cc5 4809 task_event = (struct perf_task_event){
3a80b4a3
PZ
4810 .task = task,
4811 .task_ctx = task_ctx,
cdd6c482 4812 .event_id = {
60313ebe 4813 .header = {
cdd6c482 4814 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
573402db 4815 .misc = 0,
cdd6c482 4816 .size = sizeof(task_event.event_id),
60313ebe 4817 },
573402db
PZ
4818 /* .pid */
4819 /* .ppid */
9f498cc5
PZ
4820 /* .tid */
4821 /* .ptid */
6f93d0a7 4822 .time = perf_clock(),
60313ebe
PZ
4823 },
4824 };
4825
67516844 4826 perf_event_aux(perf_event_task_output,
52d857a8
JO
4827 &task_event,
4828 task_ctx);
9f498cc5
PZ
4829}
4830
cdd6c482 4831void perf_event_fork(struct task_struct *task)
9f498cc5 4832{
cdd6c482 4833 perf_event_task(task, NULL, 1);
60313ebe
PZ
4834}
4835
8d1b2d93
PZ
4836/*
4837 * comm tracking
4838 */
4839
4840struct perf_comm_event {
22a4f650
IM
4841 struct task_struct *task;
4842 char *comm;
8d1b2d93
PZ
4843 int comm_size;
4844
4845 struct {
4846 struct perf_event_header header;
4847
4848 u32 pid;
4849 u32 tid;
cdd6c482 4850 } event_id;
8d1b2d93
PZ
4851};
4852
67516844
JO
4853static int perf_event_comm_match(struct perf_event *event)
4854{
4855 return event->attr.comm;
4856}
4857
cdd6c482 4858static void perf_event_comm_output(struct perf_event *event,
52d857a8 4859 void *data)
8d1b2d93 4860{
52d857a8 4861 struct perf_comm_event *comm_event = data;
8d1b2d93 4862 struct perf_output_handle handle;
c980d109 4863 struct perf_sample_data sample;
cdd6c482 4864 int size = comm_event->event_id.header.size;
c980d109
ACM
4865 int ret;
4866
67516844
JO
4867 if (!perf_event_comm_match(event))
4868 return;
4869
c980d109
ACM
4870 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4871 ret = perf_output_begin(&handle, event,
a7ac67ea 4872 comm_event->event_id.header.size);
8d1b2d93
PZ
4873
4874 if (ret)
c980d109 4875 goto out;
8d1b2d93 4876
cdd6c482
IM
4877 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4878 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
709e50cf 4879
cdd6c482 4880 perf_output_put(&handle, comm_event->event_id);
76369139 4881 __output_copy(&handle, comm_event->comm,
8d1b2d93 4882 comm_event->comm_size);
c980d109
ACM
4883
4884 perf_event__output_id_sample(event, &handle, &sample);
4885
8d1b2d93 4886 perf_output_end(&handle);
c980d109
ACM
4887out:
4888 comm_event->event_id.header.size = size;
8d1b2d93
PZ
4889}
4890
cdd6c482 4891static void perf_event_comm_event(struct perf_comm_event *comm_event)
8d1b2d93 4892{
413ee3b4 4893 char comm[TASK_COMM_LEN];
8d1b2d93 4894 unsigned int size;
8d1b2d93 4895
413ee3b4 4896 memset(comm, 0, sizeof(comm));
96b02d78 4897 strlcpy(comm, comm_event->task->comm, sizeof(comm));
888fcee0 4898 size = ALIGN(strlen(comm)+1, sizeof(u64));
8d1b2d93
PZ
4899
4900 comm_event->comm = comm;
4901 comm_event->comm_size = size;
4902
cdd6c482 4903 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
8dc85d54 4904
67516844 4905 perf_event_aux(perf_event_comm_output,
52d857a8
JO
4906 comm_event,
4907 NULL);
8d1b2d93
PZ
4908}
4909
cdd6c482 4910void perf_event_comm(struct task_struct *task)
8d1b2d93 4911{
9ee318a7 4912 struct perf_comm_event comm_event;
8dc85d54
PZ
4913 struct perf_event_context *ctx;
4914 int ctxn;
9ee318a7 4915
c79aa0d9 4916 rcu_read_lock();
8dc85d54
PZ
4917 for_each_task_context_nr(ctxn) {
4918 ctx = task->perf_event_ctxp[ctxn];
4919 if (!ctx)
4920 continue;
9ee318a7 4921
8dc85d54
PZ
4922 perf_event_enable_on_exec(ctx);
4923 }
c79aa0d9 4924 rcu_read_unlock();
9ee318a7 4925
cdd6c482 4926 if (!atomic_read(&nr_comm_events))
9ee318a7 4927 return;
a63eaf34 4928
9ee318a7 4929 comm_event = (struct perf_comm_event){
8d1b2d93 4930 .task = task,
573402db
PZ
4931 /* .comm */
4932 /* .comm_size */
cdd6c482 4933 .event_id = {
573402db 4934 .header = {
cdd6c482 4935 .type = PERF_RECORD_COMM,
573402db
PZ
4936 .misc = 0,
4937 /* .size */
4938 },
4939 /* .pid */
4940 /* .tid */
8d1b2d93
PZ
4941 },
4942 };
4943
cdd6c482 4944 perf_event_comm_event(&comm_event);
8d1b2d93
PZ
4945}
4946
0a4a9391
PZ
4947/*
4948 * mmap tracking
4949 */
4950
4951struct perf_mmap_event {
089dd79d
PZ
4952 struct vm_area_struct *vma;
4953
4954 const char *file_name;
4955 int file_size;
0a4a9391
PZ
4956
4957 struct {
4958 struct perf_event_header header;
4959
4960 u32 pid;
4961 u32 tid;
4962 u64 start;
4963 u64 len;
4964 u64 pgoff;
cdd6c482 4965 } event_id;
0a4a9391
PZ
4966};
4967
67516844
JO
4968static int perf_event_mmap_match(struct perf_event *event,
4969 void *data)
4970{
4971 struct perf_mmap_event *mmap_event = data;
4972 struct vm_area_struct *vma = mmap_event->vma;
4973 int executable = vma->vm_flags & VM_EXEC;
4974
4975 return (!executable && event->attr.mmap_data) ||
4976 (executable && event->attr.mmap);
4977}
4978
cdd6c482 4979static void perf_event_mmap_output(struct perf_event *event,
52d857a8 4980 void *data)
0a4a9391 4981{
52d857a8 4982 struct perf_mmap_event *mmap_event = data;
0a4a9391 4983 struct perf_output_handle handle;
c980d109 4984 struct perf_sample_data sample;
cdd6c482 4985 int size = mmap_event->event_id.header.size;
c980d109 4986 int ret;
0a4a9391 4987
67516844
JO
4988 if (!perf_event_mmap_match(event, data))
4989 return;
4990
c980d109
ACM
4991 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
4992 ret = perf_output_begin(&handle, event,
a7ac67ea 4993 mmap_event->event_id.header.size);
0a4a9391 4994 if (ret)
c980d109 4995 goto out;
0a4a9391 4996
cdd6c482
IM
4997 mmap_event->event_id.pid = perf_event_pid(event, current);
4998 mmap_event->event_id.tid = perf_event_tid(event, current);
709e50cf 4999
cdd6c482 5000 perf_output_put(&handle, mmap_event->event_id);
76369139 5001 __output_copy(&handle, mmap_event->file_name,
0a4a9391 5002 mmap_event->file_size);
c980d109
ACM
5003
5004 perf_event__output_id_sample(event, &handle, &sample);
5005
78d613eb 5006 perf_output_end(&handle);
c980d109
ACM
5007out:
5008 mmap_event->event_id.header.size = size;
0a4a9391
PZ
5009}
5010
cdd6c482 5011static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
0a4a9391 5012{
089dd79d
PZ
5013 struct vm_area_struct *vma = mmap_event->vma;
5014 struct file *file = vma->vm_file;
0a4a9391
PZ
5015 unsigned int size;
5016 char tmp[16];
5017 char *buf = NULL;
089dd79d 5018 const char *name;
0a4a9391 5019
413ee3b4
AB
5020 memset(tmp, 0, sizeof(tmp));
5021
0a4a9391 5022 if (file) {
413ee3b4 5023 /*
76369139 5024 * d_path works from the end of the rb backwards, so we
413ee3b4
AB
5025 * need to add enough zero bytes after the string to handle
5026 * the 64bit alignment we do later.
5027 */
5028 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
0a4a9391
PZ
5029 if (!buf) {
5030 name = strncpy(tmp, "//enomem", sizeof(tmp));
5031 goto got_name;
5032 }
d3d21c41 5033 name = d_path(&file->f_path, buf, PATH_MAX);
0a4a9391
PZ
5034 if (IS_ERR(name)) {
5035 name = strncpy(tmp, "//toolong", sizeof(tmp));
5036 goto got_name;
5037 }
5038 } else {
413ee3b4
AB
5039 if (arch_vma_name(mmap_event->vma)) {
5040 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
c97847d2
CG
5041 sizeof(tmp) - 1);
5042 tmp[sizeof(tmp) - 1] = '\0';
089dd79d 5043 goto got_name;
413ee3b4 5044 }
089dd79d
PZ
5045
5046 if (!vma->vm_mm) {
5047 name = strncpy(tmp, "[vdso]", sizeof(tmp));
5048 goto got_name;
3af9e859
EM
5049 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
5050 vma->vm_end >= vma->vm_mm->brk) {
5051 name = strncpy(tmp, "[heap]", sizeof(tmp));
5052 goto got_name;
5053 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
5054 vma->vm_end >= vma->vm_mm->start_stack) {
5055 name = strncpy(tmp, "[stack]", sizeof(tmp));
5056 goto got_name;
089dd79d
PZ
5057 }
5058
0a4a9391
PZ
5059 name = strncpy(tmp, "//anon", sizeof(tmp));
5060 goto got_name;
5061 }
5062
5063got_name:
888fcee0 5064 size = ALIGN(strlen(name)+1, sizeof(u64));
0a4a9391
PZ
5065
5066 mmap_event->file_name = name;
5067 mmap_event->file_size = size;
5068
2fe85427
SE
5069 if (!(vma->vm_flags & VM_EXEC))
5070 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
5071
cdd6c482 5072 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
0a4a9391 5073
67516844 5074 perf_event_aux(perf_event_mmap_output,
52d857a8
JO
5075 mmap_event,
5076 NULL);
665c2142 5077
0a4a9391
PZ
5078 kfree(buf);
5079}
5080
3af9e859 5081void perf_event_mmap(struct vm_area_struct *vma)
0a4a9391 5082{
9ee318a7
PZ
5083 struct perf_mmap_event mmap_event;
5084
cdd6c482 5085 if (!atomic_read(&nr_mmap_events))
9ee318a7
PZ
5086 return;
5087
5088 mmap_event = (struct perf_mmap_event){
089dd79d 5089 .vma = vma,
573402db
PZ
5090 /* .file_name */
5091 /* .file_size */
cdd6c482 5092 .event_id = {
573402db 5093 .header = {
cdd6c482 5094 .type = PERF_RECORD_MMAP,
39447b38 5095 .misc = PERF_RECORD_MISC_USER,
573402db
PZ
5096 /* .size */
5097 },
5098 /* .pid */
5099 /* .tid */
089dd79d
PZ
5100 .start = vma->vm_start,
5101 .len = vma->vm_end - vma->vm_start,
3a0304e9 5102 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
0a4a9391
PZ
5103 },
5104 };
5105
cdd6c482 5106 perf_event_mmap_event(&mmap_event);
0a4a9391
PZ
5107}
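
On the reader side the record built above shows up in the ring buffer roughly as the struct below; the filename is NUL-padded up to a u64 boundary and a sample_id trailer may follow when sample_id_all is set (mmap_record is an illustrative name, not a uapi type):

#include <stdint.h>
#include <linux/perf_event.h>

struct mmap_record {
	struct perf_event_header header;	/* type == PERF_RECORD_MMAP */
	uint32_t pid, tid;
	uint64_t addr;				/* vma->vm_start */
	uint64_t len;				/* vma->vm_end - vma->vm_start */
	uint64_t pgoff;				/* file offset in bytes */
	char filename[];			/* d_path(), "[stack]", "//anon", ... */
};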
5108
a78ac325
PZ
5109/*
5110 * IRQ throttle logging
5111 */
5112
cdd6c482 5113static void perf_log_throttle(struct perf_event *event, int enable)
a78ac325
PZ
5114{
5115 struct perf_output_handle handle;
c980d109 5116 struct perf_sample_data sample;
a78ac325
PZ
5117 int ret;
5118
5119 struct {
5120 struct perf_event_header header;
5121 u64 time;
cca3f454 5122 u64 id;
7f453c24 5123 u64 stream_id;
a78ac325
PZ
5124 } throttle_event = {
5125 .header = {
cdd6c482 5126 .type = PERF_RECORD_THROTTLE,
a78ac325
PZ
5127 .misc = 0,
5128 .size = sizeof(throttle_event),
5129 },
def0a9b2 5130 .time = perf_clock(),
cdd6c482
IM
5131 .id = primary_event_id(event),
5132 .stream_id = event->id,
a78ac325
PZ
5133 };
5134
966ee4d6 5135 if (enable)
cdd6c482 5136 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
966ee4d6 5137
c980d109
ACM
5138 perf_event_header__init_id(&throttle_event.header, &sample, event);
5139
5140 ret = perf_output_begin(&handle, event,
a7ac67ea 5141 throttle_event.header.size);
a78ac325
PZ
5142 if (ret)
5143 return;
5144
5145 perf_output_put(&handle, throttle_event);
c980d109 5146 perf_event__output_id_sample(event, &handle, &sample);
a78ac325
PZ
5147 perf_output_end(&handle);
5148}
5149
f6c7d5fe 5150/*
cdd6c482 5151 * Generic event overflow handling, sampling.
f6c7d5fe
PZ
5152 */
5153
a8b0ca17 5154static int __perf_event_overflow(struct perf_event *event,
5622f295
MM
5155 int throttle, struct perf_sample_data *data,
5156 struct pt_regs *regs)
f6c7d5fe 5157{
cdd6c482
IM
5158 int events = atomic_read(&event->event_limit);
5159 struct hw_perf_event *hwc = &event->hw;
e050e3f0 5160 u64 seq;
79f14641
PZ
5161 int ret = 0;
5162
96398826
PZ
5163 /*
5164 * Non-sampling counters might still use the PMI to fold short
5165 * hardware counters, ignore those.
5166 */
5167 if (unlikely(!is_sampling_event(event)))
5168 return 0;
5169
e050e3f0
SE
5170 seq = __this_cpu_read(perf_throttled_seq);
5171 if (seq != hwc->interrupts_seq) {
5172 hwc->interrupts_seq = seq;
5173 hwc->interrupts = 1;
5174 } else {
5175 hwc->interrupts++;
5176 if (unlikely(throttle
5177 && hwc->interrupts >= max_samples_per_tick)) {
5178 __this_cpu_inc(perf_throttled_count);
163ec435
PZ
5179 hwc->interrupts = MAX_INTERRUPTS;
5180 perf_log_throttle(event, 0);
a78ac325
PZ
5181 ret = 1;
5182 }
e050e3f0 5183 }
60db5e09 5184
cdd6c482 5185 if (event->attr.freq) {
def0a9b2 5186 u64 now = perf_clock();
abd50713 5187 s64 delta = now - hwc->freq_time_stamp;
bd2b5b12 5188
abd50713 5189 hwc->freq_time_stamp = now;
bd2b5b12 5190
abd50713 5191 if (delta > 0 && delta < 2*TICK_NSEC)
f39d47ff 5192 perf_adjust_period(event, delta, hwc->last_period, true);
bd2b5b12
PZ
5193 }
5194
2023b359
PZ
5195 /*
5196 * XXX event_limit might not quite work as expected on inherited
cdd6c482 5197 * events
2023b359
PZ
5198 */
5199
cdd6c482
IM
5200 event->pending_kill = POLL_IN;
5201 if (events && atomic_dec_and_test(&event->event_limit)) {
79f14641 5202 ret = 1;
cdd6c482 5203 event->pending_kill = POLL_HUP;
a8b0ca17
PZ
5204 event->pending_disable = 1;
5205 irq_work_queue(&event->pending);
79f14641
PZ
5206 }
5207
453f19ee 5208 if (event->overflow_handler)
a8b0ca17 5209 event->overflow_handler(event, data, regs);
453f19ee 5210 else
a8b0ca17 5211 perf_event_output(event, data, regs);
453f19ee 5212
f506b3dc 5213 if (event->fasync && event->pending_kill) {
a8b0ca17
PZ
5214 event->pending_wakeup = 1;
5215 irq_work_queue(&event->pending);
f506b3dc
PZ
5216 }
5217
79f14641 5218 return ret;
f6c7d5fe
PZ
5219}
5220
a8b0ca17 5221int perf_event_overflow(struct perf_event *event,
5622f295
MM
5222 struct perf_sample_data *data,
5223 struct pt_regs *regs)
850bc73f 5224{
a8b0ca17 5225 return __perf_event_overflow(event, 1, data, regs);
850bc73f
PZ
5226}
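
The event_limit / pending_kill handling above is what backs the "signal me on overflow" pattern: userspace sets up O_ASYNC delivery and arms a number of overflows with PERF_EVENT_IOC_REFRESH; once the limit is exhausted the event disables itself and POLL_HUP is signalled. A sketch under those assumptions (arm_overflow_signal() is an illustrative helper):

#define _GNU_SOURCE			/* for F_SETSIG */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int arm_overflow_signal(int fd, int nr_overflows)
{
	if (fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC) ||
	    fcntl(fd, F_SETSIG, SIGIO) ||
	    fcntl(fd, F_SETOWN, getpid()))
		return -1;

	/* Re-enable the event for the next nr_overflows overflows. */
	return ioctl(fd, PERF_EVENT_IOC_REFRESH, nr_overflows);
}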
5227
15dbf27c 5228/*
cdd6c482 5229 * Generic software event infrastructure
15dbf27c
PZ
5230 */
5231
b28ab83c
PZ
5232struct swevent_htable {
5233 struct swevent_hlist *swevent_hlist;
5234 struct mutex hlist_mutex;
5235 int hlist_refcount;
5236
5237 /* Recursion avoidance in each contexts */
5238 int recursion[PERF_NR_CONTEXTS];
5239};
5240
5241static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
5242
7b4b6658 5243/*
cdd6c482
IM
5244 * We directly increment event->count and keep a second value in
5245 * event->hw.period_left to count intervals. This period event
7b4b6658
PZ
5246 * is kept in the range [-sample_period, 0] so that we can use the
5247 * sign as trigger.
5248 */
5249
ab573844 5250u64 perf_swevent_set_period(struct perf_event *event)
15dbf27c 5251{
cdd6c482 5252 struct hw_perf_event *hwc = &event->hw;
7b4b6658
PZ
5253 u64 period = hwc->last_period;
5254 u64 nr, offset;
5255 s64 old, val;
5256
5257 hwc->last_period = hwc->sample_period;
15dbf27c
PZ
5258
5259again:
e7850595 5260 old = val = local64_read(&hwc->period_left);
7b4b6658
PZ
5261 if (val < 0)
5262 return 0;
15dbf27c 5263
7b4b6658
PZ
5264 nr = div64_u64(period + val, period);
5265 offset = nr * period;
5266 val -= offset;
e7850595 5267 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7b4b6658 5268 goto again;
15dbf27c 5269
7b4b6658 5270 return nr;
15dbf27c
PZ
5271}
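
A userspace model of the arithmetic above may make the invariant clearer: period_left counts up towards zero, and once it is non-negative the function works out how many whole periods fit and pushes period_left back below zero. For example, with period = 100 and period_left = 230 this yields nr = 3 and a new period_left of -70 (swevent_periods_elapsed() is an illustrative name, not a kernel symbol):

#include <stdint.h>

static uint64_t swevent_periods_elapsed(int64_t *period_left, uint64_t period)
{
	int64_t val = *period_left;
	uint64_t nr;

	if (val < 0)
		return 0;			/* next period not reached yet */

	nr = (period + (uint64_t)val) / period;
	*period_left = val - (int64_t)(nr * period);
	return nr;
}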
5272
0cff784a 5273static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
a8b0ca17 5274 struct perf_sample_data *data,
5622f295 5275 struct pt_regs *regs)
15dbf27c 5276{
cdd6c482 5277 struct hw_perf_event *hwc = &event->hw;
850bc73f 5278 int throttle = 0;
15dbf27c 5279
0cff784a
PZ
5280 if (!overflow)
5281 overflow = perf_swevent_set_period(event);
15dbf27c 5282
7b4b6658
PZ
5283 if (hwc->interrupts == MAX_INTERRUPTS)
5284 return;
15dbf27c 5285
7b4b6658 5286 for (; overflow; overflow--) {
a8b0ca17 5287 if (__perf_event_overflow(event, throttle,
5622f295 5288 data, regs)) {
7b4b6658
PZ
5289 /*
5290 * We inhibit the overflow from happening when
5291 * hwc->interrupts == MAX_INTERRUPTS.
5292 */
5293 break;
5294 }
cf450a73 5295 throttle = 1;
7b4b6658 5296 }
15dbf27c
PZ
5297}
5298
a4eaf7f1 5299static void perf_swevent_event(struct perf_event *event, u64 nr,
a8b0ca17 5300 struct perf_sample_data *data,
5622f295 5301 struct pt_regs *regs)
7b4b6658 5302{
cdd6c482 5303 struct hw_perf_event *hwc = &event->hw;
d6d020e9 5304
e7850595 5305 local64_add(nr, &event->count);
d6d020e9 5306
0cff784a
PZ
5307 if (!regs)
5308 return;
5309
6c7e550f 5310 if (!is_sampling_event(event))
7b4b6658 5311 return;
d6d020e9 5312
5d81e5cf
AV
5313 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
5314 data->period = nr;
5315 return perf_swevent_overflow(event, 1, data, regs);
5316 } else
5317 data->period = event->hw.last_period;
5318
0cff784a 5319 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
a8b0ca17 5320 return perf_swevent_overflow(event, 1, data, regs);
0cff784a 5321
e7850595 5322 if (local64_add_negative(nr, &hwc->period_left))
7b4b6658 5323 return;
df1a132b 5324
a8b0ca17 5325 perf_swevent_overflow(event, 0, data, regs);
d6d020e9
PZ
5326}
5327
f5ffe02e
FW
5328static int perf_exclude_event(struct perf_event *event,
5329 struct pt_regs *regs)
5330{
a4eaf7f1 5331 if (event->hw.state & PERF_HES_STOPPED)
91b2f482 5332 return 1;
a4eaf7f1 5333
f5ffe02e
FW
5334 if (regs) {
5335 if (event->attr.exclude_user && user_mode(regs))
5336 return 1;
5337
5338 if (event->attr.exclude_kernel && !user_mode(regs))
5339 return 1;
5340 }
5341
5342 return 0;
5343}
5344
cdd6c482 5345static int perf_swevent_match(struct perf_event *event,
1c432d89 5346 enum perf_type_id type,
6fb2915d
LZ
5347 u32 event_id,
5348 struct perf_sample_data *data,
5349 struct pt_regs *regs)
15dbf27c 5350{
cdd6c482 5351 if (event->attr.type != type)
a21ca2ca 5352 return 0;
f5ffe02e 5353
cdd6c482 5354 if (event->attr.config != event_id)
15dbf27c
PZ
5355 return 0;
5356
f5ffe02e
FW
5357 if (perf_exclude_event(event, regs))
5358 return 0;
15dbf27c
PZ
5359
5360 return 1;
5361}
5362
76e1d904
FW
5363static inline u64 swevent_hash(u64 type, u32 event_id)
5364{
5365 u64 val = event_id | (type << 32);
5366
5367 return hash_64(val, SWEVENT_HLIST_BITS);
5368}
5369
49f135ed
FW
5370static inline struct hlist_head *
5371__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
76e1d904 5372{
49f135ed
FW
5373 u64 hash = swevent_hash(type, event_id);
5374
5375 return &hlist->heads[hash];
5376}
76e1d904 5377
49f135ed
FW
5378/* For the read side: events when they trigger */
5379static inline struct hlist_head *
b28ab83c 5380find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
49f135ed
FW
5381{
5382 struct swevent_hlist *hlist;
76e1d904 5383
b28ab83c 5384 hlist = rcu_dereference(swhash->swevent_hlist);
76e1d904
FW
5385 if (!hlist)
5386 return NULL;
5387
49f135ed
FW
5388 return __find_swevent_head(hlist, type, event_id);
5389}
5390
5391/* For the event head insertion and removal in the hlist */
5392static inline struct hlist_head *
b28ab83c 5393find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
49f135ed
FW
5394{
5395 struct swevent_hlist *hlist;
5396 u32 event_id = event->attr.config;
5397 u64 type = event->attr.type;
5398
5399 /*
5400 * Event scheduling is always serialized against hlist allocation
5401 * and release, which makes the protected version suitable here.
5402 * The context lock guarantees that.
5403 */
b28ab83c 5404 hlist = rcu_dereference_protected(swhash->swevent_hlist,
49f135ed
FW
5405 lockdep_is_held(&event->ctx->lock));
5406 if (!hlist)
5407 return NULL;
5408
5409 return __find_swevent_head(hlist, type, event_id);
76e1d904
FW
5410}
5411
5412static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
a8b0ca17 5413 u64 nr,
76e1d904
FW
5414 struct perf_sample_data *data,
5415 struct pt_regs *regs)
15dbf27c 5416{
b28ab83c 5417 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
cdd6c482 5418 struct perf_event *event;
76e1d904 5419 struct hlist_head *head;
15dbf27c 5420
76e1d904 5421 rcu_read_lock();
b28ab83c 5422 head = find_swevent_head_rcu(swhash, type, event_id);
76e1d904
FW
5423 if (!head)
5424 goto end;
5425
b67bfe0d 5426 hlist_for_each_entry_rcu(event, head, hlist_entry) {
6fb2915d 5427 if (perf_swevent_match(event, type, event_id, data, regs))
a8b0ca17 5428 perf_swevent_event(event, nr, data, regs);
15dbf27c 5429 }
76e1d904
FW
5430end:
5431 rcu_read_unlock();
15dbf27c
PZ
5432}
5433
4ed7c92d 5434int perf_swevent_get_recursion_context(void)
96f6d444 5435{
b28ab83c 5436 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
96f6d444 5437
b28ab83c 5438 return get_recursion_context(swhash->recursion);
96f6d444 5439}
645e8cc0 5440EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
96f6d444 5441
fa9f90be 5442inline void perf_swevent_put_recursion_context(int rctx)
15dbf27c 5443{
b28ab83c 5444 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
927c7a9e 5445
b28ab83c 5446 put_recursion_context(swhash->recursion, rctx);
ce71b9df 5447}
15dbf27c 5448
a8b0ca17 5449void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
b8e83514 5450{
a4234bfc 5451 struct perf_sample_data data;
4ed7c92d
PZ
5452 int rctx;
5453
1c024eca 5454 preempt_disable_notrace();
4ed7c92d
PZ
5455 rctx = perf_swevent_get_recursion_context();
5456 if (rctx < 0)
5457 return;
a4234bfc 5458
fd0d000b 5459 perf_sample_data_init(&data, addr, 0);
92bf309a 5460
a8b0ca17 5461 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
4ed7c92d
PZ
5462
5463 perf_swevent_put_recursion_context(rctx);
1c024eca 5464 preempt_enable_notrace();
b8e83514
PZ
5465}
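A hedged usage sketch for the function above: kernel callers normally go through the perf_sw_event() inline wrapper (defined in include/linux/perf_event.h, outside this file), which tests the perf_swevent_enabled[] static key before calling __perf_sw_event(). The helper below is hypothetical and only illustrates the calling convention:

#include <linux/perf_event.h>

static void example_account_fault(struct pt_regs *regs, unsigned long address)
{
	/* count one PERF_COUNT_SW_PAGE_FAULTS occurrence, tagged with the fault address */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}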
5466
cdd6c482 5467static void perf_swevent_read(struct perf_event *event)
15dbf27c 5468{
15dbf27c
PZ
5469}
5470
a4eaf7f1 5471static int perf_swevent_add(struct perf_event *event, int flags)
15dbf27c 5472{
b28ab83c 5473 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
cdd6c482 5474 struct hw_perf_event *hwc = &event->hw;
76e1d904
FW
5475 struct hlist_head *head;
5476
6c7e550f 5477 if (is_sampling_event(event)) {
7b4b6658 5478 hwc->last_period = hwc->sample_period;
cdd6c482 5479 perf_swevent_set_period(event);
7b4b6658 5480 }
76e1d904 5481
a4eaf7f1
PZ
5482 hwc->state = !(flags & PERF_EF_START);
5483
b28ab83c 5484 head = find_swevent_head(swhash, event);
76e1d904
FW
5485 if (WARN_ON_ONCE(!head))
5486 return -EINVAL;
5487
5488 hlist_add_head_rcu(&event->hlist_entry, head);
5489
15dbf27c
PZ
5490 return 0;
5491}
5492
a4eaf7f1 5493static void perf_swevent_del(struct perf_event *event, int flags)
15dbf27c 5494{
76e1d904 5495 hlist_del_rcu(&event->hlist_entry);
15dbf27c
PZ
5496}
5497
a4eaf7f1 5498static void perf_swevent_start(struct perf_event *event, int flags)
5c92d124 5499{
a4eaf7f1 5500 event->hw.state = 0;
d6d020e9 5501}
aa9c4c0f 5502
a4eaf7f1 5503static void perf_swevent_stop(struct perf_event *event, int flags)
d6d020e9 5504{
a4eaf7f1 5505 event->hw.state = PERF_HES_STOPPED;
bae43c99
IM
5506}
5507
49f135ed
FW
5508/* Deref the hlist from the update side */
5509static inline struct swevent_hlist *
b28ab83c 5510swevent_hlist_deref(struct swevent_htable *swhash)
49f135ed 5511{
b28ab83c
PZ
5512 return rcu_dereference_protected(swhash->swevent_hlist,
5513 lockdep_is_held(&swhash->hlist_mutex));
49f135ed
FW
5514}
5515
b28ab83c 5516static void swevent_hlist_release(struct swevent_htable *swhash)
76e1d904 5517{
b28ab83c 5518 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
76e1d904 5519
49f135ed 5520 if (!hlist)
76e1d904
FW
5521 return;
5522
b28ab83c 5523 rcu_assign_pointer(swhash->swevent_hlist, NULL);
fa4bbc4c 5524 kfree_rcu(hlist, rcu_head);
76e1d904
FW
5525}
5526
5527static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5528{
b28ab83c 5529 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904 5530
b28ab83c 5531 mutex_lock(&swhash->hlist_mutex);
76e1d904 5532
b28ab83c
PZ
5533 if (!--swhash->hlist_refcount)
5534 swevent_hlist_release(swhash);
76e1d904 5535
b28ab83c 5536 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
5537}
5538
5539static void swevent_hlist_put(struct perf_event *event)
5540{
5541 int cpu;
5542
5543 if (event->cpu != -1) {
5544 swevent_hlist_put_cpu(event, event->cpu);
5545 return;
5546 }
5547
5548 for_each_possible_cpu(cpu)
5549 swevent_hlist_put_cpu(event, cpu);
5550}
5551
5552static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5553{
b28ab83c 5554 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904
FW
5555 int err = 0;
5556
b28ab83c 5557 mutex_lock(&swhash->hlist_mutex);
76e1d904 5558
b28ab83c 5559 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
76e1d904
FW
5560 struct swevent_hlist *hlist;
5561
5562 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5563 if (!hlist) {
5564 err = -ENOMEM;
5565 goto exit;
5566 }
b28ab83c 5567 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 5568 }
b28ab83c 5569 swhash->hlist_refcount++;
9ed6060d 5570exit:
b28ab83c 5571 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
5572
5573 return err;
5574}
5575
5576static int swevent_hlist_get(struct perf_event *event)
5577{
5578 int err;
5579 int cpu, failed_cpu;
5580
5581 if (event->cpu != -1)
5582 return swevent_hlist_get_cpu(event, event->cpu);
5583
5584 get_online_cpus();
5585 for_each_possible_cpu(cpu) {
5586 err = swevent_hlist_get_cpu(event, cpu);
5587 if (err) {
5588 failed_cpu = cpu;
5589 goto fail;
5590 }
5591 }
5592 put_online_cpus();
5593
5594 return 0;
9ed6060d 5595fail:
76e1d904
FW
5596 for_each_possible_cpu(cpu) {
5597 if (cpu == failed_cpu)
5598 break;
5599 swevent_hlist_put_cpu(event, cpu);
5600 }
5601
5602 put_online_cpus();
5603 return err;
5604}
5605
c5905afb 5606struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
95476b64 5607
b0a873eb
PZ
5608static void sw_perf_event_destroy(struct perf_event *event)
5609{
5610 u64 event_id = event->attr.config;
95476b64 5611
b0a873eb
PZ
5612 WARN_ON(event->parent);
5613
c5905afb 5614 static_key_slow_dec(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
5615 swevent_hlist_put(event);
5616}
5617
5618static int perf_swevent_init(struct perf_event *event)
5619{
8176cced 5620 u64 event_id = event->attr.config;
b0a873eb
PZ
5621
5622 if (event->attr.type != PERF_TYPE_SOFTWARE)
5623 return -ENOENT;
5624
2481c5fa
SE
5625 /*
5626 * no branch sampling for software events
5627 */
5628 if (has_branch_stack(event))
5629 return -EOPNOTSUPP;
5630
b0a873eb
PZ
5631 switch (event_id) {
5632 case PERF_COUNT_SW_CPU_CLOCK:
5633 case PERF_COUNT_SW_TASK_CLOCK:
5634 return -ENOENT;
5635
5636 default:
5637 break;
5638 }
5639
ce677831 5640 if (event_id >= PERF_COUNT_SW_MAX)
b0a873eb
PZ
5641 return -ENOENT;
5642
5643 if (!event->parent) {
5644 int err;
5645
5646 err = swevent_hlist_get(event);
5647 if (err)
5648 return err;
5649
c5905afb 5650 static_key_slow_inc(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
5651 event->destroy = sw_perf_event_destroy;
5652 }
5653
5654 return 0;
5655}
5656
35edc2a5
PZ
5657static int perf_swevent_event_idx(struct perf_event *event)
5658{
5659 return 0;
5660}
5661
b0a873eb 5662static struct pmu perf_swevent = {
89a1e187 5663 .task_ctx_nr = perf_sw_context,
95476b64 5664
b0a873eb 5665 .event_init = perf_swevent_init,
a4eaf7f1
PZ
5666 .add = perf_swevent_add,
5667 .del = perf_swevent_del,
5668 .start = perf_swevent_start,
5669 .stop = perf_swevent_stop,
1c024eca 5670 .read = perf_swevent_read,
35edc2a5
PZ
5671
5672 .event_idx = perf_swevent_event_idx,
1c024eca
PZ
5673};
5674
b0a873eb
PZ
5675#ifdef CONFIG_EVENT_TRACING
5676
1c024eca
PZ
5677static int perf_tp_filter_match(struct perf_event *event,
5678 struct perf_sample_data *data)
5679{
5680 void *record = data->raw->data;
5681
5682 if (likely(!event->filter) || filter_match_preds(event->filter, record))
5683 return 1;
5684 return 0;
5685}
5686
5687static int perf_tp_event_match(struct perf_event *event,
5688 struct perf_sample_data *data,
5689 struct pt_regs *regs)
5690{
a0f7d0f7
FW
5691 if (event->hw.state & PERF_HES_STOPPED)
5692 return 0;
580d607c
PZ
5693 /*
5694 * All tracepoints are from kernel-space.
5695 */
5696 if (event->attr.exclude_kernel)
1c024eca
PZ
5697 return 0;
5698
5699 if (!perf_tp_filter_match(event, data))
5700 return 0;
5701
5702 return 1;
5703}
5704
5705void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
e6dab5ff
AV
5706 struct pt_regs *regs, struct hlist_head *head, int rctx,
5707 struct task_struct *task)
95476b64
FW
5708{
5709 struct perf_sample_data data;
1c024eca 5710 struct perf_event *event;
1c024eca 5711
95476b64
FW
5712 struct perf_raw_record raw = {
5713 .size = entry_size,
5714 .data = record,
5715 };
5716
fd0d000b 5717 perf_sample_data_init(&data, addr, 0);
95476b64
FW
5718 data.raw = &raw;
5719
b67bfe0d 5720 hlist_for_each_entry_rcu(event, head, hlist_entry) {
1c024eca 5721 if (perf_tp_event_match(event, &data, regs))
a8b0ca17 5722 perf_swevent_event(event, count, &data, regs);
4f41c013 5723 }
ecc55f84 5724
e6dab5ff
AV
5725 /*
5726 * If we were given a target task, also iterate its context and
5727 * deliver this event there too.
5728 */
5729 if (task && task != current) {
5730 struct perf_event_context *ctx;
5731 struct trace_entry *entry = record;
5732
5733 rcu_read_lock();
5734 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
5735 if (!ctx)
5736 goto unlock;
5737
5738 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5739 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5740 continue;
5741 if (event->attr.config != entry->type)
5742 continue;
5743 if (perf_tp_event_match(event, &data, regs))
5744 perf_swevent_event(event, count, &data, regs);
5745 }
5746unlock:
5747 rcu_read_unlock();
5748 }
5749
ecc55f84 5750 perf_swevent_put_recursion_context(rctx);
95476b64
FW
5751}
5752EXPORT_SYMBOL_GPL(perf_tp_event);
5753
cdd6c482 5754static void tp_perf_event_destroy(struct perf_event *event)
e077df4f 5755{
1c024eca 5756 perf_trace_destroy(event);
e077df4f
PZ
5757}
5758
b0a873eb 5759static int perf_tp_event_init(struct perf_event *event)
e077df4f 5760{
76e1d904
FW
5761 int err;
5762
b0a873eb
PZ
5763 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5764 return -ENOENT;
5765
2481c5fa
SE
5766 /*
5767 * no branch sampling for tracepoint events
5768 */
5769 if (has_branch_stack(event))
5770 return -EOPNOTSUPP;
5771
1c024eca
PZ
5772 err = perf_trace_init(event);
5773 if (err)
b0a873eb 5774 return err;
e077df4f 5775
cdd6c482 5776 event->destroy = tp_perf_event_destroy;
e077df4f 5777
b0a873eb
PZ
5778 return 0;
5779}
5780
5781static struct pmu perf_tracepoint = {
89a1e187
PZ
5782 .task_ctx_nr = perf_sw_context,
5783
b0a873eb 5784 .event_init = perf_tp_event_init,
a4eaf7f1
PZ
5785 .add = perf_trace_add,
5786 .del = perf_trace_del,
5787 .start = perf_swevent_start,
5788 .stop = perf_swevent_stop,
b0a873eb 5789 .read = perf_swevent_read,
35edc2a5
PZ
5790
5791 .event_idx = perf_swevent_event_idx,
b0a873eb
PZ
5792};
5793
5794static inline void perf_tp_register(void)
5795{
2e80a82a 5796 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
e077df4f 5797}
6fb2915d
LZ
5798
5799static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5800{
5801 char *filter_str;
5802 int ret;
5803
5804 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5805 return -EINVAL;
5806
5807 filter_str = strndup_user(arg, PAGE_SIZE);
5808 if (IS_ERR(filter_str))
5809 return PTR_ERR(filter_str);
5810
5811 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5812
5813 kfree(filter_str);
5814 return ret;
5815}
5816
5817static void perf_event_free_filter(struct perf_event *event)
5818{
5819 ftrace_profile_free_filter(event);
5820}
5821
e077df4f 5822#else
6fb2915d 5823
b0a873eb 5824static inline void perf_tp_register(void)
e077df4f 5825{
e077df4f 5826}
6fb2915d
LZ
5827
5828static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5829{
5830 return -ENOENT;
5831}
5832
5833static void perf_event_free_filter(struct perf_event *event)
5834{
5835}
5836
07b139c8 5837#endif /* CONFIG_EVENT_TRACING */
e077df4f 5838
24f1e32c 5839#ifdef CONFIG_HAVE_HW_BREAKPOINT
f5ffe02e 5840void perf_bp_event(struct perf_event *bp, void *data)
24f1e32c 5841{
f5ffe02e
FW
5842 struct perf_sample_data sample;
5843 struct pt_regs *regs = data;
5844
fd0d000b 5845 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
f5ffe02e 5846
a4eaf7f1 5847 if (!bp->hw.state && !perf_exclude_event(bp, regs))
a8b0ca17 5848 perf_swevent_event(bp, 1, &sample, regs);
24f1e32c
FW
5849}
5850#endif
5851
b0a873eb
PZ
5852/*
5853 * hrtimer based swevent callback
5854 */
f29ac756 5855
b0a873eb 5856static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
f29ac756 5857{
b0a873eb
PZ
5858 enum hrtimer_restart ret = HRTIMER_RESTART;
5859 struct perf_sample_data data;
5860 struct pt_regs *regs;
5861 struct perf_event *event;
5862 u64 period;
f29ac756 5863
b0a873eb 5864 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
ba3dd36c
PZ
5865
5866 if (event->state != PERF_EVENT_STATE_ACTIVE)
5867 return HRTIMER_NORESTART;
5868
b0a873eb 5869 event->pmu->read(event);
f344011c 5870
fd0d000b 5871 perf_sample_data_init(&data, 0, event->hw.last_period);
b0a873eb
PZ
5872 regs = get_irq_regs();
5873
5874 if (regs && !perf_exclude_event(event, regs)) {
77aeeebd 5875 if (!(event->attr.exclude_idle && is_idle_task(current)))
33b07b8b 5876 if (__perf_event_overflow(event, 1, &data, regs))
b0a873eb
PZ
5877 ret = HRTIMER_NORESTART;
5878 }
24f1e32c 5879
b0a873eb
PZ
5880 period = max_t(u64, 10000, event->hw.sample_period);
5881 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
24f1e32c 5882
b0a873eb 5883 return ret;
f29ac756
PZ
5884}
5885
b0a873eb 5886static void perf_swevent_start_hrtimer(struct perf_event *event)
5c92d124 5887{
b0a873eb 5888 struct hw_perf_event *hwc = &event->hw;
5d508e82
FBH
5889 s64 period;
5890
5891 if (!is_sampling_event(event))
5892 return;
f5ffe02e 5893
5d508e82
FBH
5894 period = local64_read(&hwc->period_left);
5895 if (period) {
5896 if (period < 0)
5897 period = 10000;
fa407f35 5898
5d508e82
FBH
5899 local64_set(&hwc->period_left, 0);
5900 } else {
5901 period = max_t(u64, 10000, hwc->sample_period);
5902 }
5903 __hrtimer_start_range_ns(&hwc->hrtimer,
b0a873eb 5904 ns_to_ktime(period), 0,
b5ab4cd5 5905 HRTIMER_MODE_REL_PINNED, 0);
24f1e32c 5906}
b0a873eb
PZ
5907
5908static void perf_swevent_cancel_hrtimer(struct perf_event *event)
24f1e32c 5909{
b0a873eb
PZ
5910 struct hw_perf_event *hwc = &event->hw;
5911
6c7e550f 5912 if (is_sampling_event(event)) {
b0a873eb 5913 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
fa407f35 5914 local64_set(&hwc->period_left, ktime_to_ns(remaining));
b0a873eb
PZ
5915
5916 hrtimer_cancel(&hwc->hrtimer);
5917 }
24f1e32c
FW
5918}
5919
ba3dd36c
PZ
5920static void perf_swevent_init_hrtimer(struct perf_event *event)
5921{
5922 struct hw_perf_event *hwc = &event->hw;
5923
5924 if (!is_sampling_event(event))
5925 return;
5926
5927 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5928 hwc->hrtimer.function = perf_swevent_hrtimer;
5929
5930 /*
5931 * Since hrtimers have a fixed rate, we can do a static freq->period
5932 * mapping and avoid the whole period adjust feedback stuff.
5933 */
5934 if (event->attr.freq) {
5935 long freq = event->attr.sample_freq;
5936
5937 event->attr.sample_period = NSEC_PER_SEC / freq;
5938 hwc->sample_period = event->attr.sample_period;
5939 local64_set(&hwc->period_left, hwc->sample_period);
778141e3 5940 hwc->last_period = hwc->sample_period;
ba3dd36c
PZ
5941 event->attr.freq = 0;
5942 }
5943}
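To make the static freq->period mapping above concrete (a worked example, not code from this file): a request for sample_freq = 4000 Hz becomes sample_period = NSEC_PER_SEC / 4000 = 250000 ns, and attr.freq is cleared so the adaptive period adjustment is never used for hrtimer-based events. Note also that perf_swevent_start_hrtimer() and perf_swevent_hrtimer() above clamp the programmed period to at least 10000 ns, so such events cannot fire faster than roughly 100 kHz.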
5944
b0a873eb
PZ
5945/*
5946 * Software event: cpu wall time clock
5947 */
5948
5949static void cpu_clock_event_update(struct perf_event *event)
24f1e32c 5950{
b0a873eb
PZ
5951 s64 prev;
5952 u64 now;
5953
a4eaf7f1 5954 now = local_clock();
b0a873eb
PZ
5955 prev = local64_xchg(&event->hw.prev_count, now);
5956 local64_add(now - prev, &event->count);
24f1e32c 5957}
24f1e32c 5958
a4eaf7f1 5959static void cpu_clock_event_start(struct perf_event *event, int flags)
b0a873eb 5960{
a4eaf7f1 5961 local64_set(&event->hw.prev_count, local_clock());
b0a873eb 5962 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
5963}
5964
a4eaf7f1 5965static void cpu_clock_event_stop(struct perf_event *event, int flags)
f29ac756 5966{
b0a873eb
PZ
5967 perf_swevent_cancel_hrtimer(event);
5968 cpu_clock_event_update(event);
5969}
f29ac756 5970
a4eaf7f1
PZ
5971static int cpu_clock_event_add(struct perf_event *event, int flags)
5972{
5973 if (flags & PERF_EF_START)
5974 cpu_clock_event_start(event, flags);
5975
5976 return 0;
5977}
5978
5979static void cpu_clock_event_del(struct perf_event *event, int flags)
5980{
5981 cpu_clock_event_stop(event, flags);
5982}
5983
b0a873eb
PZ
5984static void cpu_clock_event_read(struct perf_event *event)
5985{
5986 cpu_clock_event_update(event);
5987}
f344011c 5988
b0a873eb
PZ
5989static int cpu_clock_event_init(struct perf_event *event)
5990{
5991 if (event->attr.type != PERF_TYPE_SOFTWARE)
5992 return -ENOENT;
5993
5994 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5995 return -ENOENT;
5996
2481c5fa
SE
5997 /*
5998 * no branch sampling for software events
5999 */
6000 if (has_branch_stack(event))
6001 return -EOPNOTSUPP;
6002
ba3dd36c
PZ
6003 perf_swevent_init_hrtimer(event);
6004
b0a873eb 6005 return 0;
f29ac756
PZ
6006}
6007
b0a873eb 6008static struct pmu perf_cpu_clock = {
89a1e187
PZ
6009 .task_ctx_nr = perf_sw_context,
6010
b0a873eb 6011 .event_init = cpu_clock_event_init,
a4eaf7f1
PZ
6012 .add = cpu_clock_event_add,
6013 .del = cpu_clock_event_del,
6014 .start = cpu_clock_event_start,
6015 .stop = cpu_clock_event_stop,
b0a873eb 6016 .read = cpu_clock_event_read,
35edc2a5
PZ
6017
6018 .event_idx = perf_swevent_event_idx,
b0a873eb
PZ
6019};
6020
6021/*
6022 * Software event: task time clock
6023 */
6024
6025static void task_clock_event_update(struct perf_event *event, u64 now)
5c92d124 6026{
b0a873eb
PZ
6027 u64 prev;
6028 s64 delta;
5c92d124 6029
b0a873eb
PZ
6030 prev = local64_xchg(&event->hw.prev_count, now);
6031 delta = now - prev;
6032 local64_add(delta, &event->count);
6033}
5c92d124 6034
a4eaf7f1 6035static void task_clock_event_start(struct perf_event *event, int flags)
b0a873eb 6036{
a4eaf7f1 6037 local64_set(&event->hw.prev_count, event->ctx->time);
b0a873eb 6038 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
6039}
6040
a4eaf7f1 6041static void task_clock_event_stop(struct perf_event *event, int flags)
b0a873eb
PZ
6042{
6043 perf_swevent_cancel_hrtimer(event);
6044 task_clock_event_update(event, event->ctx->time);
a4eaf7f1
PZ
6045}
6046
6047static int task_clock_event_add(struct perf_event *event, int flags)
6048{
6049 if (flags & PERF_EF_START)
6050 task_clock_event_start(event, flags);
b0a873eb 6051
a4eaf7f1
PZ
6052 return 0;
6053}
6054
6055static void task_clock_event_del(struct perf_event *event, int flags)
6056{
6057 task_clock_event_stop(event, PERF_EF_UPDATE);
b0a873eb
PZ
6058}
6059
6060static void task_clock_event_read(struct perf_event *event)
6061{
768a06e2
PZ
6062 u64 now = perf_clock();
6063 u64 delta = now - event->ctx->timestamp;
6064 u64 time = event->ctx->time + delta;
b0a873eb
PZ
6065
6066 task_clock_event_update(event, time);
6067}
6068
6069static int task_clock_event_init(struct perf_event *event)
6fb2915d 6070{
b0a873eb
PZ
6071 if (event->attr.type != PERF_TYPE_SOFTWARE)
6072 return -ENOENT;
6073
6074 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
6075 return -ENOENT;
6076
2481c5fa
SE
6077 /*
6078 * no branch sampling for software events
6079 */
6080 if (has_branch_stack(event))
6081 return -EOPNOTSUPP;
6082
ba3dd36c
PZ
6083 perf_swevent_init_hrtimer(event);
6084
b0a873eb 6085 return 0;
6fb2915d
LZ
6086}
6087
b0a873eb 6088static struct pmu perf_task_clock = {
89a1e187
PZ
6089 .task_ctx_nr = perf_sw_context,
6090
b0a873eb 6091 .event_init = task_clock_event_init,
a4eaf7f1
PZ
6092 .add = task_clock_event_add,
6093 .del = task_clock_event_del,
6094 .start = task_clock_event_start,
6095 .stop = task_clock_event_stop,
b0a873eb 6096 .read = task_clock_event_read,
35edc2a5
PZ
6097
6098 .event_idx = perf_swevent_event_idx,
b0a873eb 6099};
6fb2915d 6100
ad5133b7 6101static void perf_pmu_nop_void(struct pmu *pmu)
e077df4f 6102{
e077df4f 6103}
6fb2915d 6104
ad5133b7 6105static int perf_pmu_nop_int(struct pmu *pmu)
6fb2915d 6106{
ad5133b7 6107 return 0;
6fb2915d
LZ
6108}
6109
ad5133b7 6110static void perf_pmu_start_txn(struct pmu *pmu)
6fb2915d 6111{
ad5133b7 6112 perf_pmu_disable(pmu);
6fb2915d
LZ
6113}
6114
ad5133b7
PZ
6115static int perf_pmu_commit_txn(struct pmu *pmu)
6116{
6117 perf_pmu_enable(pmu);
6118 return 0;
6119}
e077df4f 6120
ad5133b7 6121static void perf_pmu_cancel_txn(struct pmu *pmu)
24f1e32c 6122{
ad5133b7 6123 perf_pmu_enable(pmu);
24f1e32c
FW
6124}
6125
35edc2a5
PZ
6126static int perf_event_idx_default(struct perf_event *event)
6127{
6128 return event->hw.idx + 1;
6129}
6130
8dc85d54
PZ
6131/*
6132 * Ensures all contexts with the same task_ctx_nr have the same
6133 * pmu_cpu_context too.
6134 */
6135static void *find_pmu_context(int ctxn)
24f1e32c 6136{
8dc85d54 6137 struct pmu *pmu;
b326e956 6138
8dc85d54
PZ
6139 if (ctxn < 0)
6140 return NULL;
24f1e32c 6141
8dc85d54
PZ
6142 list_for_each_entry(pmu, &pmus, entry) {
6143 if (pmu->task_ctx_nr == ctxn)
6144 return pmu->pmu_cpu_context;
6145 }
24f1e32c 6146
8dc85d54 6147 return NULL;
24f1e32c
FW
6148}
6149
51676957 6150static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
24f1e32c 6151{
51676957
PZ
6152 int cpu;
6153
6154 for_each_possible_cpu(cpu) {
6155 struct perf_cpu_context *cpuctx;
6156
6157 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6158
3f1f3320
PZ
6159 if (cpuctx->unique_pmu == old_pmu)
6160 cpuctx->unique_pmu = pmu;
51676957
PZ
6161 }
6162}
6163
6164static void free_pmu_context(struct pmu *pmu)
6165{
6166 struct pmu *i;
f5ffe02e 6167
8dc85d54 6168 mutex_lock(&pmus_lock);
0475f9ea 6169 /*
8dc85d54 6170 * Like a real lame refcount.
0475f9ea 6171 */
51676957
PZ
6172 list_for_each_entry(i, &pmus, entry) {
6173 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
6174 update_pmu_context(i, pmu);
8dc85d54 6175 goto out;
51676957 6176 }
8dc85d54 6177 }
d6d020e9 6178
51676957 6179 free_percpu(pmu->pmu_cpu_context);
8dc85d54
PZ
6180out:
6181 mutex_unlock(&pmus_lock);
24f1e32c 6182}
2e80a82a 6183static struct idr pmu_idr;
d6d020e9 6184
abe43400
PZ
6185static ssize_t
6186type_show(struct device *dev, struct device_attribute *attr, char *page)
6187{
6188 struct pmu *pmu = dev_get_drvdata(dev);
6189
6190 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
6191}
6192
62b85639
SE
6193static ssize_t
6194perf_event_mux_interval_ms_show(struct device *dev,
6195 struct device_attribute *attr,
6196 char *page)
6197{
6198 struct pmu *pmu = dev_get_drvdata(dev);
6199
6200 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
6201}
6202
6203static ssize_t
6204perf_event_mux_interval_ms_store(struct device *dev,
6205 struct device_attribute *attr,
6206 const char *buf, size_t count)
6207{
6208 struct pmu *pmu = dev_get_drvdata(dev);
6209 int timer, cpu, ret;
6210
6211 ret = kstrtoint(buf, 0, &timer);
6212 if (ret)
6213 return ret;
6214
6215 if (timer < 1)
6216 return -EINVAL;
6217
6218 /* same value, nothing to do */
6219 if (timer == pmu->hrtimer_interval_ms)
6220 return count;
6221
6222 pmu->hrtimer_interval_ms = timer;
6223
6224 /* update all cpuctx for this PMU */
6225 for_each_possible_cpu(cpu) {
6226 struct perf_cpu_context *cpuctx;
6227 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6228 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
6229
6230 if (hrtimer_active(&cpuctx->hrtimer))
6231 hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
6232 }
6233
6234 return count;
6235}
6236
abe43400 6237static struct device_attribute pmu_dev_attrs[] = {
62b85639
SE
6238 __ATTR_RO(type),
6239 __ATTR_RW(perf_event_mux_interval_ms),
6240 __ATTR_NULL,
abe43400
PZ
6241};
6242
6243static int pmu_bus_running;
6244static struct bus_type pmu_bus = {
6245 .name = "event_source",
6246 .dev_attrs = pmu_dev_attrs,
6247};
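Given the attribute and bus names above, this control is typically visible to userspace as /sys/bus/event_source/devices/<pmu>/perf_event_mux_interval_ms (path inferred from the names here, not stated in this file); writing an integer number of milliseconds reprograms the multiplexing hrtimer interval on every CPU via perf_event_mux_interval_ms_store().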
6248
6249static void pmu_dev_release(struct device *dev)
6250{
6251 kfree(dev);
6252}
6253
6254static int pmu_dev_alloc(struct pmu *pmu)
6255{
6256 int ret = -ENOMEM;
6257
6258 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
6259 if (!pmu->dev)
6260 goto out;
6261
0c9d42ed 6262 pmu->dev->groups = pmu->attr_groups;
abe43400
PZ
6263 device_initialize(pmu->dev);
6264 ret = dev_set_name(pmu->dev, "%s", pmu->name);
6265 if (ret)
6266 goto free_dev;
6267
6268 dev_set_drvdata(pmu->dev, pmu);
6269 pmu->dev->bus = &pmu_bus;
6270 pmu->dev->release = pmu_dev_release;
6271 ret = device_add(pmu->dev);
6272 if (ret)
6273 goto free_dev;
6274
6275out:
6276 return ret;
6277
6278free_dev:
6279 put_device(pmu->dev);
6280 goto out;
6281}
6282
547e9fd7 6283static struct lock_class_key cpuctx_mutex;
facc4307 6284static struct lock_class_key cpuctx_lock;
547e9fd7 6285
03d8e80b 6286int perf_pmu_register(struct pmu *pmu, const char *name, int type)
24f1e32c 6287{
108b02cf 6288 int cpu, ret;
24f1e32c 6289
b0a873eb 6290 mutex_lock(&pmus_lock);
33696fc0
PZ
6291 ret = -ENOMEM;
6292 pmu->pmu_disable_count = alloc_percpu(int);
6293 if (!pmu->pmu_disable_count)
6294 goto unlock;
f29ac756 6295
2e80a82a
PZ
6296 pmu->type = -1;
6297 if (!name)
6298 goto skip_type;
6299 pmu->name = name;
6300
6301 if (type < 0) {
0e9c3be2
TH
6302 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
6303 if (type < 0) {
6304 ret = type;
2e80a82a
PZ
6305 goto free_pdc;
6306 }
6307 }
6308 pmu->type = type;
6309
abe43400
PZ
6310 if (pmu_bus_running) {
6311 ret = pmu_dev_alloc(pmu);
6312 if (ret)
6313 goto free_idr;
6314 }
6315
2e80a82a 6316skip_type:
8dc85d54
PZ
6317 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
6318 if (pmu->pmu_cpu_context)
6319 goto got_cpu_context;
f29ac756 6320
c4814202 6321 ret = -ENOMEM;
108b02cf
PZ
6322 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
6323 if (!pmu->pmu_cpu_context)
abe43400 6324 goto free_dev;
f344011c 6325
108b02cf
PZ
6326 for_each_possible_cpu(cpu) {
6327 struct perf_cpu_context *cpuctx;
6328
6329 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
eb184479 6330 __perf_event_init_context(&cpuctx->ctx);
547e9fd7 6331 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
facc4307 6332 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
b04243ef 6333 cpuctx->ctx.type = cpu_context;
108b02cf 6334 cpuctx->ctx.pmu = pmu;
9e630205
SE
6335
6336 __perf_cpu_hrtimer_init(cpuctx, cpu);
6337
e9d2b064 6338 INIT_LIST_HEAD(&cpuctx->rotation_list);
3f1f3320 6339 cpuctx->unique_pmu = pmu;
108b02cf 6340 }
76e1d904 6341
8dc85d54 6342got_cpu_context:
ad5133b7
PZ
6343 if (!pmu->start_txn) {
6344 if (pmu->pmu_enable) {
6345 /*
6346 * If we have pmu_enable/pmu_disable calls, install
6347 * transaction stubs that use that to try and batch
6348 * hardware accesses.
6349 */
6350 pmu->start_txn = perf_pmu_start_txn;
6351 pmu->commit_txn = perf_pmu_commit_txn;
6352 pmu->cancel_txn = perf_pmu_cancel_txn;
6353 } else {
6354 pmu->start_txn = perf_pmu_nop_void;
6355 pmu->commit_txn = perf_pmu_nop_int;
6356 pmu->cancel_txn = perf_pmu_nop_void;
f344011c 6357 }
5c92d124 6358 }
15dbf27c 6359
ad5133b7
PZ
6360 if (!pmu->pmu_enable) {
6361 pmu->pmu_enable = perf_pmu_nop_void;
6362 pmu->pmu_disable = perf_pmu_nop_void;
6363 }
6364
35edc2a5
PZ
6365 if (!pmu->event_idx)
6366 pmu->event_idx = perf_event_idx_default;
6367
b0a873eb 6368 list_add_rcu(&pmu->entry, &pmus);
33696fc0
PZ
6369 ret = 0;
6370unlock:
b0a873eb
PZ
6371 mutex_unlock(&pmus_lock);
6372
33696fc0 6373 return ret;
108b02cf 6374
abe43400
PZ
6375free_dev:
6376 device_del(pmu->dev);
6377 put_device(pmu->dev);
6378
2e80a82a
PZ
6379free_idr:
6380 if (pmu->type >= PERF_TYPE_MAX)
6381 idr_remove(&pmu_idr, pmu->type);
6382
108b02cf
PZ
6383free_pdc:
6384 free_percpu(pmu->pmu_disable_count);
6385 goto unlock;
f29ac756
PZ
6386}
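A hedged sketch of a minimal perf_pmu_register() caller, mirroring the struct pmu instances earlier in this file; every my_pmu_* identifier is hypothetical and the callbacks are bare stubs, not a working driver:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/perf_event.h>

static struct pmu my_pmu;	/* tentative definition, completed below */

static int my_pmu_event_init(struct perf_event *event)
{
	/* decline events that belong to other PMUs; see perf_init_event() above */
	if (event->attr.type != my_pmu.type)
		return -ENOENT;
	return 0;
}

static int  my_pmu_add(struct perf_event *event, int flags)   { return 0; }
static void my_pmu_del(struct perf_event *event, int flags)   { }
static void my_pmu_start(struct perf_event *event, int flags) { }
static void my_pmu_stop(struct perf_event *event, int flags)  { }
static void my_pmu_read(struct perf_event *event)             { }

static struct pmu my_pmu = {
	.task_ctx_nr	= perf_sw_context,
	.event_init	= my_pmu_event_init,
	.add		= my_pmu_add,
	.del		= my_pmu_del,
	.start		= my_pmu_start,
	.stop		= my_pmu_stop,
	.read		= my_pmu_read,
};

static int __init my_pmu_module_init(void)
{
	/* type == -1 asks perf_pmu_register() to allocate an id from pmu_idr */
	return perf_pmu_register(&my_pmu, "my_pmu", -1);
}
module_init(my_pmu_module_init);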
6387
b0a873eb 6388void perf_pmu_unregister(struct pmu *pmu)
5c92d124 6389{
b0a873eb
PZ
6390 mutex_lock(&pmus_lock);
6391 list_del_rcu(&pmu->entry);
6392 mutex_unlock(&pmus_lock);
5c92d124 6393
0475f9ea 6394 /*
cde8e884
PZ
6395 * We dereference the pmu list under both SRCU and regular RCU, so
6396 * synchronize against both of those.
0475f9ea 6397 */
b0a873eb 6398 synchronize_srcu(&pmus_srcu);
cde8e884 6399 synchronize_rcu();
d6d020e9 6400
33696fc0 6401 free_percpu(pmu->pmu_disable_count);
2e80a82a
PZ
6402 if (pmu->type >= PERF_TYPE_MAX)
6403 idr_remove(&pmu_idr, pmu->type);
abe43400
PZ
6404 device_del(pmu->dev);
6405 put_device(pmu->dev);
51676957 6406 free_pmu_context(pmu);
b0a873eb 6407}
d6d020e9 6408
b0a873eb
PZ
6409struct pmu *perf_init_event(struct perf_event *event)
6410{
6411 struct pmu *pmu = NULL;
6412 int idx;
940c5b29 6413 int ret;
b0a873eb
PZ
6414
6415 idx = srcu_read_lock(&pmus_srcu);
2e80a82a
PZ
6416
6417 rcu_read_lock();
6418 pmu = idr_find(&pmu_idr, event->attr.type);
6419 rcu_read_unlock();
940c5b29 6420 if (pmu) {
7e5b2a01 6421 event->pmu = pmu;
940c5b29
LM
6422 ret = pmu->event_init(event);
6423 if (ret)
6424 pmu = ERR_PTR(ret);
2e80a82a 6425 goto unlock;
940c5b29 6426 }
2e80a82a 6427
b0a873eb 6428 list_for_each_entry_rcu(pmu, &pmus, entry) {
7e5b2a01 6429 event->pmu = pmu;
940c5b29 6430 ret = pmu->event_init(event);
b0a873eb 6431 if (!ret)
e5f4d339 6432 goto unlock;
76e1d904 6433
b0a873eb
PZ
6434 if (ret != -ENOENT) {
6435 pmu = ERR_PTR(ret);
e5f4d339 6436 goto unlock;
f344011c 6437 }
5c92d124 6438 }
e5f4d339
PZ
6439 pmu = ERR_PTR(-ENOENT);
6440unlock:
b0a873eb 6441 srcu_read_unlock(&pmus_srcu, idx);
15dbf27c 6442
4aeb0b42 6443 return pmu;
5c92d124
IM
6444}
6445
0793a61d 6446/*
cdd6c482 6447 * Allocate and initialize an event structure
0793a61d 6448 */
cdd6c482 6449static struct perf_event *
c3f00c70 6450perf_event_alloc(struct perf_event_attr *attr, int cpu,
d580ff86
PZ
6451 struct task_struct *task,
6452 struct perf_event *group_leader,
6453 struct perf_event *parent_event,
4dc0da86
AK
6454 perf_overflow_handler_t overflow_handler,
6455 void *context)
0793a61d 6456{
51b0fe39 6457 struct pmu *pmu;
cdd6c482
IM
6458 struct perf_event *event;
6459 struct hw_perf_event *hwc;
d5d2bc0d 6460 long err;
0793a61d 6461
66832eb4
ON
6462 if ((unsigned)cpu >= nr_cpu_ids) {
6463 if (!task || cpu != -1)
6464 return ERR_PTR(-EINVAL);
6465 }
6466
c3f00c70 6467 event = kzalloc(sizeof(*event), GFP_KERNEL);
cdd6c482 6468 if (!event)
d5d2bc0d 6469 return ERR_PTR(-ENOMEM);
0793a61d 6470
04289bb9 6471 /*
cdd6c482 6472 * Single events are their own group leaders, with an
04289bb9
IM
6473 * empty sibling list:
6474 */
6475 if (!group_leader)
cdd6c482 6476 group_leader = event;
04289bb9 6477
cdd6c482
IM
6478 mutex_init(&event->child_mutex);
6479 INIT_LIST_HEAD(&event->child_list);
fccc714b 6480
cdd6c482
IM
6481 INIT_LIST_HEAD(&event->group_entry);
6482 INIT_LIST_HEAD(&event->event_entry);
6483 INIT_LIST_HEAD(&event->sibling_list);
10c6db11
PZ
6484 INIT_LIST_HEAD(&event->rb_entry);
6485
cdd6c482 6486 init_waitqueue_head(&event->waitq);
e360adbe 6487 init_irq_work(&event->pending, perf_pending_event);
0793a61d 6488
cdd6c482 6489 mutex_init(&event->mmap_mutex);
7b732a75 6490
a6fa941d 6491 atomic_long_set(&event->refcount, 1);
cdd6c482
IM
6492 event->cpu = cpu;
6493 event->attr = *attr;
6494 event->group_leader = group_leader;
6495 event->pmu = NULL;
cdd6c482 6496 event->oncpu = -1;
a96bbc16 6497
cdd6c482 6498 event->parent = parent_event;
b84fbc9f 6499
17cf22c3 6500 event->ns = get_pid_ns(task_active_pid_ns(current));
cdd6c482 6501 event->id = atomic64_inc_return(&perf_event_id);
a96bbc16 6502
cdd6c482 6503 event->state = PERF_EVENT_STATE_INACTIVE;
329d876d 6504
d580ff86
PZ
6505 if (task) {
6506 event->attach_state = PERF_ATTACH_TASK;
f22c1bb6
ON
6507
6508 if (attr->type == PERF_TYPE_TRACEPOINT)
6509 event->hw.tp_target = task;
d580ff86
PZ
6510#ifdef CONFIG_HAVE_HW_BREAKPOINT
6511 /*
6512 * hw_breakpoint is a bit difficult here..
6513 */
f22c1bb6 6514 else if (attr->type == PERF_TYPE_BREAKPOINT)
d580ff86
PZ
6515 event->hw.bp_target = task;
6516#endif
6517 }
6518
4dc0da86 6519 if (!overflow_handler && parent_event) {
b326e956 6520 overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
6521 context = parent_event->overflow_handler_context;
6522 }
66832eb4 6523
b326e956 6524 event->overflow_handler = overflow_handler;
4dc0da86 6525 event->overflow_handler_context = context;
97eaf530 6526
0231bb53 6527 perf_event__state_init(event);
a86ed508 6528
4aeb0b42 6529 pmu = NULL;
b8e83514 6530
cdd6c482 6531 hwc = &event->hw;
bd2b5b12 6532 hwc->sample_period = attr->sample_period;
0d48696f 6533 if (attr->freq && attr->sample_freq)
bd2b5b12 6534 hwc->sample_period = 1;
eced1dfc 6535 hwc->last_period = hwc->sample_period;
bd2b5b12 6536
e7850595 6537 local64_set(&hwc->period_left, hwc->sample_period);
60db5e09 6538
2023b359 6539 /*
cdd6c482 6540 * we currently do not support PERF_FORMAT_GROUP on inherited events
2023b359 6541 */
3dab77fb 6542 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
2023b359
PZ
6543 goto done;
6544
b0a873eb 6545 pmu = perf_init_event(event);
974802ea 6546
d5d2bc0d
PM
6547done:
6548 err = 0;
4aeb0b42 6549 if (!pmu)
d5d2bc0d 6550 err = -EINVAL;
4aeb0b42
RR
6551 else if (IS_ERR(pmu))
6552 err = PTR_ERR(pmu);
5c92d124 6553
d5d2bc0d 6554 if (err) {
cdd6c482
IM
6555 if (event->ns)
6556 put_pid_ns(event->ns);
6557 kfree(event);
d5d2bc0d 6558 return ERR_PTR(err);
621a01ea 6559 }
d5d2bc0d 6560
cdd6c482 6561 if (!event->parent) {
82cd6def 6562 if (event->attach_state & PERF_ATTACH_TASK)
c5905afb 6563 static_key_slow_inc(&perf_sched_events.key);
3af9e859 6564 if (event->attr.mmap || event->attr.mmap_data)
cdd6c482
IM
6565 atomic_inc(&nr_mmap_events);
6566 if (event->attr.comm)
6567 atomic_inc(&nr_comm_events);
6568 if (event->attr.task)
6569 atomic_inc(&nr_task_events);
6050cb0b
FW
6570 if (has_branch_stack(event)) {
6571 static_key_slow_inc(&perf_sched_events.key);
6572 if (!(event->attach_state & PERF_ATTACH_TASK))
6573 atomic_inc(&per_cpu(perf_branch_stack_events,
6574 event->cpu));
6575 }
927c7a9e
FW
6576 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6577 err = get_callchain_buffers();
6578 if (err) {
6579 free_event(event);
6580 return ERR_PTR(err);
6581 }
6582 }
f344011c 6583 }
9ee318a7 6584
cdd6c482 6585 return event;
0793a61d
TG
6586}
6587
cdd6c482
IM
6588static int perf_copy_attr(struct perf_event_attr __user *uattr,
6589 struct perf_event_attr *attr)
974802ea 6590{
974802ea 6591 u32 size;
cdf8073d 6592 int ret;
974802ea
PZ
6593
6594 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6595 return -EFAULT;
6596
6597 /*
6598 * zero the full structure, so that a short copy from userspace leaves the rest zeroed.
6599 */
6600 memset(attr, 0, sizeof(*attr));
6601
6602 ret = get_user(size, &uattr->size);
6603 if (ret)
6604 return ret;
6605
6606 if (size > PAGE_SIZE) /* silly large */
6607 goto err_size;
6608
6609 if (!size) /* abi compat */
6610 size = PERF_ATTR_SIZE_VER0;
6611
6612 if (size < PERF_ATTR_SIZE_VER0)
6613 goto err_size;
6614
6615 /*
6616 * If we're handed a bigger struct than we know of,
cdf8073d
IS
6617 * ensure all the unknown bits are 0 - i.e. new
6618 * user-space does not rely on any kernel feature
6619 * extensions we don't know about yet.
974802ea
PZ
6620 */
6621 if (size > sizeof(*attr)) {
cdf8073d
IS
6622 unsigned char __user *addr;
6623 unsigned char __user *end;
6624 unsigned char val;
974802ea 6625
cdf8073d
IS
6626 addr = (void __user *)uattr + sizeof(*attr);
6627 end = (void __user *)uattr + size;
974802ea 6628
cdf8073d 6629 for (; addr < end; addr++) {
974802ea
PZ
6630 ret = get_user(val, addr);
6631 if (ret)
6632 return ret;
6633 if (val)
6634 goto err_size;
6635 }
b3e62e35 6636 size = sizeof(*attr);
974802ea
PZ
6637 }
6638
6639 ret = copy_from_user(attr, uattr, size);
6640 if (ret)
6641 return -EFAULT;
6642
cd757645 6643 if (attr->__reserved_1)
974802ea
PZ
6644 return -EINVAL;
6645
6646 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
6647 return -EINVAL;
6648
6649 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
6650 return -EINVAL;
6651
bce38cd5
SE
6652 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
6653 u64 mask = attr->branch_sample_type;
6654
6655 /* only using defined bits */
6656 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
6657 return -EINVAL;
6658
6659 /* at least one branch bit must be set */
6660 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
6661 return -EINVAL;
6662
bce38cd5
SE
6663 /* propagate priv level, when not set for branch */
6664 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
6665
6666 /* exclude_kernel checked on syscall entry */
6667 if (!attr->exclude_kernel)
6668 mask |= PERF_SAMPLE_BRANCH_KERNEL;
6669
6670 if (!attr->exclude_user)
6671 mask |= PERF_SAMPLE_BRANCH_USER;
6672
6673 if (!attr->exclude_hv)
6674 mask |= PERF_SAMPLE_BRANCH_HV;
6675 /*
6676 * adjust user setting (for HW filter setup)
6677 */
6678 attr->branch_sample_type = mask;
6679 }
e712209a
SE
6680 /* privileged levels capture (kernel, hv): check permissions */
6681 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
2b923c8f
SE
6682 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6683 return -EACCES;
bce38cd5 6684 }
4018994f 6685
c5ebcedb 6686 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
4018994f 6687 ret = perf_reg_validate(attr->sample_regs_user);
c5ebcedb
JO
6688 if (ret)
6689 return ret;
6690 }
6691
6692 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
6693 if (!arch_perf_have_user_stack_dump())
6694 return -ENOSYS;
6695
6696 /*
6697 * We have __u32 type for the size, but so far
6698 * we can only use __u16 as maximum due to the
6699 * __u16 sample size limit.
6700 */
6701 if (attr->sample_stack_user >= USHRT_MAX)
6702 ret = -EINVAL;
6703 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
6704 ret = -EINVAL;
6705 }
4018994f 6706
974802ea
PZ
6707out:
6708 return ret;
6709
6710err_size:
6711 put_user(sizeof(*attr), &uattr->size);
6712 ret = -E2BIG;
6713 goto out;
6714}
6715
ac9721f3
PZ
6716static int
6717perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
a4be7c27 6718{
76369139 6719 struct ring_buffer *rb = NULL, *old_rb = NULL;
a4be7c27
PZ
6720 int ret = -EINVAL;
6721
ac9721f3 6722 if (!output_event)
a4be7c27
PZ
6723 goto set;
6724
ac9721f3
PZ
6725 /* don't allow circular references */
6726 if (event == output_event)
a4be7c27
PZ
6727 goto out;
6728
0f139300
PZ
6729 /*
6730 * Don't allow cross-cpu buffers
6731 */
6732 if (output_event->cpu != event->cpu)
6733 goto out;
6734
6735 /*
76369139 6736 * If it's not a per-cpu rb, it must be the same task.
0f139300
PZ
6737 */
6738 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
6739 goto out;
6740
a4be7c27 6741set:
cdd6c482 6742 mutex_lock(&event->mmap_mutex);
ac9721f3
PZ
6743 /* Can't redirect output if we've got an active mmap() */
6744 if (atomic_read(&event->mmap_count))
6745 goto unlock;
a4be7c27 6746
9bb5d40c
PZ
6747 old_rb = event->rb;
6748
ac9721f3 6749 if (output_event) {
76369139
FW
6750 /* get the rb we want to redirect to */
6751 rb = ring_buffer_get(output_event);
6752 if (!rb)
ac9721f3 6753 goto unlock;
a4be7c27
PZ
6754 }
6755
10c6db11
PZ
6756 if (old_rb)
6757 ring_buffer_detach(event, old_rb);
9bb5d40c
PZ
6758
6759 if (rb)
6760 ring_buffer_attach(event, rb);
6761
6762 rcu_assign_pointer(event->rb, rb);
6763
6764 if (old_rb) {
6765 ring_buffer_put(old_rb);
6766 /*
6767 * Since we detached before setting the new rb (so that we
6768 * could attach the new rb), we could have missed a wakeup.
6769 * Provide it now.
6770 */
6771 wake_up_all(&event->waitq);
6772 }
6773
a4be7c27 6774 ret = 0;
ac9721f3
PZ
6775unlock:
6776 mutex_unlock(&event->mmap_mutex);
6777
a4be7c27 6778out:
a4be7c27
PZ
6779 return ret;
6780}
6781
0793a61d 6782/**
cdd6c482 6783 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9f66a381 6784 *
cdd6c482 6785 * @attr_uptr: event_id type attributes for monitoring/sampling
0793a61d 6786 * @pid: target pid
9f66a381 6787 * @cpu: target cpu
cdd6c482 6788 * @group_fd: group leader event fd
0793a61d 6789 */
cdd6c482
IM
6790SYSCALL_DEFINE5(perf_event_open,
6791 struct perf_event_attr __user *, attr_uptr,
2743a5b0 6792 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
0793a61d 6793{
b04243ef
PZ
6794 struct perf_event *group_leader = NULL, *output_event = NULL;
6795 struct perf_event *event, *sibling;
cdd6c482
IM
6796 struct perf_event_attr attr;
6797 struct perf_event_context *ctx;
6798 struct file *event_file = NULL;
2903ff01 6799 struct fd group = {NULL, 0};
38a81da2 6800 struct task_struct *task = NULL;
89a1e187 6801 struct pmu *pmu;
ea635c64 6802 int event_fd;
b04243ef 6803 int move_group = 0;
dc86cabe 6804 int err;
0793a61d 6805
2743a5b0 6806 /* for future expandability... */
e5d1367f 6807 if (flags & ~PERF_FLAG_ALL)
2743a5b0
PM
6808 return -EINVAL;
6809
dc86cabe
IM
6810 err = perf_copy_attr(attr_uptr, &attr);
6811 if (err)
6812 return err;
eab656ae 6813
0764771d
PZ
6814 if (!attr.exclude_kernel) {
6815 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6816 return -EACCES;
6817 }
6818
df58ab24 6819 if (attr.freq) {
cdd6c482 6820 if (attr.sample_freq > sysctl_perf_event_sample_rate)
df58ab24
PZ
6821 return -EINVAL;
6822 }
6823
e5d1367f
SE
6824 /*
6825 * In cgroup mode, the pid argument is used to pass the fd
6826 * opened to the cgroup directory in cgroupfs. The cpu argument
6827 * designates the cpu on which to monitor threads from that
6828 * cgroup.
6829 */
6830 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
6831 return -EINVAL;
6832
ab72a702 6833 event_fd = get_unused_fd();
ea635c64
AV
6834 if (event_fd < 0)
6835 return event_fd;
6836
ac9721f3 6837 if (group_fd != -1) {
2903ff01
AV
6838 err = perf_fget_light(group_fd, &group);
6839 if (err)
d14b12d7 6840 goto err_fd;
2903ff01 6841 group_leader = group.file->private_data;
ac9721f3
PZ
6842 if (flags & PERF_FLAG_FD_OUTPUT)
6843 output_event = group_leader;
6844 if (flags & PERF_FLAG_FD_NO_GROUP)
6845 group_leader = NULL;
6846 }
6847
e5d1367f 6848 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
c6be5a5c
PZ
6849 task = find_lively_task_by_vpid(pid);
6850 if (IS_ERR(task)) {
6851 err = PTR_ERR(task);
6852 goto err_group_fd;
6853 }
6854 }
6855
fbfc623f
YZ
6856 get_online_cpus();
6857
4dc0da86
AK
6858 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
6859 NULL, NULL);
d14b12d7
SE
6860 if (IS_ERR(event)) {
6861 err = PTR_ERR(event);
c6be5a5c 6862 goto err_task;
d14b12d7
SE
6863 }
6864
e5d1367f
SE
6865 if (flags & PERF_FLAG_PID_CGROUP) {
6866 err = perf_cgroup_connect(pid, event, &attr, group_leader);
6867 if (err)
6868 goto err_alloc;
08309379
PZ
6869 /*
6870 * one more event:
6871 * - that has cgroup constraint on event->cpu
6872 * - that may need work on context switch
6873 */
6874 atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
c5905afb 6875 static_key_slow_inc(&perf_sched_events.key);
e5d1367f
SE
6876 }
6877
89a1e187
PZ
6878 /*
6879 * Special case software events and allow them to be part of
6880 * any hardware group.
6881 */
6882 pmu = event->pmu;
b04243ef
PZ
6883
6884 if (group_leader &&
6885 (is_software_event(event) != is_software_event(group_leader))) {
6886 if (is_software_event(event)) {
6887 /*
6888 * If event and group_leader are not both a software
6889 * event, and event is, then group leader is not.
6890 *
6891 * Allow the addition of software events to !software
6892 * groups; this is safe because software events never
6893 * fail to schedule.
6894 */
6895 pmu = group_leader->pmu;
6896 } else if (is_software_event(group_leader) &&
6897 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
6898 /*
6899 * In case the group is a pure software group, and we
6900 * try to add a hardware event, move the whole group to
6901 * the hardware context.
6902 */
6903 move_group = 1;
6904 }
6905 }
89a1e187
PZ
6906
6907 /*
6908 * Get the target context (task or percpu):
6909 */
e2d37cd2 6910 ctx = find_get_context(pmu, task, event->cpu);
89a1e187
PZ
6911 if (IS_ERR(ctx)) {
6912 err = PTR_ERR(ctx);
c6be5a5c 6913 goto err_alloc;
89a1e187
PZ
6914 }
6915
fd1edb3a
PZ
6916 if (task) {
6917 put_task_struct(task);
6918 task = NULL;
6919 }
6920
ccff286d 6921 /*
cdd6c482 6922 * Look up the group leader (we will attach this event to it):
04289bb9 6923 */
ac9721f3 6924 if (group_leader) {
dc86cabe 6925 err = -EINVAL;
04289bb9 6926
04289bb9 6927 /*
ccff286d
IM
6928 * Do not allow a recursive hierarchy (this new sibling
6929 * becoming part of another group-sibling):
6930 */
6931 if (group_leader->group_leader != group_leader)
c3f00c70 6932 goto err_context;
ccff286d
IM
6933 /*
6934 * Do not allow to attach to a group in a different
6935 * task or CPU context:
04289bb9 6936 */
b04243ef
PZ
6937 if (move_group) {
6938 if (group_leader->ctx->type != ctx->type)
6939 goto err_context;
6940 } else {
6941 if (group_leader->ctx != ctx)
6942 goto err_context;
6943 }
6944
3b6f9e5c
PM
6945 /*
6946 * Only a group leader can be exclusive or pinned
6947 */
0d48696f 6948 if (attr.exclusive || attr.pinned)
c3f00c70 6949 goto err_context;
ac9721f3
PZ
6950 }
6951
6952 if (output_event) {
6953 err = perf_event_set_output(event, output_event);
6954 if (err)
c3f00c70 6955 goto err_context;
ac9721f3 6956 }
0793a61d 6957
ea635c64
AV
6958 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
6959 if (IS_ERR(event_file)) {
6960 err = PTR_ERR(event_file);
c3f00c70 6961 goto err_context;
ea635c64 6962 }
9b51f66d 6963
b04243ef
PZ
6964 if (move_group) {
6965 struct perf_event_context *gctx = group_leader->ctx;
6966
6967 mutex_lock(&gctx->mutex);
fe4b04fa 6968 perf_remove_from_context(group_leader);
0231bb53
JO
6969
6970 /*
6971 * Removing from the context ends up with a disabled
6972 * event. What we want here is an event in the initial
6973 * startup state, ready to be added into a new context.
6974 */
6975 perf_event__state_init(group_leader);
b04243ef
PZ
6976 list_for_each_entry(sibling, &group_leader->sibling_list,
6977 group_entry) {
fe4b04fa 6978 perf_remove_from_context(sibling);
0231bb53 6979 perf_event__state_init(sibling);
b04243ef
PZ
6980 put_ctx(gctx);
6981 }
6982 mutex_unlock(&gctx->mutex);
6983 put_ctx(gctx);
ea635c64 6984 }
9b51f66d 6985
ad3a37de 6986 WARN_ON_ONCE(ctx->parent_ctx);
d859e29f 6987 mutex_lock(&ctx->mutex);
b04243ef
PZ
6988
6989 if (move_group) {
0cda4c02 6990 synchronize_rcu();
e2d37cd2 6991 perf_install_in_context(ctx, group_leader, event->cpu);
b04243ef
PZ
6992 get_ctx(ctx);
6993 list_for_each_entry(sibling, &group_leader->sibling_list,
6994 group_entry) {
e2d37cd2 6995 perf_install_in_context(ctx, sibling, event->cpu);
b04243ef
PZ
6996 get_ctx(ctx);
6997 }
6998 }
6999
e2d37cd2 7000 perf_install_in_context(ctx, event, event->cpu);
ad3a37de 7001 ++ctx->generation;
fe4b04fa 7002 perf_unpin_context(ctx);
d859e29f 7003 mutex_unlock(&ctx->mutex);
9b51f66d 7004
fbfc623f
YZ
7005 put_online_cpus();
7006
cdd6c482 7007 event->owner = current;
8882135b 7008
cdd6c482
IM
7009 mutex_lock(&current->perf_event_mutex);
7010 list_add_tail(&event->owner_entry, &current->perf_event_list);
7011 mutex_unlock(&current->perf_event_mutex);
082ff5a2 7012
c320c7b7
ACM
7013 /*
7014 * Precalculate sample_data sizes
7015 */
7016 perf_event__header_size(event);
6844c09d 7017 perf_event__id_header_size(event);
c320c7b7 7018
8a49542c
PZ
7019 /*
7020 * Drop the reference on the group_event after placing the
7021 * new event on the sibling_list. This ensures destruction
7022 * of the group leader will find the pointer to itself in
7023 * perf_group_detach().
7024 */
2903ff01 7025 fdput(group);
ea635c64
AV
7026 fd_install(event_fd, event_file);
7027 return event_fd;
0793a61d 7028
c3f00c70 7029err_context:
fe4b04fa 7030 perf_unpin_context(ctx);
ea635c64 7031 put_ctx(ctx);
c6be5a5c 7032err_alloc:
ea635c64 7033 free_event(event);
e7d0bc04 7034err_task:
fbfc623f 7035 put_online_cpus();
e7d0bc04
PZ
7036 if (task)
7037 put_task_struct(task);
89a1e187 7038err_group_fd:
2903ff01 7039 fdput(group);
ea635c64
AV
7040err_fd:
7041 put_unused_fd(event_fd);
dc86cabe 7042 return err;
0793a61d
TG
7043}
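For reference, a hedged userspace sketch of invoking this syscall (glibc provides no wrapper, so syscall(2) is used directly); the helper name is hypothetical and follows the usual perf_event_open(2) pattern:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_task_clock(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = PERF_TYPE_SOFTWARE;
	attr.config   = PERF_COUNT_SW_TASK_CLOCK;
	attr.disabled = 1;

	/* pid as given, cpu = -1 (any cpu), group_fd = -1 (no group), flags = 0 */
	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}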
7044
fb0459d7
AV
7045/**
7046 * perf_event_create_kernel_counter
7047 *
7048 * @attr: attributes of the counter to create
7049 * @cpu: cpu in which the counter is bound
38a81da2 7050 * @task: task to profile (NULL for percpu)
fb0459d7
AV
7051 */
7052struct perf_event *
7053perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
38a81da2 7054 struct task_struct *task,
4dc0da86
AK
7055 perf_overflow_handler_t overflow_handler,
7056 void *context)
fb0459d7 7057{
fb0459d7 7058 struct perf_event_context *ctx;
c3f00c70 7059 struct perf_event *event;
fb0459d7 7060 int err;
d859e29f 7061
fb0459d7
AV
7062 /*
7063 * Get the target context (task or percpu):
7064 */
d859e29f 7065
4dc0da86
AK
7066 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
7067 overflow_handler, context);
c3f00c70
PZ
7068 if (IS_ERR(event)) {
7069 err = PTR_ERR(event);
7070 goto err;
7071 }
d859e29f 7072
38a81da2 7073 ctx = find_get_context(event->pmu, task, cpu);
c6567f64
FW
7074 if (IS_ERR(ctx)) {
7075 err = PTR_ERR(ctx);
c3f00c70 7076 goto err_free;
d859e29f 7077 }
fb0459d7 7078
fb0459d7
AV
7079 WARN_ON_ONCE(ctx->parent_ctx);
7080 mutex_lock(&ctx->mutex);
7081 perf_install_in_context(ctx, event, cpu);
7082 ++ctx->generation;
fe4b04fa 7083 perf_unpin_context(ctx);
fb0459d7
AV
7084 mutex_unlock(&ctx->mutex);
7085
fb0459d7
AV
7086 return event;
7087
c3f00c70
PZ
7088err_free:
7089 free_event(event);
7090err:
c6567f64 7091 return ERR_PTR(err);
9b51f66d 7092}
fb0459d7 7093EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
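A hedged in-kernel usage sketch for the export above; my_overflow and create_cycle_counter are hypothetical names, and the handler signature follows perf_overflow_handler_t as used by perf_event_alloc() in this file:

#include <linux/perf_event.h>

static void my_overflow(struct perf_event *event,
			struct perf_sample_data *data,
			struct pt_regs *regs)
{
	/* runs from the counter overflow path; must not sleep */
}

static struct perf_event *create_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(struct perf_event_attr),
		.sample_period	= 1000000,
	};

	/* task == NULL binds the counter to @cpu rather than to a task */
	return perf_event_create_kernel_counter(&attr, cpu, NULL,
						my_overflow, NULL);
}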
9b51f66d 7094
0cda4c02
YZ
7095void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7096{
7097 struct perf_event_context *src_ctx;
7098 struct perf_event_context *dst_ctx;
7099 struct perf_event *event, *tmp;
7100 LIST_HEAD(events);
7101
7102 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
7103 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
7104
7105 mutex_lock(&src_ctx->mutex);
7106 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
7107 event_entry) {
7108 perf_remove_from_context(event);
7109 put_ctx(src_ctx);
7110 list_add(&event->event_entry, &events);
7111 }
7112 mutex_unlock(&src_ctx->mutex);
7113
7114 synchronize_rcu();
7115
7116 mutex_lock(&dst_ctx->mutex);
7117 list_for_each_entry_safe(event, tmp, &events, event_entry) {
7118 list_del(&event->event_entry);
7119 if (event->state >= PERF_EVENT_STATE_OFF)
7120 event->state = PERF_EVENT_STATE_INACTIVE;
7121 perf_install_in_context(dst_ctx, event, dst_cpu);
7122 get_ctx(dst_ctx);
7123 }
7124 mutex_unlock(&dst_ctx->mutex);
7125}
7126EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
7127
cdd6c482 7128static void sync_child_event(struct perf_event *child_event,
38b200d6 7129 struct task_struct *child)
d859e29f 7130{
cdd6c482 7131 struct perf_event *parent_event = child_event->parent;
8bc20959 7132 u64 child_val;
d859e29f 7133
cdd6c482
IM
7134 if (child_event->attr.inherit_stat)
7135 perf_event_read_event(child_event, child);
38b200d6 7136
b5e58793 7137 child_val = perf_event_count(child_event);
d859e29f
PM
7138
7139 /*
7140 * Add back the child's count to the parent's count:
7141 */
a6e6dea6 7142 atomic64_add(child_val, &parent_event->child_count);
cdd6c482
IM
7143 atomic64_add(child_event->total_time_enabled,
7144 &parent_event->child_total_time_enabled);
7145 atomic64_add(child_event->total_time_running,
7146 &parent_event->child_total_time_running);
d859e29f
PM
7147
7148 /*
cdd6c482 7149 * Remove this event from the parent's list
d859e29f 7150 */
cdd6c482
IM
7151 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7152 mutex_lock(&parent_event->child_mutex);
7153 list_del_init(&child_event->child_list);
7154 mutex_unlock(&parent_event->child_mutex);
d859e29f
PM
7155
7156 /*
cdd6c482 7157 * Release the parent event, if this was the last
d859e29f
PM
7158 * reference to it.
7159 */
a6fa941d 7160 put_event(parent_event);
d859e29f
PM
7161}
7162
9b51f66d 7163static void
cdd6c482
IM
7164__perf_event_exit_task(struct perf_event *child_event,
7165 struct perf_event_context *child_ctx,
38b200d6 7166 struct task_struct *child)
9b51f66d 7167{
38b435b1
PZ
7168 if (child_event->parent) {
7169 raw_spin_lock_irq(&child_ctx->lock);
7170 perf_group_detach(child_event);
7171 raw_spin_unlock_irq(&child_ctx->lock);
7172 }
9b51f66d 7173
fe4b04fa 7174 perf_remove_from_context(child_event);
0cc0c027 7175
9b51f66d 7176 /*
38b435b1 7177 * It can happen that the parent exits first, and has events
9b51f66d 7178 * that are still around due to the child reference. These
38b435b1 7179 * events need to be zapped.
9b51f66d 7180 */
38b435b1 7181 if (child_event->parent) {
cdd6c482
IM
7182 sync_child_event(child_event, child);
7183 free_event(child_event);
4bcf349a 7184 }
9b51f66d
IM
7185}
7186
8dc85d54 7187static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9b51f66d 7188{
cdd6c482
IM
7189 struct perf_event *child_event, *tmp;
7190 struct perf_event_context *child_ctx;
a63eaf34 7191 unsigned long flags;
9b51f66d 7192
8dc85d54 7193 if (likely(!child->perf_event_ctxp[ctxn])) {
cdd6c482 7194 perf_event_task(child, NULL, 0);
9b51f66d 7195 return;
9f498cc5 7196 }
9b51f66d 7197
a63eaf34 7198 local_irq_save(flags);
ad3a37de
PM
7199 /*
7200 * We can't reschedule here because interrupts are disabled,
7201 * and either child is current or it is a task that can't be
7202 * scheduled, so we are now safe from rescheduling changing
7203 * our context.
7204 */
806839b2 7205 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
c93f7669
PM
7206
7207 /*
7208 * Take the context lock here so that if find_get_context is
cdd6c482 7209 * reading child->perf_event_ctxp, we wait until it has
c93f7669
PM
7210 * incremented the context's refcount before we do put_ctx below.
7211 */
e625cce1 7212 raw_spin_lock(&child_ctx->lock);
04dc2dbb 7213 task_ctx_sched_out(child_ctx);
8dc85d54 7214 child->perf_event_ctxp[ctxn] = NULL;
71a851b4
PZ
7215 /*
7216 * If this context is a clone; unclone it so it can't get
7217 * swapped to another process while we're removing all
cdd6c482 7218 * the events from it.
71a851b4
PZ
7219 */
7220 unclone_ctx(child_ctx);
5e942bb3 7221 update_context_time(child_ctx);
e625cce1 7222 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
9f498cc5
PZ
7223
7224 /*
cdd6c482
IM
7225 * Report the task dead after unscheduling the events so that we
7226 * won't get any samples after PERF_RECORD_EXIT. We can however still
7227 * get a few PERF_RECORD_READ events.
9f498cc5 7228 */
cdd6c482 7229 perf_event_task(child, child_ctx, 0);
a63eaf34 7230
66fff224
PZ
7231 /*
7232 * We can recurse on the same lock type through:
7233 *
cdd6c482
IM
7234 * __perf_event_exit_task()
7235 * sync_child_event()
a6fa941d
AV
7236 * put_event()
7237 * mutex_lock(&ctx->mutex)
66fff224
PZ
7238 *
7239 * But since it's the parent context, it won't be the same instance.
7240 */
a0507c84 7241 mutex_lock(&child_ctx->mutex);
a63eaf34 7242
8bc20959 7243again:
889ff015
FW
7244 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
7245 group_entry)
7246 __perf_event_exit_task(child_event, child_ctx, child);
7247
7248 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
65abc865 7249 group_entry)
cdd6c482 7250 __perf_event_exit_task(child_event, child_ctx, child);
8bc20959
PZ
7251
7252 /*
cdd6c482 7253 * If the last event was a group event, it will have appended all
8bc20959
PZ
7254 * its siblings to the list, but we obtained 'tmp' before that which
7255 * will still point to the list head terminating the iteration.
7256 */
889ff015
FW
7257 if (!list_empty(&child_ctx->pinned_groups) ||
7258 !list_empty(&child_ctx->flexible_groups))
8bc20959 7259 goto again;
a63eaf34
PM
7260
7261 mutex_unlock(&child_ctx->mutex);
7262
7263 put_ctx(child_ctx);
9b51f66d
IM
7264}
7265
8dc85d54
PZ
7266/*
7267 * When a child task exits, feed back event values to parent events.
7268 */
7269void perf_event_exit_task(struct task_struct *child)
7270{
8882135b 7271 struct perf_event *event, *tmp;
8dc85d54
PZ
7272 int ctxn;
7273
8882135b
PZ
7274 mutex_lock(&child->perf_event_mutex);
7275 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
7276 owner_entry) {
7277 list_del_init(&event->owner_entry);
7278
7279 /*
7280 * Ensure the list deletion is visible before we clear
7281 * the owner, closes a race against perf_release() where
7282 * we need to serialize on the owner->perf_event_mutex.
7283 */
7284 smp_wmb();
7285 event->owner = NULL;
7286 }
7287 mutex_unlock(&child->perf_event_mutex);
7288
8dc85d54
PZ
7289 for_each_task_context_nr(ctxn)
7290 perf_event_exit_task_context(child, ctxn);
7291}
7292
889ff015
FW
7293static void perf_free_event(struct perf_event *event,
7294 struct perf_event_context *ctx)
7295{
7296 struct perf_event *parent = event->parent;
7297
7298 if (WARN_ON_ONCE(!parent))
7299 return;
7300
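	/*
	 * Inherited events always have a parent; unhook this event from the
	 * parent's child list and drop the reference on the parent that was
	 * taken when the event was inherited, then tear the event itself down.
	 */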
7301 mutex_lock(&parent->child_mutex);
7302 list_del_init(&event->child_list);
7303 mutex_unlock(&parent->child_mutex);
7304
a6fa941d 7305 put_event(parent);
889ff015 7306
8a49542c 7307 perf_group_detach(event);
889ff015
FW
7308 list_del_event(event, ctx);
7309 free_event(event);
7310}
7311
bbbee908
PZ
7312/*
7313 * free an unexposed, unused context as created by inheritance by
8dc85d54 7314 * perf_event_init_task below, used by fork() in case of failure.
bbbee908 7315 */
cdd6c482 7316void perf_event_free_task(struct task_struct *task)
bbbee908 7317{
8dc85d54 7318 struct perf_event_context *ctx;
cdd6c482 7319 struct perf_event *event, *tmp;
8dc85d54 7320 int ctxn;
bbbee908 7321
8dc85d54
PZ
7322 for_each_task_context_nr(ctxn) {
7323 ctx = task->perf_event_ctxp[ctxn];
7324 if (!ctx)
7325 continue;
bbbee908 7326
8dc85d54 7327 mutex_lock(&ctx->mutex);
bbbee908 7328again:
8dc85d54
PZ
7329 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
7330 group_entry)
7331 perf_free_event(event, ctx);
bbbee908 7332
8dc85d54
PZ
7333 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
7334 group_entry)
7335 perf_free_event(event, ctx);
bbbee908 7336
8dc85d54
PZ
7337 if (!list_empty(&ctx->pinned_groups) ||
7338 !list_empty(&ctx->flexible_groups))
7339 goto again;
bbbee908 7340
8dc85d54 7341 mutex_unlock(&ctx->mutex);
bbbee908 7342
8dc85d54
PZ
7343 put_ctx(ctx);
7344 }
889ff015
FW
7345}
7346
4e231c79
PZ
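/*
 * Sanity check for the final stage of task teardown: every per-task context
 * should already have been released via perf_event_exit_task() or
 * perf_event_free_task(); warn if one is still attached.
 */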
7347void perf_event_delayed_put(struct task_struct *task)
7348{
7349 int ctxn;
7350
7351 for_each_task_context_nr(ctxn)
7352 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
7353}
7354
97dee4f3
PZ
7355/*
7356 * inherit an event from parent task to child task:
7357 */
7358static struct perf_event *
7359inherit_event(struct perf_event *parent_event,
7360 struct task_struct *parent,
7361 struct perf_event_context *parent_ctx,
7362 struct task_struct *child,
7363 struct perf_event *group_leader,
7364 struct perf_event_context *child_ctx)
7365{
7366 struct perf_event *child_event;
cee010ec 7367 unsigned long flags;
97dee4f3
PZ
7368
7369 /*
7370 * Instead of creating recursive hierarchies of events,
7371 * we link inherited events back to the original parent,
7372 * which has a filp for sure, which we use as the reference
7373 * count:
7374 */
7375 if (parent_event->parent)
7376 parent_event = parent_event->parent;
7377
7378 child_event = perf_event_alloc(&parent_event->attr,
7379 parent_event->cpu,
d580ff86 7380 child,
97dee4f3 7381 group_leader, parent_event,
4dc0da86 7382 NULL, NULL);
97dee4f3
PZ
7383 if (IS_ERR(child_event))
7384 return child_event;
a6fa941d
AV
7385
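	/*
	 * Take a reference on the (top-level) parent event. If its refcount
	 * has already dropped to zero the parent is on its way out, so don't
	 * inherit from it: free the freshly allocated child event and return
	 * NULL rather than an error.
	 */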
7386 if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
7387 free_event(child_event);
7388 return NULL;
7389 }
7390
97dee4f3
PZ
7391 get_ctx(child_ctx);
7392
7393 /*
7394 * Make the child state follow the state of the parent event,
7395 * not its attr.disabled bit. We hold the parent's mutex,
7396 * so we won't race with perf_event_{en, dis}able_family.
7397 */
7398 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
7399 child_event->state = PERF_EVENT_STATE_INACTIVE;
7400 else
7401 child_event->state = PERF_EVENT_STATE_OFF;
7402
7403 if (parent_event->attr.freq) {
7404 u64 sample_period = parent_event->hw.sample_period;
7405 struct hw_perf_event *hwc = &child_event->hw;
7406
7407 hwc->sample_period = sample_period;
7408 hwc->last_period = sample_period;
7409
7410 local64_set(&hwc->period_left, sample_period);
7411 }
7412
7413 child_event->ctx = child_ctx;
7414 child_event->overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
7415 child_event->overflow_handler_context
7416 = parent_event->overflow_handler_context;
97dee4f3 7417
614b6780
TG
7418 /*
7419 * Precalculate sample_data sizes
7420 */
7421 perf_event__header_size(child_event);
6844c09d 7422 perf_event__id_header_size(child_event);
614b6780 7423
97dee4f3
PZ
7424 /*
7425 * Link it up in the child's context:
7426 */
cee010ec 7427 raw_spin_lock_irqsave(&child_ctx->lock, flags);
97dee4f3 7428 add_event_to_ctx(child_event, child_ctx);
cee010ec 7429 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
97dee4f3 7430
97dee4f3
PZ
7431 /*
7432 * Link this into the parent event's child list
7433 */
7434 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7435 mutex_lock(&parent_event->child_mutex);
7436 list_add_tail(&child_event->child_list, &parent_event->child_list);
7437 mutex_unlock(&parent_event->child_mutex);
7438
7439 return child_event;
7440}
7441
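/*
 * Inherit an entire group: clone the group leader first, then clone each
 * sibling into the child context with the new leader as its group leader.
 */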
7442static int inherit_group(struct perf_event *parent_event,
7443 struct task_struct *parent,
7444 struct perf_event_context *parent_ctx,
7445 struct task_struct *child,
7446 struct perf_event_context *child_ctx)
7447{
7448 struct perf_event *leader;
7449 struct perf_event *sub;
7450 struct perf_event *child_ctr;
7451
7452 leader = inherit_event(parent_event, parent, parent_ctx,
7453 child, NULL, child_ctx);
7454 if (IS_ERR(leader))
7455 return PTR_ERR(leader);
7456 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
7457 child_ctr = inherit_event(sub, parent, parent_ctx,
7458 child, leader, child_ctx);
7459 if (IS_ERR(child_ctr))
7460 return PTR_ERR(child_ctr);
7461 }
7462 return 0;
889ff015
FW
7463}
7464
7465static int
7466inherit_task_group(struct perf_event *event, struct task_struct *parent,
7467 struct perf_event_context *parent_ctx,
8dc85d54 7468 struct task_struct *child, int ctxn,
889ff015
FW
7469 int *inherited_all)
7470{
7471 int ret;
8dc85d54 7472 struct perf_event_context *child_ctx;
889ff015
FW
7473
7474 if (!event->attr.inherit) {
7475 *inherited_all = 0;
7476 return 0;
bbbee908
PZ
7477 }
7478
fe4b04fa 7479 child_ctx = child->perf_event_ctxp[ctxn];
889ff015
FW
7480 if (!child_ctx) {
7481 /*
7482 * This is executed from the parent task context, so
7483 * inherit events that have been marked for cloning.
7484 * First allocate and initialize a context for the
7485 * child.
7486 */
bbbee908 7487
734df5ab 7488 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
889ff015
FW
7489 if (!child_ctx)
7490 return -ENOMEM;
bbbee908 7491
8dc85d54 7492 child->perf_event_ctxp[ctxn] = child_ctx;
889ff015
FW
7493 }
7494
7495 ret = inherit_group(event, parent, parent_ctx,
7496 child, child_ctx);
7497
7498 if (ret)
7499 *inherited_all = 0;
7500
7501 return ret;
bbbee908
PZ
7502}
7503
9b51f66d 7504/*
cdd6c482 7505 * Initialize the perf_event context in task_struct
9b51f66d 7506 */
8dc85d54 7507int perf_event_init_context(struct task_struct *child, int ctxn)
9b51f66d 7508{
889ff015 7509 struct perf_event_context *child_ctx, *parent_ctx;
cdd6c482
IM
7510 struct perf_event_context *cloned_ctx;
7511 struct perf_event *event;
9b51f66d 7512 struct task_struct *parent = current;
564c2b21 7513 int inherited_all = 1;
dddd3379 7514 unsigned long flags;
6ab423e0 7515 int ret = 0;
9b51f66d 7516
8dc85d54 7517 if (likely(!parent->perf_event_ctxp[ctxn]))
6ab423e0
PZ
7518 return 0;
7519
ad3a37de 7520 /*
25346b93
PM
7521 * If the parent's context is a clone, pin it so it won't get
7522 * swapped under us.
ad3a37de 7523 */
8dc85d54 7524 parent_ctx = perf_pin_task_context(parent, ctxn);
25346b93 7525
ad3a37de
PM
7526 /*
7527 * No need to check if parent_ctx != NULL here; since we saw
7528 * it non-NULL earlier, the only reason for it to become NULL
7529 * is if we exit, and since we're currently in the middle of
7530 * a fork we can't be exiting at the same time.
7531 */
ad3a37de 7532
9b51f66d
IM
7533 /*
7534 * Lock the parent list. No need to lock the child - not PID
7535 * hashed yet and not running, so nobody can access it.
7536 */
d859e29f 7537 mutex_lock(&parent_ctx->mutex);
9b51f66d
IM
7538
7539 /*
7540 * We don't have to disable NMIs - we are only looking at
7541 * the list, not manipulating it:
7542 */
889ff015 7543 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
8dc85d54
PZ
7544 ret = inherit_task_group(event, parent, parent_ctx,
7545 child, ctxn, &inherited_all);
889ff015
FW
7546 if (ret)
7547 break;
7548 }
b93f7978 7549
dddd3379
TG
7550 /*
7551 * We can't hold ctx->lock when iterating the ->flexible_groups list due
7552 * to allocations, but we need to prevent rotation because
7553 * rotate_ctx() will change the list from interrupt context.
7554 */
7555 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7556 parent_ctx->rotate_disable = 1;
7557 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7558
889ff015 7559 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
8dc85d54
PZ
7560 ret = inherit_task_group(event, parent, parent_ctx,
7561 child, ctxn, &inherited_all);
889ff015 7562 if (ret)
9b51f66d 7563 break;
564c2b21
PM
7564 }
7565
dddd3379
TG
7566 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7567 parent_ctx->rotate_disable = 0;
dddd3379 7568
8dc85d54 7569 child_ctx = child->perf_event_ctxp[ctxn];
889ff015 7570
05cbaa28 7571 if (child_ctx && inherited_all) {
564c2b21
PM
7572 /*
7573 * Mark the child context as a clone of the parent
7574 * context, or of whatever the parent is a clone of.
c5ed5145
PZ
7575 *
7576 * Note that if the parent is a clone, the holding of
7577 * parent_ctx->lock prevents it from being uncloned.
564c2b21 7578 */
c5ed5145 7579 cloned_ctx = parent_ctx->parent_ctx;
ad3a37de
PM
7580 if (cloned_ctx) {
7581 child_ctx->parent_ctx = cloned_ctx;
25346b93 7582 child_ctx->parent_gen = parent_ctx->parent_gen;
564c2b21
PM
7583 } else {
7584 child_ctx->parent_ctx = parent_ctx;
7585 child_ctx->parent_gen = parent_ctx->generation;
7586 }
7587 get_ctx(child_ctx->parent_ctx);
9b51f66d
IM
7588 }
7589
c5ed5145 7590 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
d859e29f 7591 mutex_unlock(&parent_ctx->mutex);
6ab423e0 7592
25346b93 7593 perf_unpin_context(parent_ctx);
fe4b04fa 7594 put_ctx(parent_ctx);
ad3a37de 7595
6ab423e0 7596 return ret;
9b51f66d
IM
7597}
7598
8dc85d54
PZ
7599/*
7600 * Initialize the perf_event context in task_struct
7601 */
7602int perf_event_init_task(struct task_struct *child)
7603{
7604 int ctxn, ret;
7605
8550d7cb
ON
7606 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
7607 mutex_init(&child->perf_event_mutex);
7608 INIT_LIST_HEAD(&child->perf_event_list);
7609
8dc85d54
PZ
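	/*
	 * Set up the child's contexts one context class at a time; if any of
	 * them fails, the fork error path cleans up whatever was already
	 * inherited via perf_event_free_task().
	 */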
7610 for_each_task_context_nr(ctxn) {
7611 ret = perf_event_init_context(child, ctxn);
7612 if (ret)
7613 return ret;
7614 }
7615
7616 return 0;
7617}
7618
220b140b
PM
7619static void __init perf_event_init_all_cpus(void)
7620{
b28ab83c 7621 struct swevent_htable *swhash;
220b140b 7622 int cpu;
220b140b
PM
7623
7624 for_each_possible_cpu(cpu) {
b28ab83c
PZ
7625 swhash = &per_cpu(swevent_htable, cpu);
7626 mutex_init(&swhash->hlist_mutex);
e9d2b064 7627 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
220b140b
PM
7628 }
7629}
7630
0db0628d 7631static void perf_event_init_cpu(int cpu)
0793a61d 7632{
108b02cf 7633 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
0793a61d 7634
b28ab83c 7635 mutex_lock(&swhash->hlist_mutex);
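	/*
	 * If software events already hold references to this CPU's hash list
	 * (e.g. the CPU is coming back online), re-create it now.
	 */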
4536e4d1 7636 if (swhash->hlist_refcount > 0) {
76e1d904
FW
7637 struct swevent_hlist *hlist;
7638
b28ab83c
PZ
7639 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
7640 WARN_ON(!hlist);
7641 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 7642 }
b28ab83c 7643 mutex_unlock(&swhash->hlist_mutex);
0793a61d
TG
7644}
7645
c277443c 7646#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
e9d2b064 7647static void perf_pmu_rotate_stop(struct pmu *pmu)
0793a61d 7648{
e9d2b064
PZ
7649 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
7650
7651 WARN_ON(!irqs_disabled());
7652
7653 list_del_init(&cpuctx->rotation_list);
7654}
7655
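/*
 * Runs on the CPU that is going down (via the cross-call issued from
 * perf_event_exit_cpu_context() below): stop context rotation and detach
 * every pinned and flexible event from this context.
 */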
108b02cf 7656static void __perf_event_exit_context(void *__info)
0793a61d 7657{
108b02cf 7658 struct perf_event_context *ctx = __info;
cdd6c482 7659 struct perf_event *event, *tmp;
0793a61d 7660
108b02cf 7661 perf_pmu_rotate_stop(ctx->pmu);
b5ab4cd5 7662
889ff015 7663 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
fe4b04fa 7664 __perf_remove_from_context(event);
889ff015 7665 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
fe4b04fa 7666 __perf_remove_from_context(event);
0793a61d 7667}
108b02cf
PZ
7668
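/*
 * For each registered PMU, take the per-CPU context of the outgoing CPU and
 * have that CPU run __perf_event_exit_context() on it, holding the context
 * mutex across the cross-call.
 */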
7669static void perf_event_exit_cpu_context(int cpu)
7670{
7671 struct perf_event_context *ctx;
7672 struct pmu *pmu;
7673 int idx;
7674
7675 idx = srcu_read_lock(&pmus_srcu);
7676 list_for_each_entry_rcu(pmu, &pmus, entry) {
917bdd1c 7677 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
108b02cf
PZ
7678
7679 mutex_lock(&ctx->mutex);
7680 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
7681 mutex_unlock(&ctx->mutex);
7682 }
7683 srcu_read_unlock(&pmus_srcu, idx);
108b02cf
PZ
7684}
7685
cdd6c482 7686static void perf_event_exit_cpu(int cpu)
0793a61d 7687{
b28ab83c 7688 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
d859e29f 7689
b28ab83c
PZ
7690 mutex_lock(&swhash->hlist_mutex);
7691 swevent_hlist_release(swhash);
7692 mutex_unlock(&swhash->hlist_mutex);
76e1d904 7693
108b02cf 7694 perf_event_exit_cpu_context(cpu);
0793a61d
TG
7695}
7696#else
cdd6c482 7697static inline void perf_event_exit_cpu(int cpu) { }
0793a61d
TG
7698#endif
7699
c277443c
PZ
7700static int
7701perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
7702{
7703 int cpu;
7704
7705 for_each_online_cpu(cpu)
7706 perf_event_exit_cpu(cpu);
7707
7708 return NOTIFY_OK;
7709}
7710
7711/*
7712 * Run the perf reboot notifier at the very last possible moment so that
7713 * the generic watchdog code runs as long as possible.
7714 */
7715static struct notifier_block perf_reboot_notifier = {
7716 .notifier_call = perf_reboot,
7717 .priority = INT_MIN,
7718};
7719
0db0628d 7720static int
0793a61d
TG
7721perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
7722{
7723 unsigned int cpu = (long)hcpu;
7724
4536e4d1 7725 switch (action & ~CPU_TASKS_FROZEN) {
0793a61d
TG
7726
7727 case CPU_UP_PREPARE:
5e11637e 7728 case CPU_DOWN_FAILED:
cdd6c482 7729 perf_event_init_cpu(cpu);
0793a61d
TG
7730 break;
7731
5e11637e 7732 case CPU_UP_CANCELED:
0793a61d 7733 case CPU_DOWN_PREPARE:
cdd6c482 7734 perf_event_exit_cpu(cpu);
0793a61d 7735 break;
0793a61d
TG
7736 default:
7737 break;
7738 }
7739
7740 return NOTIFY_OK;
7741}
7742
cdd6c482 7743void __init perf_event_init(void)
0793a61d 7744{
3c502e7a
JW
7745 int ret;
7746
2e80a82a
PZ
7747 idr_init(&pmu_idr);
7748
220b140b 7749 perf_event_init_all_cpus();
b0a873eb 7750 init_srcu_struct(&pmus_srcu);
2e80a82a
PZ
7751 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
7752 perf_pmu_register(&perf_cpu_clock, NULL, -1);
7753 perf_pmu_register(&perf_task_clock, NULL, -1);
b0a873eb
PZ
7754 perf_tp_register();
7755 perf_cpu_notifier(perf_cpu_notify);
c277443c 7756 register_reboot_notifier(&perf_reboot_notifier);
3c502e7a
JW
7757
7758 ret = init_hw_breakpoint();
7759 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
b2029520
GN
7760
7761 /* do not patch jump label more than once per second */
7762 jump_label_rate_limit(&perf_sched_events, HZ);
b01c3a00
JO
7763
7764 /*
7765 * Build time assertion that we keep the data_head at the intended
7766 * location. IOW, validation that we got the __reserved[] size right.
7767 */
7768 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
7769 != 1024);
0793a61d 7770}
abe43400
PZ
7771
7772static int __init perf_event_sysfs_init(void)
7773{
7774 struct pmu *pmu;
7775 int ret;
7776
7777 mutex_lock(&pmus_lock);
7778
7779 ret = bus_register(&pmu_bus);
7780 if (ret)
7781 goto unlock;
7782
7783 list_for_each_entry(pmu, &pmus, entry) {
7784 if (!pmu->name || pmu->type < 0)
7785 continue;
7786
7787 ret = pmu_dev_alloc(pmu);
7788 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
7789 }
7790 pmu_bus_running = 1;
7791 ret = 0;
7792
7793unlock:
7794 mutex_unlock(&pmus_lock);
7795
7796 return ret;
7797}
7798device_initcall(perf_event_sysfs_init);
e5d1367f
SE
7799
7800#ifdef CONFIG_CGROUP_PERF
92fb9748 7801static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
e5d1367f
SE
7802{
7803 struct perf_cgroup *jc;
e5d1367f 7804
1b15d055 7805 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
e5d1367f
SE
7806 if (!jc)
7807 return ERR_PTR(-ENOMEM);
7808
e5d1367f
SE
7809 jc->info = alloc_percpu(struct perf_cgroup_info);
7810 if (!jc->info) {
7811 kfree(jc);
7812 return ERR_PTR(-ENOMEM);
7813 }
7814
e5d1367f
SE
7815 return &jc->css;
7816}
7817
92fb9748 7818static void perf_cgroup_css_free(struct cgroup *cont)
e5d1367f
SE
7819{
7820 struct perf_cgroup *jc;
7821 jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
7822 struct perf_cgroup, css);
7823 free_percpu(jc->info);
7824 kfree(jc);
7825}
7826
7827static int __perf_cgroup_move(void *info)
7828{
7829 struct task_struct *task = info;
7830 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
7831 return 0;
7832}
7833
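/*
 * For every task being attached, run __perf_cgroup_move() on the task's CPU
 * so the old cgroup's events are scheduled out and the new cgroup's events
 * are scheduled in.
 */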
761b3ef5 7834static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
e5d1367f 7835{
bb9d97b6
TH
7836 struct task_struct *task;
7837
7838 cgroup_taskset_for_each(task, cgrp, tset)
7839 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
7840}
7841
761b3ef5
LZ
7842static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
7843 struct task_struct *task)
e5d1367f
SE
7844{
7845 /*
7846 * cgroup_exit() is called in the copy_process() failure path.
7847 * Ignore this case since the task hasn't run yet; this avoids
7848 * trying to poke a half freed task state from generic code.
7849 */
7850 if (!(task->flags & PF_EXITING))
7851 return;
7852
bb9d97b6 7853 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
7854}
7855
7856struct cgroup_subsys perf_subsys = {
e7e7ee2e
IM
7857 .name = "perf_event",
7858 .subsys_id = perf_subsys_id,
92fb9748
TH
7859 .css_alloc = perf_cgroup_css_alloc,
7860 .css_free = perf_cgroup_css_free,
e7e7ee2e 7861 .exit = perf_cgroup_exit,
bb9d97b6 7862 .attach = perf_cgroup_attach,
e5d1367f
SE
7863};
7864#endif /* CONFIG_CGROUP_PERF */