perf_counter: Fix the cpu_clock software counter
kernel/perf_counter.c
/*
 * Performance counter core code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

/*
 * Mutex for (sysadmin-configurable) counter reservations:
 */
static DEFINE_MUTEX(perf_resource_mutex);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

u64 __weak hw_perf_save_disable(void)		{ return 0; }
void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
void __weak hw_perf_counter_setup(void)		{ barrier(); }

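/*
 * Example (editorial sketch, not part of the original file): an
 * architecture overrides the weak defaults above simply by providing
 * strong definitions with the same signatures.  The control "shadow"
 * below is a hypothetical placeholder, not a real PMU interface.
 */
#if 0
static u64 pmu_ctrl_shadow;			/* hypothetical global PMU control state */

u64 hw_perf_save_disable(void)
{
	u64 ctrl = pmu_ctrl_shadow;		/* remember what was enabled */

	pmu_ctrl_shadow = 0;			/* globally disable all counters */
	return ctrl;
}

void hw_perf_restore(u64 ctrl)
{
	pmu_ctrl_shadow = ctrl;			/* re-enable what was active before */
}
#endif
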
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (counter->group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
}

static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	list_del_init(&counter->list_entry);

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_del_init(&sibling->list_entry);
		list_add_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

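/*
 * Example (editorial sketch, not part of the original file): after the
 * two helpers above, ctx->counter_list contains only group leaders and
 * the other members of a group hang off the leader's sibling_list, so
 * visiting one whole group looks like this (visit() is a hypothetical
 * callback):
 */
#if 0
static void visit_group(struct perf_counter *group_leader,
			void (*visit)(struct perf_counter *counter))
{
	struct perf_counter *counter;

	visit(group_leader);
	list_for_each_entry(counter, &group_leader->sibling_list, list_entry)
		visit(counter);			/* siblings, excluding the leader */
}
#endif
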
0793a61d
TG
88/*
89 * Cross CPU call to remove a performance counter
90 *
91 * We disable the counter on the hardware level first. After that we
92 * remove it from the context list.
93 */
04289bb9 94static void __perf_counter_remove_from_context(void *info)
0793a61d
TG
95{
96 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
97 struct perf_counter *counter = info;
98 struct perf_counter_context *ctx = counter->ctx;
9b51f66d 99 unsigned long flags;
5c92d124 100 u64 perf_flags;
0793a61d
TG
101
102 /*
103 * If this is a task context, we need to check whether it is
104 * the current task context of this cpu. If not it has been
105 * scheduled out before the smp call arrived.
106 */
107 if (ctx->task && cpuctx->task_ctx != ctx)
108 return;
109
aa9c4c0f
IM
110 curr_rq_lock_irq_save(&flags);
111 spin_lock(&ctx->lock);
0793a61d 112
6a930700 113 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
6a930700 114 counter->state = PERF_COUNTER_STATE_INACTIVE;
235c7fc7 115 counter->hw_ops->disable(counter);
0793a61d
TG
116 ctx->nr_active--;
117 cpuctx->active_oncpu--;
118 counter->task = NULL;
235c7fc7 119 counter->oncpu = -1;
0793a61d
TG
120 }
121 ctx->nr_counters--;
122
123 /*
124 * Protect the list operation against NMI by disabling the
125 * counters on a global level. NOP for non NMI based counters.
126 */
01b2838c 127 perf_flags = hw_perf_save_disable();
04289bb9 128 list_del_counter(counter, ctx);
01b2838c 129 hw_perf_restore(perf_flags);
0793a61d
TG
130
131 if (!ctx->task) {
132 /*
133 * Allow more per task counters with respect to the
134 * reservation:
135 */
136 cpuctx->max_pertask =
137 min(perf_max_counters - ctx->nr_counters,
138 perf_max_counters - perf_reserved_percpu);
139 }
140
aa9c4c0f
IM
141 spin_unlock(&ctx->lock);
142 curr_rq_unlock_irq_restore(&flags);
0793a61d
TG
143}
144
145
/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex held.
 *
 * CPU counters are removed with an smp call. For task counters we only
 * make the call when the task is on a CPU.
 */
04289bb9 154static void perf_counter_remove_from_context(struct perf_counter *counter)
0793a61d
TG
155{
156 struct perf_counter_context *ctx = counter->ctx;
157 struct task_struct *task = ctx->task;
158
159 if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
164 smp_call_function_single(counter->cpu,
04289bb9 165 __perf_counter_remove_from_context,
0793a61d
TG
166 counter, 1);
167 return;
168 }
169
170retry:
04289bb9 171 task_oncpu_function_call(task, __perf_counter_remove_from_context,
0793a61d
TG
172 counter);
173
174 spin_lock_irq(&ctx->lock);
175 /*
176 * If the context is active we need to retry the smp call.
177 */
04289bb9 178 if (ctx->nr_active && !list_empty(&counter->list_entry)) {
0793a61d
TG
179 spin_unlock_irq(&ctx->lock);
180 goto retry;
181 }
182
	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely if the call above did not succeed.
	 */
04289bb9 188 if (!list_empty(&counter->list_entry)) {
0793a61d 189 ctx->nr_counters--;
04289bb9 190 list_del_counter(counter, ctx);
0793a61d
TG
191 counter->task = NULL;
192 }
193 spin_unlock_irq(&ctx->lock);
194}
195
235c7fc7
IM
196static int
197counter_sched_in(struct perf_counter *counter,
198 struct perf_cpu_context *cpuctx,
199 struct perf_counter_context *ctx,
200 int cpu)
201{
202 if (counter->state == PERF_COUNTER_STATE_OFF)
203 return 0;
204
205 counter->state = PERF_COUNTER_STATE_ACTIVE;
206 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
207 /*
208 * The new state must be visible before we turn it on in the hardware:
209 */
210 smp_wmb();
211
212 if (counter->hw_ops->enable(counter)) {
213 counter->state = PERF_COUNTER_STATE_INACTIVE;
214 counter->oncpu = -1;
215 return -EAGAIN;
216 }
217
218 cpuctx->active_oncpu++;
219 ctx->nr_active++;
220
221 return 0;
222}
223
0793a61d 224/*
235c7fc7 225 * Cross CPU call to install and enable a performance counter
0793a61d
TG
226 */
227static void __perf_install_in_context(void *info)
228{
229 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
230 struct perf_counter *counter = info;
231 struct perf_counter_context *ctx = counter->ctx;
232 int cpu = smp_processor_id();
9b51f66d 233 unsigned long flags;
5c92d124 234 u64 perf_flags;
0793a61d
TG
235
236 /*
237 * If this is a task context, we need to check whether it is
238 * the current task context of this cpu. If not it has been
239 * scheduled out before the smp call arrived.
240 */
241 if (ctx->task && cpuctx->task_ctx != ctx)
242 return;
243
aa9c4c0f
IM
244 curr_rq_lock_irq_save(&flags);
245 spin_lock(&ctx->lock);
0793a61d
TG
246
247 /*
248 * Protect the list operation against NMI by disabling the
249 * counters on a global level. NOP for non NMI based counters.
250 */
01b2838c 251 perf_flags = hw_perf_save_disable();
0793a61d 252
235c7fc7 253 list_add_counter(counter, ctx);
0793a61d
TG
254 ctx->nr_counters++;
255
235c7fc7 256 counter_sched_in(counter, cpuctx, ctx, cpu);
0793a61d
TG
257
258 if (!ctx->task && cpuctx->max_pertask)
259 cpuctx->max_pertask--;
260
235c7fc7
IM
261 hw_perf_restore(perf_flags);
262
aa9c4c0f
IM
263 spin_unlock(&ctx->lock);
264 curr_rq_unlock_irq_restore(&flags);
0793a61d
TG
265}
266
267/*
268 * Attach a performance counter to a context
269 *
270 * First we add the counter to the list with the hardware enable bit
271 * in counter->hw_config cleared.
272 *
273 * If the counter is attached to a task which is on a CPU we use a smp
274 * call to enable it in the task context. The task might have been
275 * scheduled away, but we check this in the smp call again.
276 */
277static void
278perf_install_in_context(struct perf_counter_context *ctx,
279 struct perf_counter *counter,
280 int cpu)
281{
282 struct task_struct *task = ctx->task;
283
284 counter->ctx = ctx;
285 if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
290 smp_call_function_single(cpu, __perf_install_in_context,
291 counter, 1);
292 return;
293 }
294
295 counter->task = task;
296retry:
297 task_oncpu_function_call(task, __perf_install_in_context,
298 counter);
299
300 spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active and the counter has not been added yet,
	 * we need to retry the smp call.
	 */
04289bb9 304 if (ctx->nr_active && list_empty(&counter->list_entry)) {
0793a61d
TG
305 spin_unlock_irq(&ctx->lock);
306 goto retry;
307 }
308
	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the counter safely if the call above did not succeed.
	 */
04289bb9
IM
314 if (list_empty(&counter->list_entry)) {
315 list_add_counter(counter, ctx);
0793a61d
TG
316 ctx->nr_counters++;
317 }
318 spin_unlock_irq(&ctx->lock);
319}
320
04289bb9
IM
321static void
322counter_sched_out(struct perf_counter *counter,
323 struct perf_cpu_context *cpuctx,
324 struct perf_counter_context *ctx)
325{
6a930700 326 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
04289bb9
IM
327 return;
328
6a930700 329 counter->state = PERF_COUNTER_STATE_INACTIVE;
235c7fc7 330 counter->hw_ops->disable(counter);
6a930700 331 counter->oncpu = -1;
04289bb9
IM
332
333 cpuctx->active_oncpu--;
334 ctx->nr_active--;
335}
336
337static void
338group_sched_out(struct perf_counter *group_counter,
339 struct perf_cpu_context *cpuctx,
340 struct perf_counter_context *ctx)
341{
342 struct perf_counter *counter;
343
344 counter_sched_out(group_counter, cpuctx, ctx);
345
346 /*
347 * Schedule out siblings (if any):
348 */
349 list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
350 counter_sched_out(counter, cpuctx, ctx);
351}
352
235c7fc7
IM
353void __perf_counter_sched_out(struct perf_counter_context *ctx,
354 struct perf_cpu_context *cpuctx)
355{
356 struct perf_counter *counter;
357
358 if (likely(!ctx->nr_counters))
359 return;
360
361 spin_lock(&ctx->lock);
362 if (ctx->nr_active) {
363 list_for_each_entry(counter, &ctx->counter_list, list_entry)
364 group_sched_out(counter, cpuctx, ctx);
365 }
366 spin_unlock(&ctx->lock);
367}
368
0793a61d
TG
369/*
370 * Called from scheduler to remove the counters of the current task,
371 * with interrupts disabled.
372 *
373 * We stop each counter and update the counter value in counter->count.
374 *
7671581f 375 * This does not protect us against NMI, but disable()
0793a61d
TG
376 * sets the disabled bit in the control field of counter _before_
377 * accessing the counter control register. If a NMI hits, then it will
378 * not restart the counter.
379 */
380void perf_counter_task_sched_out(struct task_struct *task, int cpu)
381{
382 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
383 struct perf_counter_context *ctx = &task->perf_counter_ctx;
0793a61d
TG
384
385 if (likely(!cpuctx->task_ctx))
386 return;
387
235c7fc7
IM
388 __perf_counter_sched_out(ctx, cpuctx);
389
0793a61d
TG
390 cpuctx->task_ctx = NULL;
391}
392
235c7fc7 393static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
04289bb9 394{
235c7fc7 395 __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
04289bb9
IM
396}
397
7995888f 398static int
04289bb9
IM
399group_sched_in(struct perf_counter *group_counter,
400 struct perf_cpu_context *cpuctx,
401 struct perf_counter_context *ctx,
402 int cpu)
403{
95cdd2e7
IM
404 struct perf_counter *counter, *partial_group;
405 int ret = 0;
04289bb9 406
95cdd2e7
IM
407 if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
408 return -EAGAIN;
04289bb9
IM
409
410 /*
411 * Schedule in siblings as one group (if any):
412 */
7995888f 413 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
95cdd2e7
IM
414 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
415 partial_group = counter;
416 goto group_error;
417 }
418 ret = -EAGAIN;
419 }
420
421 return ret;
422
423group_error:
424 /*
425 * Groups can be scheduled in as one unit only, so undo any
426 * partial group before returning:
427 */
428 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
429 if (counter == partial_group)
430 break;
431 counter_sched_out(counter, cpuctx, ctx);
7995888f 432 }
95cdd2e7 433 counter_sched_out(group_counter, cpuctx, ctx);
7995888f 434
95cdd2e7 435 return -EAGAIN;
04289bb9
IM
436}
437
235c7fc7
IM
438static void
439__perf_counter_sched_in(struct perf_counter_context *ctx,
440 struct perf_cpu_context *cpuctx, int cpu)
0793a61d 441{
0793a61d
TG
442 struct perf_counter *counter;
443
444 if (likely(!ctx->nr_counters))
445 return;
446
447 spin_lock(&ctx->lock);
04289bb9 448 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
04289bb9
IM
449 /*
450 * Listen to the 'cpu' scheduling filter constraint
451 * of counters:
452 */
0793a61d
TG
453 if (counter->cpu != -1 && counter->cpu != cpu)
454 continue;
455
7995888f
IM
456 /*
457 * If we scheduled in a group atomically and
458 * exclusively, break out:
459 */
460 if (group_sched_in(counter, cpuctx, ctx, cpu))
461 break;
0793a61d
TG
462 }
463 spin_unlock(&ctx->lock);
235c7fc7
IM
464}
465
466/*
467 * Called from scheduler to add the counters of the current task
468 * with interrupts disabled.
469 *
470 * We restore the counter value and then enable it.
471 *
472 * This does not protect us against NMI, but enable()
473 * sets the enabled bit in the control field of counter _before_
474 * accessing the counter control register. If a NMI hits, then it will
475 * keep the counter running.
476 */
477void perf_counter_task_sched_in(struct task_struct *task, int cpu)
478{
479 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
480 struct perf_counter_context *ctx = &task->perf_counter_ctx;
04289bb9 481
235c7fc7 482 __perf_counter_sched_in(ctx, cpuctx, cpu);
0793a61d
TG
483 cpuctx->task_ctx = ctx;
484}
485
235c7fc7
IM
486static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
487{
488 struct perf_counter_context *ctx = &cpuctx->ctx;
489
490 __perf_counter_sched_in(ctx, cpuctx, cpu);
491}
492
1d1c7ddb
IM
493int perf_counter_task_disable(void)
494{
495 struct task_struct *curr = current;
496 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
497 struct perf_counter *counter;
aa9c4c0f 498 unsigned long flags;
1d1c7ddb
IM
499 u64 perf_flags;
500 int cpu;
501
502 if (likely(!ctx->nr_counters))
503 return 0;
504
aa9c4c0f 505 curr_rq_lock_irq_save(&flags);
1d1c7ddb
IM
506 cpu = smp_processor_id();
507
aa9c4c0f
IM
508 /* force the update of the task clock: */
509 __task_delta_exec(curr, 1);
510
1d1c7ddb
IM
511 perf_counter_task_sched_out(curr, cpu);
512
513 spin_lock(&ctx->lock);
514
515 /*
516 * Disable all the counters:
517 */
518 perf_flags = hw_perf_save_disable();
519
9b51f66d 520 list_for_each_entry(counter, &ctx->counter_list, list_entry)
6a930700 521 counter->state = PERF_COUNTER_STATE_OFF;
9b51f66d 522
1d1c7ddb
IM
523 hw_perf_restore(perf_flags);
524
525 spin_unlock(&ctx->lock);
526
aa9c4c0f 527 curr_rq_unlock_irq_restore(&flags);
1d1c7ddb
IM
528
529 return 0;
530}
531
532int perf_counter_task_enable(void)
533{
534 struct task_struct *curr = current;
535 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
536 struct perf_counter *counter;
aa9c4c0f 537 unsigned long flags;
1d1c7ddb
IM
538 u64 perf_flags;
539 int cpu;
540
541 if (likely(!ctx->nr_counters))
542 return 0;
543
aa9c4c0f 544 curr_rq_lock_irq_save(&flags);
1d1c7ddb
IM
545 cpu = smp_processor_id();
546
aa9c4c0f
IM
547 /* force the update of the task clock: */
548 __task_delta_exec(curr, 1);
549
235c7fc7
IM
550 perf_counter_task_sched_out(curr, cpu);
551
1d1c7ddb
IM
552 spin_lock(&ctx->lock);
553
554 /*
555 * Disable all the counters:
556 */
557 perf_flags = hw_perf_save_disable();
558
559 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
6a930700 560 if (counter->state != PERF_COUNTER_STATE_OFF)
1d1c7ddb 561 continue;
6a930700 562 counter->state = PERF_COUNTER_STATE_INACTIVE;
aa9c4c0f 563 counter->hw_event.disabled = 0;
1d1c7ddb
IM
564 }
565 hw_perf_restore(perf_flags);
566
567 spin_unlock(&ctx->lock);
568
569 perf_counter_task_sched_in(curr, cpu);
570
aa9c4c0f 571 curr_rq_unlock_irq_restore(&flags);
1d1c7ddb
IM
572
573 return 0;
574}
575
235c7fc7
IM
576/*
577 * Round-robin a context's counters:
578 */
579static void rotate_ctx(struct perf_counter_context *ctx)
0793a61d 580{
0793a61d 581 struct perf_counter *counter;
5c92d124 582 u64 perf_flags;
0793a61d 583
235c7fc7 584 if (!ctx->nr_counters)
0793a61d
TG
585 return;
586
0793a61d 587 spin_lock(&ctx->lock);
0793a61d 588 /*
04289bb9 589 * Rotate the first entry last (works just fine for group counters too):
0793a61d 590 */
01b2838c 591 perf_flags = hw_perf_save_disable();
04289bb9
IM
592 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
593 list_del(&counter->list_entry);
594 list_add_tail(&counter->list_entry, &ctx->counter_list);
0793a61d
TG
595 break;
596 }
01b2838c 597 hw_perf_restore(perf_flags);
0793a61d
TG
598
599 spin_unlock(&ctx->lock);
235c7fc7
IM
600}
601
602void perf_counter_task_tick(struct task_struct *curr, int cpu)
603{
604 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
605 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
606 const int rotate_percpu = 0;
607
608 if (rotate_percpu)
609 perf_counter_cpu_sched_out(cpuctx);
610 perf_counter_task_sched_out(curr, cpu);
0793a61d 611
235c7fc7
IM
612 if (rotate_percpu)
613 rotate_ctx(&cpuctx->ctx);
614 rotate_ctx(ctx);
615
616 if (rotate_percpu)
617 perf_counter_cpu_sched_in(cpuctx, cpu);
0793a61d
TG
618 perf_counter_task_sched_in(curr, cpu);
619}
620
0793a61d
TG
621/*
622 * Cross CPU call to read the hardware counter
623 */
7671581f 624static void __read(void *info)
0793a61d 625{
621a01ea 626 struct perf_counter *counter = info;
aa9c4c0f 627 unsigned long flags;
621a01ea 628
aa9c4c0f 629 curr_rq_lock_irq_save(&flags);
7671581f 630 counter->hw_ops->read(counter);
aa9c4c0f 631 curr_rq_unlock_irq_restore(&flags);
0793a61d
TG
632}
633
04289bb9 634static u64 perf_counter_read(struct perf_counter *counter)
0793a61d
TG
635{
636 /*
637 * If counter is enabled and currently active on a CPU, update the
638 * value in the counter structure:
639 */
6a930700 640 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
0793a61d 641 smp_call_function_single(counter->oncpu,
7671581f 642 __read, counter, 1);
0793a61d
TG
643 }
644
ee06094f 645 return atomic64_read(&counter->count);
0793a61d
TG
646}
647
648/*
649 * Cross CPU call to switch performance data pointers
650 */
651static void __perf_switch_irq_data(void *info)
652{
653 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
654 struct perf_counter *counter = info;
655 struct perf_counter_context *ctx = counter->ctx;
656 struct perf_data *oldirqdata = counter->irqdata;
657
658 /*
659 * If this is a task context, we need to check whether it is
660 * the current task context of this cpu. If not it has been
661 * scheduled out before the smp call arrived.
662 */
663 if (ctx->task) {
664 if (cpuctx->task_ctx != ctx)
665 return;
666 spin_lock(&ctx->lock);
667 }
668
669 /* Change the pointer NMI safe */
670 atomic_long_set((atomic_long_t *)&counter->irqdata,
671 (unsigned long) counter->usrdata);
672 counter->usrdata = oldirqdata;
673
674 if (ctx->task)
675 spin_unlock(&ctx->lock);
676}
677
678static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
679{
680 struct perf_counter_context *ctx = counter->ctx;
681 struct perf_data *oldirqdata = counter->irqdata;
682 struct task_struct *task = ctx->task;
683
684 if (!task) {
685 smp_call_function_single(counter->cpu,
686 __perf_switch_irq_data,
687 counter, 1);
688 return counter->usrdata;
689 }
690
691retry:
692 spin_lock_irq(&ctx->lock);
6a930700 693 if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
0793a61d
TG
694 counter->irqdata = counter->usrdata;
695 counter->usrdata = oldirqdata;
696 spin_unlock_irq(&ctx->lock);
697 return oldirqdata;
698 }
699 spin_unlock_irq(&ctx->lock);
700 task_oncpu_function_call(task, __perf_switch_irq_data, counter);
701 /* Might have failed, because task was scheduled out */
702 if (counter->irqdata == oldirqdata)
703 goto retry;
704
705 return counter->usrdata;
706}
707
708static void put_context(struct perf_counter_context *ctx)
709{
710 if (ctx->task)
711 put_task_struct(ctx->task);
712}
713
714static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
715{
716 struct perf_cpu_context *cpuctx;
717 struct perf_counter_context *ctx;
718 struct task_struct *task;
719
720 /*
721 * If cpu is not a wildcard then this is a percpu counter:
722 */
723 if (cpu != -1) {
724 /* Must be root to operate on a CPU counter: */
725 if (!capable(CAP_SYS_ADMIN))
726 return ERR_PTR(-EACCES);
727
728 if (cpu < 0 || cpu > num_possible_cpus())
729 return ERR_PTR(-EINVAL);
730
731 /*
732 * We could be clever and allow to attach a counter to an
733 * offline CPU and activate it when the CPU comes up, but
734 * that's for later.
735 */
736 if (!cpu_isset(cpu, cpu_online_map))
737 return ERR_PTR(-ENODEV);
738
739 cpuctx = &per_cpu(perf_cpu_context, cpu);
740 ctx = &cpuctx->ctx;
741
0793a61d
TG
742 return ctx;
743 }
744
745 rcu_read_lock();
746 if (!pid)
747 task = current;
748 else
749 task = find_task_by_vpid(pid);
750 if (task)
751 get_task_struct(task);
752 rcu_read_unlock();
753
754 if (!task)
755 return ERR_PTR(-ESRCH);
756
757 ctx = &task->perf_counter_ctx;
758 ctx->task = task;
759
760 /* Reuse ptrace permission checks for now. */
761 if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
762 put_context(ctx);
763 return ERR_PTR(-EACCES);
764 }
765
766 return ctx;
767}
768
769/*
770 * Called when the last reference to the file is gone.
771 */
772static int perf_release(struct inode *inode, struct file *file)
773{
774 struct perf_counter *counter = file->private_data;
775 struct perf_counter_context *ctx = counter->ctx;
776
777 file->private_data = NULL;
778
779 mutex_lock(&counter->mutex);
780
04289bb9 781 perf_counter_remove_from_context(counter);
0793a61d
TG
782 put_context(ctx);
783
784 mutex_unlock(&counter->mutex);
785
786 kfree(counter);
787
788 return 0;
789}
790
791/*
792 * Read the performance counter - simple non blocking version for now
793 */
794static ssize_t
795perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
796{
797 u64 cntval;
798
799 if (count != sizeof(cntval))
800 return -EINVAL;
801
802 mutex_lock(&counter->mutex);
04289bb9 803 cntval = perf_counter_read(counter);
0793a61d
TG
804 mutex_unlock(&counter->mutex);
805
806 return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
807}
808
809static ssize_t
810perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count)
811{
812 if (!usrdata->len)
813 return 0;
814
815 count = min(count, (size_t)usrdata->len);
816 if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))
817 return -EFAULT;
818
819 /* Adjust the counters */
820 usrdata->len -= count;
821 if (!usrdata->len)
822 usrdata->rd_idx = 0;
823 else
824 usrdata->rd_idx += count;
825
826 return count;
827}
828
829static ssize_t
830perf_read_irq_data(struct perf_counter *counter,
831 char __user *buf,
832 size_t count,
833 int nonblocking)
834{
835 struct perf_data *irqdata, *usrdata;
836 DECLARE_WAITQUEUE(wait, current);
837 ssize_t res;
838
839 irqdata = counter->irqdata;
840 usrdata = counter->usrdata;
841
842 if (usrdata->len + irqdata->len >= count)
843 goto read_pending;
844
845 if (nonblocking)
846 return -EAGAIN;
847
848 spin_lock_irq(&counter->waitq.lock);
849 __add_wait_queue(&counter->waitq, &wait);
850 for (;;) {
851 set_current_state(TASK_INTERRUPTIBLE);
852 if (usrdata->len + irqdata->len >= count)
853 break;
854
855 if (signal_pending(current))
856 break;
857
858 spin_unlock_irq(&counter->waitq.lock);
859 schedule();
860 spin_lock_irq(&counter->waitq.lock);
861 }
862 __remove_wait_queue(&counter->waitq, &wait);
863 __set_current_state(TASK_RUNNING);
864 spin_unlock_irq(&counter->waitq.lock);
865
866 if (usrdata->len + irqdata->len < count)
867 return -ERESTARTSYS;
868read_pending:
869 mutex_lock(&counter->mutex);
870
871 /* Drain pending data first: */
872 res = perf_copy_usrdata(usrdata, buf, count);
873 if (res < 0 || res == count)
874 goto out;
875
876 /* Switch irq buffer: */
877 usrdata = perf_switch_irq_data(counter);
878 if (perf_copy_usrdata(usrdata, buf + res, count - res) < 0) {
879 if (!res)
880 res = -EFAULT;
881 } else {
882 res = count;
883 }
884out:
885 mutex_unlock(&counter->mutex);
886
887 return res;
888}
889
890static ssize_t
891perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
892{
893 struct perf_counter *counter = file->private_data;
894
9f66a381 895 switch (counter->hw_event.record_type) {
0793a61d
TG
896 case PERF_RECORD_SIMPLE:
897 return perf_read_hw(counter, buf, count);
898
899 case PERF_RECORD_IRQ:
900 case PERF_RECORD_GROUP:
901 return perf_read_irq_data(counter, buf, count,
902 file->f_flags & O_NONBLOCK);
903 }
904 return -EINVAL;
905}
906
907static unsigned int perf_poll(struct file *file, poll_table *wait)
908{
909 struct perf_counter *counter = file->private_data;
910 unsigned int events = 0;
911 unsigned long flags;
912
913 poll_wait(file, &counter->waitq, wait);
914
915 spin_lock_irqsave(&counter->waitq.lock, flags);
916 if (counter->usrdata->len || counter->irqdata->len)
917 events |= POLLIN;
918 spin_unlock_irqrestore(&counter->waitq.lock, flags);
919
920 return events;
921}
922
static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
};

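/*
 * Example (editorial sketch, not part of the original file): minimal
 * userspace usage of the counter fd returned by sys_perf_counter_open().
 * The syscall number macro and the exact perf_counter_hw_event layout
 * are assumptions based on <linux/perf_counter.h> of this kernel
 * version; treat them as illustrative only.
 */
#if 0
#include <linux/perf_counter.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_counter_hw_event hw_event;
	unsigned long long value;
	int fd;

	memset(&hw_event, 0, sizeof(hw_event));
	hw_event.type		= PERF_COUNT_CPU_CLOCK;	/* software counter */
	hw_event.record_type	= PERF_RECORD_SIMPLE;	/* plain u64 on read() */

	/* pid 0: current task, cpu -1: any cpu, group_fd -1: no group leader */
	fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1);
	if (fd < 0)
		return 1;

	if (read(fd, &value, sizeof(value)) == sizeof(value))
		printf("cpu clock: %llu\n", value);

	close(fd);
	return 0;
}
#endif
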
static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();

	atomic64_set(&counter->hw.prev_count, cpu_clock(cpu));
	return 0;
}

static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&counter->hw.prev_count);
	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(now - prev, &counter->count);
}

static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
	cpu_clock_perf_counter_update(counter);
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
	cpu_clock_perf_counter_update(counter);
}

static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_counter_enable,
	.disable	= cpu_clock_perf_counter_disable,
	.read		= cpu_clock_perf_counter_read,
};

/*
 * Called from within the scheduler:
 */
static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
{
	struct task_struct *curr = counter->task;
	u64 delta;

	delta = __task_delta_exec(curr, update);

	return curr->se.sum_exec_runtime + delta;
}

static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}

static void task_clock_perf_counter_read(struct perf_counter *counter)
{
	u64 now = task_clock_perf_counter_val(counter, 1);

	task_clock_perf_counter_update(counter, now);
}

static int task_clock_perf_counter_enable(struct perf_counter *counter)
{
	u64 now = task_clock_perf_counter_val(counter, 0);

	atomic64_set(&counter->hw.prev_count, now);

	return 0;
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
	u64 now = task_clock_perf_counter_val(counter, 0);

	task_clock_perf_counter_update(counter, now);
}

static const struct hw_perf_counter_ops perf_ops_task_clock = {
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
};

e06c61a8
IM
1021static u64 get_page_faults(void)
1022{
1023 struct task_struct *curr = current;
1024
1025 return curr->maj_flt + curr->min_flt;
1026}
1027
1028static void page_faults_perf_counter_update(struct perf_counter *counter)
1029{
1030 u64 prev, now;
1031 s64 delta;
1032
1033 prev = atomic64_read(&counter->hw.prev_count);
1034 now = get_page_faults();
1035
1036 atomic64_set(&counter->hw.prev_count, now);
1037
1038 delta = now - prev;
e06c61a8
IM
1039
1040 atomic64_add(delta, &counter->count);
1041}
1042
1043static void page_faults_perf_counter_read(struct perf_counter *counter)
1044{
1045 page_faults_perf_counter_update(counter);
1046}
1047
95cdd2e7 1048static int page_faults_perf_counter_enable(struct perf_counter *counter)
e06c61a8
IM
1049{
1050 /*
1051 * page-faults is a per-task value already,
1052 * so we dont have to clear it on switch-in.
1053 */
95cdd2e7
IM
1054
1055 return 0;
e06c61a8
IM
1056}
1057
1058static void page_faults_perf_counter_disable(struct perf_counter *counter)
1059{
1060 page_faults_perf_counter_update(counter);
1061}
1062
1063static const struct hw_perf_counter_ops perf_ops_page_faults = {
7671581f
IM
1064 .enable = page_faults_perf_counter_enable,
1065 .disable = page_faults_perf_counter_disable,
1066 .read = page_faults_perf_counter_read,
e06c61a8
IM
1067};
1068
5d6a27d8
IM
1069static u64 get_context_switches(void)
1070{
1071 struct task_struct *curr = current;
1072
1073 return curr->nvcsw + curr->nivcsw;
1074}
1075
1076static void context_switches_perf_counter_update(struct perf_counter *counter)
1077{
1078 u64 prev, now;
1079 s64 delta;
1080
1081 prev = atomic64_read(&counter->hw.prev_count);
1082 now = get_context_switches();
1083
1084 atomic64_set(&counter->hw.prev_count, now);
1085
1086 delta = now - prev;
5d6a27d8
IM
1087
1088 atomic64_add(delta, &counter->count);
1089}
1090
1091static void context_switches_perf_counter_read(struct perf_counter *counter)
1092{
1093 context_switches_perf_counter_update(counter);
1094}
1095
95cdd2e7 1096static int context_switches_perf_counter_enable(struct perf_counter *counter)
5d6a27d8
IM
1097{
1098 /*
1099 * ->nvcsw + curr->nivcsw is a per-task value already,
1100 * so we dont have to clear it on switch-in.
1101 */
95cdd2e7
IM
1102
1103 return 0;
5d6a27d8
IM
1104}
1105
1106static void context_switches_perf_counter_disable(struct perf_counter *counter)
1107{
1108 context_switches_perf_counter_update(counter);
1109}
1110
1111static const struct hw_perf_counter_ops perf_ops_context_switches = {
7671581f
IM
1112 .enable = context_switches_perf_counter_enable,
1113 .disable = context_switches_perf_counter_disable,
1114 .read = context_switches_perf_counter_read,
5d6a27d8
IM
1115};
1116
6c594c21
IM
1117static inline u64 get_cpu_migrations(void)
1118{
1119 return current->se.nr_migrations;
1120}
1121
1122static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
1123{
1124 u64 prev, now;
1125 s64 delta;
1126
1127 prev = atomic64_read(&counter->hw.prev_count);
1128 now = get_cpu_migrations();
1129
1130 atomic64_set(&counter->hw.prev_count, now);
1131
1132 delta = now - prev;
6c594c21
IM
1133
1134 atomic64_add(delta, &counter->count);
1135}
1136
1137static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
1138{
1139 cpu_migrations_perf_counter_update(counter);
1140}
1141
95cdd2e7 1142static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
6c594c21
IM
1143{
1144 /*
1145 * se.nr_migrations is a per-task value already,
1146 * so we dont have to clear it on switch-in.
1147 */
95cdd2e7
IM
1148
1149 return 0;
6c594c21
IM
1150}
1151
1152static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
1153{
1154 cpu_migrations_perf_counter_update(counter);
1155}
1156
1157static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
7671581f
IM
1158 .enable = cpu_migrations_perf_counter_enable,
1159 .disable = cpu_migrations_perf_counter_disable,
1160 .read = cpu_migrations_perf_counter_read,
6c594c21
IM
1161};
1162
static const struct hw_perf_counter_ops *
sw_perf_counter_init(struct perf_counter *counter)
{
	const struct hw_perf_counter_ops *hw_ops = NULL;

	switch (counter->hw_event.type) {
	case PERF_COUNT_CPU_CLOCK:
		hw_ops = &perf_ops_cpu_clock;
		break;
	case PERF_COUNT_TASK_CLOCK:
		hw_ops = &perf_ops_task_clock;
		break;
	case PERF_COUNT_PAGE_FAULTS:
		hw_ops = &perf_ops_page_faults;
		break;
	case PERF_COUNT_CONTEXT_SWITCHES:
		hw_ops = &perf_ops_context_switches;
		break;
	case PERF_COUNT_CPU_MIGRATIONS:
		hw_ops = &perf_ops_cpu_migrations;
		break;
	default:
		break;
	}
	return hw_ops;
}

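/*
 * Example (editorial sketch, not part of the original file): a new
 * software counter would follow the same pattern as the ones above --
 * snapshot a base value in hw.prev_count on enable, fold the delta into
 * ->count on read/disable, and add a case to sw_perf_counter_init().
 * "example_stat" is a hypothetical per-task statistic, not a real field.
 */
#if 0
static u64 get_example_stat(void)
{
	return current->example_stat;			/* hypothetical field */
}

static void example_perf_counter_update(struct perf_counter *counter)
{
	u64 now = get_example_stat();
	s64 delta = now - atomic64_read(&counter->hw.prev_count);

	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(delta, &counter->count);
}

static int example_perf_counter_enable(struct perf_counter *counter)
{
	atomic64_set(&counter->hw.prev_count, get_example_stat());
	return 0;
}

static void example_perf_counter_disable(struct perf_counter *counter)
{
	example_perf_counter_update(counter);
}

static const struct hw_perf_counter_ops perf_ops_example = {
	.enable		= example_perf_counter_enable,
	.disable	= example_perf_counter_disable,
	.read		= example_perf_counter_update,
};
#endif
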
0793a61d
TG
1190/*
1191 * Allocate and initialize a counter structure
1192 */
1193static struct perf_counter *
04289bb9
IM
1194perf_counter_alloc(struct perf_counter_hw_event *hw_event,
1195 int cpu,
9b51f66d
IM
1196 struct perf_counter *group_leader,
1197 gfp_t gfpflags)
0793a61d 1198{
5c92d124 1199 const struct hw_perf_counter_ops *hw_ops;
621a01ea 1200 struct perf_counter *counter;
0793a61d 1201
9b51f66d 1202 counter = kzalloc(sizeof(*counter), gfpflags);
0793a61d
TG
1203 if (!counter)
1204 return NULL;
1205
04289bb9
IM
1206 /*
1207 * Single counters are their own group leaders, with an
1208 * empty sibling list:
1209 */
1210 if (!group_leader)
1211 group_leader = counter;
1212
0793a61d 1213 mutex_init(&counter->mutex);
04289bb9
IM
1214 INIT_LIST_HEAD(&counter->list_entry);
1215 INIT_LIST_HEAD(&counter->sibling_list);
0793a61d
TG
1216 init_waitqueue_head(&counter->waitq);
1217
9f66a381
IM
1218 counter->irqdata = &counter->data[0];
1219 counter->usrdata = &counter->data[1];
1220 counter->cpu = cpu;
1221 counter->hw_event = *hw_event;
1222 counter->wakeup_pending = 0;
04289bb9 1223 counter->group_leader = group_leader;
621a01ea
IM
1224 counter->hw_ops = NULL;
1225
235c7fc7 1226 counter->state = PERF_COUNTER_STATE_INACTIVE;
a86ed508
IM
1227 if (hw_event->disabled)
1228 counter->state = PERF_COUNTER_STATE_OFF;
1229
5c92d124
IM
1230 hw_ops = NULL;
1231 if (!hw_event->raw && hw_event->type < 0)
1232 hw_ops = sw_perf_counter_init(counter);
9b51f66d 1233 if (!hw_ops)
5c92d124 1234 hw_ops = hw_perf_counter_init(counter);
5c92d124 1235
621a01ea
IM
1236 if (!hw_ops) {
1237 kfree(counter);
1238 return NULL;
1239 }
1240 counter->hw_ops = hw_ops;
0793a61d
TG
1241
1242 return counter;
1243}
1244
1245/**
9f66a381
IM
1246 * sys_perf_task_open - open a performance counter, associate it to a task/cpu
1247 *
1248 * @hw_event_uptr: event type attributes for monitoring/sampling
0793a61d 1249 * @pid: target pid
9f66a381
IM
1250 * @cpu: target cpu
1251 * @group_fd: group leader counter fd
0793a61d 1252 */
1d1c7ddb
IM
1253asmlinkage int
1254sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user,
1255 pid_t pid, int cpu, int group_fd)
0793a61d 1256{
04289bb9 1257 struct perf_counter *counter, *group_leader;
9f66a381 1258 struct perf_counter_hw_event hw_event;
04289bb9 1259 struct perf_counter_context *ctx;
9b51f66d 1260 struct file *counter_file = NULL;
04289bb9
IM
1261 struct file *group_file = NULL;
1262 int fput_needed = 0;
9b51f66d 1263 int fput_needed2 = 0;
0793a61d
TG
1264 int ret;
1265
9f66a381 1266 if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
eab656ae
TG
1267 return -EFAULT;
1268
04289bb9 1269 /*
ccff286d
IM
1270 * Get the target context (task or percpu):
1271 */
1272 ctx = find_get_context(pid, cpu);
1273 if (IS_ERR(ctx))
1274 return PTR_ERR(ctx);
1275
1276 /*
1277 * Look up the group leader (we will attach this counter to it):
04289bb9
IM
1278 */
1279 group_leader = NULL;
1280 if (group_fd != -1) {
1281 ret = -EINVAL;
1282 group_file = fget_light(group_fd, &fput_needed);
1283 if (!group_file)
ccff286d 1284 goto err_put_context;
04289bb9 1285 if (group_file->f_op != &perf_fops)
ccff286d 1286 goto err_put_context;
04289bb9
IM
1287
1288 group_leader = group_file->private_data;
1289 /*
ccff286d
IM
1290 * Do not allow a recursive hierarchy (this new sibling
1291 * becoming part of another group-sibling):
1292 */
1293 if (group_leader->group_leader != group_leader)
1294 goto err_put_context;
1295 /*
1296 * Do not allow to attach to a group in a different
1297 * task or CPU context:
04289bb9 1298 */
ccff286d
IM
1299 if (group_leader->ctx != ctx)
1300 goto err_put_context;
04289bb9
IM
1301 }
1302
5c92d124 1303 ret = -EINVAL;
9b51f66d 1304 counter = perf_counter_alloc(&hw_event, cpu, group_leader, GFP_KERNEL);
0793a61d
TG
1305 if (!counter)
1306 goto err_put_context;
1307
0793a61d
TG
1308 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
1309 if (ret < 0)
9b51f66d
IM
1310 goto err_free_put_context;
1311
1312 counter_file = fget_light(ret, &fput_needed2);
1313 if (!counter_file)
1314 goto err_free_put_context;
1315
1316 counter->filp = counter_file;
1317 perf_install_in_context(ctx, counter, cpu);
1318
1319 fput_light(counter_file, fput_needed2);
0793a61d 1320
04289bb9
IM
1321out_fput:
1322 fput_light(group_file, fput_needed);
1323
0793a61d
TG
1324 return ret;
1325
9b51f66d 1326err_free_put_context:
0793a61d
TG
1327 kfree(counter);
1328
1329err_put_context:
1330 put_context(ctx);
1331
04289bb9 1332 goto out_fput;
0793a61d
TG
1333}
1334
9b51f66d
IM
1335/*
1336 * Initialize the perf_counter context in a task_struct:
1337 */
1338static void
1339__perf_counter_init_context(struct perf_counter_context *ctx,
1340 struct task_struct *task)
1341{
1342 memset(ctx, 0, sizeof(*ctx));
1343 spin_lock_init(&ctx->lock);
1344 INIT_LIST_HEAD(&ctx->counter_list);
1345 ctx->task = task;
1346}
1347
1348/*
1349 * inherit a counter from parent task to child task:
1350 */
1351static int
1352inherit_counter(struct perf_counter *parent_counter,
1353 struct task_struct *parent,
1354 struct perf_counter_context *parent_ctx,
1355 struct task_struct *child,
1356 struct perf_counter_context *child_ctx)
1357{
1358 struct perf_counter *child_counter;
1359
1360 child_counter = perf_counter_alloc(&parent_counter->hw_event,
1361 parent_counter->cpu, NULL,
1362 GFP_ATOMIC);
1363 if (!child_counter)
1364 return -ENOMEM;
1365
1366 /*
1367 * Link it up in the child's context:
1368 */
1369 child_counter->ctx = child_ctx;
1370 child_counter->task = child;
1371 list_add_counter(child_counter, child_ctx);
1372 child_ctx->nr_counters++;
1373
1374 child_counter->parent = parent_counter;
9b51f66d
IM
1375 /*
1376 * inherit into child's child as well:
1377 */
1378 child_counter->hw_event.inherit = 1;
1379
1380 /*
1381 * Get a reference to the parent filp - we will fput it
1382 * when the child counter exits. This is safe to do because
1383 * we are in the parent and we know that the filp still
1384 * exists and has a nonzero count:
1385 */
1386 atomic_long_inc(&parent_counter->filp->f_count);
1387
1388 return 0;
1389}
1390
1391static void
1392__perf_counter_exit_task(struct task_struct *child,
1393 struct perf_counter *child_counter,
1394 struct perf_counter_context *child_ctx)
1395{
1396 struct perf_counter *parent_counter;
1397 u64 parent_val, child_val;
9b51f66d
IM
1398
1399 /*
235c7fc7
IM
1400 * If we do not self-reap then we have to wait for the
1401 * child task to unschedule (it will happen for sure),
1402 * so that its counter is at its final count. (This
1403 * condition triggers rarely - child tasks usually get
1404 * off their CPU before the parent has a chance to
1405 * get this far into the reaping action)
9b51f66d 1406 */
235c7fc7
IM
1407 if (child != current) {
1408 wait_task_inactive(child, 0);
1409 list_del_init(&child_counter->list_entry);
1410 } else {
0cc0c027 1411 struct perf_cpu_context *cpuctx;
235c7fc7
IM
1412 unsigned long flags;
1413 u64 perf_flags;
1414
1415 /*
1416 * Disable and unlink this counter.
1417 *
1418 * Be careful about zapping the list - IRQ/NMI context
1419 * could still be processing it:
1420 */
1421 curr_rq_lock_irq_save(&flags);
1422 perf_flags = hw_perf_save_disable();
0cc0c027
IM
1423
1424 cpuctx = &__get_cpu_var(perf_cpu_context);
1425
235c7fc7
IM
1426 if (child_counter->state == PERF_COUNTER_STATE_ACTIVE) {
1427 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
1428 child_counter->hw_ops->disable(child_counter);
1429 cpuctx->active_oncpu--;
1430 child_ctx->nr_active--;
1431 child_counter->oncpu = -1;
1432 }
0cc0c027 1433
235c7fc7 1434 list_del_init(&child_counter->list_entry);
0cc0c027 1435
235c7fc7 1436 child_ctx->nr_counters--;
9b51f66d 1437
235c7fc7
IM
1438 hw_perf_restore(perf_flags);
1439 curr_rq_unlock_irq_restore(&flags);
1440 }
9b51f66d
IM
1441
1442 parent_counter = child_counter->parent;
1443 /*
1444 * It can happen that parent exits first, and has counters
1445 * that are still around due to the child reference. These
1446 * counters need to be zapped - but otherwise linger.
1447 */
1448 if (!parent_counter)
1449 return;
1450
1451 parent_val = atomic64_read(&parent_counter->count);
1452 child_val = atomic64_read(&child_counter->count);
1453
1454 /*
1455 * Add back the child's count to the parent's count:
1456 */
1457 atomic64_add(child_val, &parent_counter->count);
1458
1459 fput(parent_counter->filp);
1460
1461 kfree(child_counter);
1462}
1463
/*
 * When a child task exits, feed back counter values to parent counters.
 *
 * Note: we are running in child context, but the PID is not hashed
 * anymore, so new counters will not be added.
 */
1470void perf_counter_exit_task(struct task_struct *child)
1471{
1472 struct perf_counter *child_counter, *tmp;
1473 struct perf_counter_context *child_ctx;
1474
1475 child_ctx = &child->perf_counter_ctx;
1476
1477 if (likely(!child_ctx->nr_counters))
1478 return;
1479
1480 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
1481 list_entry)
1482 __perf_counter_exit_task(child, child_counter, child_ctx);
1483}
1484
1485/*
1486 * Initialize the perf_counter context in task_struct
1487 */
1488void perf_counter_init_task(struct task_struct *child)
1489{
1490 struct perf_counter_context *child_ctx, *parent_ctx;
1491 struct perf_counter *counter, *parent_counter;
1492 struct task_struct *parent = current;
1493 unsigned long flags;
1494
1495 child_ctx = &child->perf_counter_ctx;
1496 parent_ctx = &parent->perf_counter_ctx;
1497
1498 __perf_counter_init_context(child_ctx, child);
1499
1500 /*
1501 * This is executed from the parent task context, so inherit
1502 * counters that have been marked for cloning:
1503 */
1504
1505 if (likely(!parent_ctx->nr_counters))
1506 return;
1507
1508 /*
1509 * Lock the parent list. No need to lock the child - not PID
1510 * hashed yet and not running, so nobody can access it.
1511 */
1512 spin_lock_irqsave(&parent_ctx->lock, flags);
1513
1514 /*
1515 * We dont have to disable NMIs - we are only looking at
1516 * the list, not manipulating it:
1517 */
1518 list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
1519 if (!counter->hw_event.inherit || counter->group_leader != counter)
1520 continue;
1521
		/*
		 * Instead of creating recursive hierarchies of counters,
		 * we link inherited counters back to the original parent,
		 * which has a filp for sure, which we use as the reference
		 * count:
		 */
1528 parent_counter = counter;
1529 if (counter->parent)
1530 parent_counter = counter->parent;
1531
1532 if (inherit_counter(parent_counter, parent,
1533 parent_ctx, child, child_ctx))
1534 break;
1535 }
1536
1537 spin_unlock_irqrestore(&parent_ctx->lock, flags);
1538}
1539
04289bb9 1540static void __cpuinit perf_counter_init_cpu(int cpu)
0793a61d 1541{
04289bb9 1542 struct perf_cpu_context *cpuctx;
0793a61d 1543
04289bb9
IM
1544 cpuctx = &per_cpu(perf_cpu_context, cpu);
1545 __perf_counter_init_context(&cpuctx->ctx, NULL);
0793a61d
TG
1546
1547 mutex_lock(&perf_resource_mutex);
04289bb9 1548 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
0793a61d 1549 mutex_unlock(&perf_resource_mutex);
04289bb9 1550
0793a61d
TG
1551 hw_perf_counter_setup();
1552}
1553
1554#ifdef CONFIG_HOTPLUG_CPU
04289bb9 1555static void __perf_counter_exit_cpu(void *info)
0793a61d
TG
1556{
1557 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1558 struct perf_counter_context *ctx = &cpuctx->ctx;
1559 struct perf_counter *counter, *tmp;
1560
04289bb9
IM
1561 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
1562 __perf_counter_remove_from_context(counter);
0793a61d
TG
1563
1564}
04289bb9 1565static void perf_counter_exit_cpu(int cpu)
0793a61d 1566{
04289bb9 1567 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
0793a61d
TG
1568}
1569#else
04289bb9 1570static inline void perf_counter_exit_cpu(int cpu) { }
0793a61d
TG
1571#endif
1572
1573static int __cpuinit
1574perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
1575{
1576 unsigned int cpu = (long)hcpu;
1577
1578 switch (action) {
1579
1580 case CPU_UP_PREPARE:
1581 case CPU_UP_PREPARE_FROZEN:
04289bb9 1582 perf_counter_init_cpu(cpu);
0793a61d
TG
1583 break;
1584
1585 case CPU_DOWN_PREPARE:
1586 case CPU_DOWN_PREPARE_FROZEN:
04289bb9 1587 perf_counter_exit_cpu(cpu);
0793a61d
TG
1588 break;
1589
1590 default:
1591 break;
1592 }
1593
1594 return NOTIFY_OK;
1595}
1596
1597static struct notifier_block __cpuinitdata perf_cpu_nb = {
1598 .notifier_call = perf_cpu_notify,
1599};
1600
1601static int __init perf_counter_init(void)
1602{
1603 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
1604 (void *)(long)smp_processor_id());
1605 register_cpu_notifier(&perf_cpu_nb);
1606
1607 return 0;
1608}
1609early_initcall(perf_counter_init);
1610
1611static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
1612{
1613 return sprintf(buf, "%d\n", perf_reserved_percpu);
1614}
1615
1616static ssize_t
1617perf_set_reserve_percpu(struct sysdev_class *class,
1618 const char *buf,
1619 size_t count)
1620{
1621 struct perf_cpu_context *cpuctx;
1622 unsigned long val;
1623 int err, cpu, mpt;
1624
1625 err = strict_strtoul(buf, 10, &val);
1626 if (err)
1627 return err;
1628 if (val > perf_max_counters)
1629 return -EINVAL;
1630
1631 mutex_lock(&perf_resource_mutex);
1632 perf_reserved_percpu = val;
1633 for_each_online_cpu(cpu) {
1634 cpuctx = &per_cpu(perf_cpu_context, cpu);
1635 spin_lock_irq(&cpuctx->ctx.lock);
1636 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
1637 perf_max_counters - perf_reserved_percpu);
1638 cpuctx->max_pertask = mpt;
1639 spin_unlock_irq(&cpuctx->ctx.lock);
1640 }
1641 mutex_unlock(&perf_resource_mutex);
1642
1643 return count;
1644}
1645
1646static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
1647{
1648 return sprintf(buf, "%d\n", perf_overcommit);
1649}
1650
1651static ssize_t
1652perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
1653{
1654 unsigned long val;
1655 int err;
1656
1657 err = strict_strtoul(buf, 10, &val);
1658 if (err)
1659 return err;
1660 if (val > 1)
1661 return -EINVAL;
1662
1663 mutex_lock(&perf_resource_mutex);
1664 perf_overcommit = val;
1665 mutex_unlock(&perf_resource_mutex);
1666
1667 return count;
1668}
1669
1670static SYSDEV_CLASS_ATTR(
1671 reserve_percpu,
1672 0644,
1673 perf_show_reserve_percpu,
1674 perf_set_reserve_percpu
1675 );
1676
1677static SYSDEV_CLASS_ATTR(
1678 overcommit,
1679 0644,
1680 perf_show_overcommit,
1681 perf_set_overcommit
1682 );
1683
1684static struct attribute *perfclass_attrs[] = {
1685 &attr_reserve_percpu.attr,
1686 &attr_overcommit.attr,
1687 NULL
1688};
1689
1690static struct attribute_group perfclass_attr_group = {
1691 .attrs = perfclass_attrs,
1692 .name = "perf_counters",
1693};
1694
1695static int __init perf_counter_sysfs_init(void)
1696{
1697 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
1698 &perfclass_attr_group);
1699}
1700device_initcall(perf_counter_sysfs_init);