linux-block.git: kernel/perf_counter.c (commit c841563de0433d9dd17f066456796f35f0eca482)
1 /*
2  * Performance counter core code
3  *
4  *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
6  *
7  *
8  *  For licensing details see kernel-base/COPYING
9  */
10
11 #include <linux/fs.h>
12 #include <linux/mm.h>
13 #include <linux/cpu.h>
14 #include <linux/smp.h>
15 #include <linux/file.h>
16 #include <linux/poll.h>
17 #include <linux/sysfs.h>
18 #include <linux/ptrace.h>
19 #include <linux/percpu.h>
20 #include <linux/vmstat.h>
21 #include <linux/hardirq.h>
22 #include <linux/rculist.h>
23 #include <linux/uaccess.h>
24 #include <linux/syscalls.h>
25 #include <linux/anon_inodes.h>
26 #include <linux/kernel_stat.h>
27 #include <linux/perf_counter.h>
28 #include <linux/dcache.h>
29
30 #include <asm/irq_regs.h>
31
32 /*
33  * Each CPU has a list of per CPU counters:
34  */
35 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
36
37 int perf_max_counters __read_mostly = 1;
38 static int perf_reserved_percpu __read_mostly;
39 static int perf_overcommit __read_mostly = 1;
40
41 /*
42  * Mutex for (sysadmin-configurable) counter reservations:
43  */
44 static DEFINE_MUTEX(perf_resource_mutex);
45
46 /*
47  * Architecture provided APIs - weak aliases:
48  */
49 extern __weak const struct hw_perf_counter_ops *
50 hw_perf_counter_init(struct perf_counter *counter)
51 {
52         return NULL;
53 }
54
55 u64 __weak hw_perf_save_disable(void)           { return 0; }
56 void __weak hw_perf_restore(u64 ctrl)           { barrier(); }
57 void __weak hw_perf_counter_setup(int cpu)      { barrier(); }
58 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
59                struct perf_cpu_context *cpuctx,
60                struct perf_counter_context *ctx, int cpu)
61 {
62         return 0;
63 }
64
65 void __weak perf_counter_print_debug(void)      { }
66
67 static void
68 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
69 {
70         struct perf_counter *group_leader = counter->group_leader;
71
72         /*
73          * Depending on whether it is a standalone or sibling counter,
74          * add it straight to the context's counter list, or to the group
75          * leader's sibling list:
76          */
77         if (counter->group_leader == counter)
78                 list_add_tail(&counter->list_entry, &ctx->counter_list);
79         else {
80                 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
81                 group_leader->nr_siblings++;
82         }
83
84         list_add_rcu(&counter->event_entry, &ctx->event_list);
85 }
86
87 static void
88 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
89 {
90         struct perf_counter *sibling, *tmp;
91
92         list_del_init(&counter->list_entry);
93         list_del_rcu(&counter->event_entry);
94
95         if (counter->group_leader != counter)
96                 counter->group_leader->nr_siblings--;
97
98         /*
99          * If this was a group counter with sibling counters then
100          * upgrade the siblings to singleton counters by adding them
101          * to the context list directly:
102          */
103         list_for_each_entry_safe(sibling, tmp,
104                                  &counter->sibling_list, list_entry) {
105
106                 list_move_tail(&sibling->list_entry, &ctx->counter_list);
107                 sibling->group_leader = sibling;
108         }
109 }
110
111 static void
112 counter_sched_out(struct perf_counter *counter,
113                   struct perf_cpu_context *cpuctx,
114                   struct perf_counter_context *ctx)
115 {
116         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
117                 return;
118
119         counter->state = PERF_COUNTER_STATE_INACTIVE;
120         counter->tstamp_stopped = ctx->time_now;
121         counter->hw_ops->disable(counter);
122         counter->oncpu = -1;
123
124         if (!is_software_counter(counter))
125                 cpuctx->active_oncpu--;
126         ctx->nr_active--;
127         if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
128                 cpuctx->exclusive = 0;
129 }
130
131 static void
132 group_sched_out(struct perf_counter *group_counter,
133                 struct perf_cpu_context *cpuctx,
134                 struct perf_counter_context *ctx)
135 {
136         struct perf_counter *counter;
137
138         if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
139                 return;
140
141         counter_sched_out(group_counter, cpuctx, ctx);
142
143         /*
144          * Schedule out siblings (if any):
145          */
146         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
147                 counter_sched_out(counter, cpuctx, ctx);
148
149         if (group_counter->hw_event.exclusive)
150                 cpuctx->exclusive = 0;
151 }
152
153 /*
154  * Cross CPU call to remove a performance counter
155  *
156  * We disable the counter on the hardware level first. After that we
157  * remove it from the context list.
158  */
159 static void __perf_counter_remove_from_context(void *info)
160 {
161         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
162         struct perf_counter *counter = info;
163         struct perf_counter_context *ctx = counter->ctx;
164         unsigned long flags;
165         u64 perf_flags;
166
167         /*
168          * If this is a task context, we need to check whether it is
169          * the current task context of this cpu. If not, it has been
170          * scheduled out before the smp call arrived.
171          */
172         if (ctx->task && cpuctx->task_ctx != ctx)
173                 return;
174
175         curr_rq_lock_irq_save(&flags);
176         spin_lock(&ctx->lock);
177
178         counter_sched_out(counter, cpuctx, ctx);
179
180         counter->task = NULL;
181         ctx->nr_counters--;
182
183         /*
184          * Protect the list operation against NMI by disabling the
185          * counters on a global level. NOP for non-NMI-based counters.
186          */
187         perf_flags = hw_perf_save_disable();
188         list_del_counter(counter, ctx);
189         hw_perf_restore(perf_flags);
190
191         if (!ctx->task) {
192                 /*
193                  * Allow more per task counters with respect to the
194                  * reservation:
195                  */
196                 cpuctx->max_pertask =
197                         min(perf_max_counters - ctx->nr_counters,
198                             perf_max_counters - perf_reserved_percpu);
199         }
200
201         spin_unlock(&ctx->lock);
202         curr_rq_unlock_irq_restore(&flags);
203 }
204
205
206 /*
207  * Remove the counter from a task's (or a CPU's) list of counters.
208  *
209  * Must be called with counter->mutex and ctx->mutex held.
210  *
211  * CPU counters are removed with an smp call. For task counters we only
212  * call when the task is on a CPU.
213  */
214 static void perf_counter_remove_from_context(struct perf_counter *counter)
215 {
216         struct perf_counter_context *ctx = counter->ctx;
217         struct task_struct *task = ctx->task;
218
219         if (!task) {
220                 /*
221                  * Per cpu counters are removed via an smp call and
222                  * the removal is always successful.
223                  */
224                 smp_call_function_single(counter->cpu,
225                                          __perf_counter_remove_from_context,
226                                          counter, 1);
227                 return;
228         }
229
230 retry:
231         task_oncpu_function_call(task, __perf_counter_remove_from_context,
232                                  counter);
233
234         spin_lock_irq(&ctx->lock);
235         /*
236          * If the context is active we need to retry the smp call.
237          */
238         if (ctx->nr_active && !list_empty(&counter->list_entry)) {
239                 spin_unlock_irq(&ctx->lock);
240                 goto retry;
241         }
242
243         /*
244          * The lock prevents this context from being scheduled in, so we
245          * can remove the counter safely if the call above did not
246          * succeed.
247          */
248         if (!list_empty(&counter->list_entry)) {
249                 ctx->nr_counters--;
250                 list_del_counter(counter, ctx);
251                 counter->task = NULL;
252         }
253         spin_unlock_irq(&ctx->lock);
254 }
255
256 /*
257  * Get the current time for this context.
258  * If this is a task context, we use the task's task clock;
259  * for a per-cpu context, we use the cpu clock.
260  */
261 static u64 get_context_time(struct perf_counter_context *ctx, int update)
262 {
263         struct task_struct *curr = ctx->task;
264
265         if (!curr)
266                 return cpu_clock(smp_processor_id());
267
268         return __task_delta_exec(curr, update) + curr->se.sum_exec_runtime;
269 }
270
271 /*
272  * Update the record of the current time in a context.
273  */
274 static void update_context_time(struct perf_counter_context *ctx, int update)
275 {
276         ctx->time_now = get_context_time(ctx, update) - ctx->time_lost;
277 }
278
279 /*
280  * Update the total_time_enabled and total_time_running fields for a counter.
281  */
282 static void update_counter_times(struct perf_counter *counter)
283 {
284         struct perf_counter_context *ctx = counter->ctx;
285         u64 run_end;
286
287         if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
288                 counter->total_time_enabled = ctx->time_now -
289                         counter->tstamp_enabled;
290                 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
291                         run_end = counter->tstamp_stopped;
292                 else
293                         run_end = ctx->time_now;
294                 counter->total_time_running = run_end - counter->tstamp_running;
295         }
296 }
297
298 /*
299  * Update total_time_enabled and total_time_running for all counters in a group.
300  */
301 static void update_group_times(struct perf_counter *leader)
302 {
303         struct perf_counter *counter;
304
305         update_counter_times(leader);
306         list_for_each_entry(counter, &leader->sibling_list, list_entry)
307                 update_counter_times(counter);
308 }
309
310 /*
311  * Cross CPU call to disable a performance counter
312  */
313 static void __perf_counter_disable(void *info)
314 {
315         struct perf_counter *counter = info;
316         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
317         struct perf_counter_context *ctx = counter->ctx;
318         unsigned long flags;
319
320         /*
321          * If this is a per-task counter, we need to check whether this
322          * counter's task is the current task on this cpu.
323          */
324         if (ctx->task && cpuctx->task_ctx != ctx)
325                 return;
326
327         curr_rq_lock_irq_save(&flags);
328         spin_lock(&ctx->lock);
329
330         /*
331          * If the counter is on, turn it off.
332          * If it is in error state, leave it in error state.
333          */
334         if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
335                 update_context_time(ctx, 1);
336                 update_counter_times(counter);
337                 if (counter == counter->group_leader)
338                         group_sched_out(counter, cpuctx, ctx);
339                 else
340                         counter_sched_out(counter, cpuctx, ctx);
341                 counter->state = PERF_COUNTER_STATE_OFF;
342         }
343
344         spin_unlock(&ctx->lock);
345         curr_rq_unlock_irq_restore(&flags);
346 }
347
348 /*
349  * Disable a counter.
350  */
351 static void perf_counter_disable(struct perf_counter *counter)
352 {
353         struct perf_counter_context *ctx = counter->ctx;
354         struct task_struct *task = ctx->task;
355
356         if (!task) {
357                 /*
358                  * Disable the counter on the cpu that it's on
359                  */
360                 smp_call_function_single(counter->cpu, __perf_counter_disable,
361                                          counter, 1);
362                 return;
363         }
364
365  retry:
366         task_oncpu_function_call(task, __perf_counter_disable, counter);
367
368         spin_lock_irq(&ctx->lock);
369         /*
370          * If the counter is still active, we need to retry the cross-call.
371          */
372         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
373                 spin_unlock_irq(&ctx->lock);
374                 goto retry;
375         }
376
377         /*
378          * Since we have the lock this context can't be scheduled
379          * in, so we can change the state safely.
380          */
381         if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
382                 update_counter_times(counter);
383                 counter->state = PERF_COUNTER_STATE_OFF;
384         }
385
386         spin_unlock_irq(&ctx->lock);
387 }
388
389 /*
390  * Disable a counter and all its children.
391  */
392 static void perf_counter_disable_family(struct perf_counter *counter)
393 {
394         struct perf_counter *child;
395
396         perf_counter_disable(counter);
397
398         /*
399          * Lock the mutex to protect the list of children
400          */
401         mutex_lock(&counter->mutex);
402         list_for_each_entry(child, &counter->child_list, child_list)
403                 perf_counter_disable(child);
404         mutex_unlock(&counter->mutex);
405 }
406
407 static int
408 counter_sched_in(struct perf_counter *counter,
409                  struct perf_cpu_context *cpuctx,
410                  struct perf_counter_context *ctx,
411                  int cpu)
412 {
413         if (counter->state <= PERF_COUNTER_STATE_OFF)
414                 return 0;
415
416         counter->state = PERF_COUNTER_STATE_ACTIVE;
417         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
418         /*
419          * The new state must be visible before we turn it on in the hardware:
420          */
421         smp_wmb();
422
423         if (counter->hw_ops->enable(counter)) {
424                 counter->state = PERF_COUNTER_STATE_INACTIVE;
425                 counter->oncpu = -1;
426                 return -EAGAIN;
427         }
428
429         counter->tstamp_running += ctx->time_now - counter->tstamp_stopped;
430
431         if (!is_software_counter(counter))
432                 cpuctx->active_oncpu++;
433         ctx->nr_active++;
434
435         if (counter->hw_event.exclusive)
436                 cpuctx->exclusive = 1;
437
438         return 0;
439 }
440
441 /*
442  * Return 1 for a group consisting entirely of software counters,
443  * 0 if the group contains any hardware counters.
444  */
445 static int is_software_only_group(struct perf_counter *leader)
446 {
447         struct perf_counter *counter;
448
449         if (!is_software_counter(leader))
450                 return 0;
451
452         list_for_each_entry(counter, &leader->sibling_list, list_entry)
453                 if (!is_software_counter(counter))
454                         return 0;
455
456         return 1;
457 }
458
459 /*
460  * Work out whether we can put this counter group on the CPU now.
461  */
462 static int group_can_go_on(struct perf_counter *counter,
463                            struct perf_cpu_context *cpuctx,
464                            int can_add_hw)
465 {
466         /*
467          * Groups consisting entirely of software counters can always go on.
468          */
469         if (is_software_only_group(counter))
470                 return 1;
471         /*
472          * If an exclusive group is already on, no other hardware
473          * counters can go on.
474          */
475         if (cpuctx->exclusive)
476                 return 0;
477         /*
478          * If this group is exclusive and there are already
479          * counters on the CPU, it can't go on.
480          */
481         if (counter->hw_event.exclusive && cpuctx->active_oncpu)
482                 return 0;
483         /*
484          * Otherwise, try to add it if all previous groups were able
485          * to go on.
486          */
487         return can_add_hw;
488 }
489
490 static void add_counter_to_ctx(struct perf_counter *counter,
491                                struct perf_counter_context *ctx)
492 {
493         list_add_counter(counter, ctx);
494         ctx->nr_counters++;
495         counter->prev_state = PERF_COUNTER_STATE_OFF;
496         counter->tstamp_enabled = ctx->time_now;
497         counter->tstamp_running = ctx->time_now;
498         counter->tstamp_stopped = ctx->time_now;
499 }
500
501 /*
502  * Cross CPU call to install and enable a performance counter
503  */
504 static void __perf_install_in_context(void *info)
505 {
506         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
507         struct perf_counter *counter = info;
508         struct perf_counter_context *ctx = counter->ctx;
509         struct perf_counter *leader = counter->group_leader;
510         int cpu = smp_processor_id();
511         unsigned long flags;
512         u64 perf_flags;
513         int err;
514
515         /*
516          * If this is a task context, we need to check whether it is
517          * the current task context of this cpu. If not, it has been
518          * scheduled out before the smp call arrived.
519          */
520         if (ctx->task && cpuctx->task_ctx != ctx)
521                 return;
522
523         curr_rq_lock_irq_save(&flags);
524         spin_lock(&ctx->lock);
525         update_context_time(ctx, 1);
526
527         /*
528          * Protect the list operation against NMI by disabling the
529          * counters on a global level. NOP for non-NMI-based counters.
530          */
531         perf_flags = hw_perf_save_disable();
532
533         add_counter_to_ctx(counter, ctx);
534
535         /*
536          * Don't put the counter on if it is disabled or if
537          * it is in a group and the group isn't on.
538          */
539         if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
540             (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
541                 goto unlock;
542
543         /*
544          * An exclusive counter can't go on if there are already active
545          * hardware counters, and no hardware counter can go on if there
546          * is already an exclusive counter on.
547          */
548         if (!group_can_go_on(counter, cpuctx, 1))
549                 err = -EEXIST;
550         else
551                 err = counter_sched_in(counter, cpuctx, ctx, cpu);
552
553         if (err) {
554                 /*
555                  * This counter couldn't go on.  If it is in a group
556                  * then we have to pull the whole group off.
557                  * If the counter group is pinned then put it in error state.
558                  */
559                 if (leader != counter)
560                         group_sched_out(leader, cpuctx, ctx);
561                 if (leader->hw_event.pinned) {
562                         update_group_times(leader);
563                         leader->state = PERF_COUNTER_STATE_ERROR;
564                 }
565         }
566
567         if (!err && !ctx->task && cpuctx->max_pertask)
568                 cpuctx->max_pertask--;
569
570  unlock:
571         hw_perf_restore(perf_flags);
572
573         spin_unlock(&ctx->lock);
574         curr_rq_unlock_irq_restore(&flags);
575 }
576
577 /*
578  * Attach a performance counter to a context
579  *
580  * First we add the counter to the list with the hardware enable bit
581  * in counter->hw_config cleared.
582  *
583  * If the counter is attached to a task which is on a CPU we use an smp
584  * call to enable it in the task context. The task might have been
585  * scheduled away, but we check this in the smp call again.
586  *
587  * Must be called with ctx->mutex held.
588  */
589 static void
590 perf_install_in_context(struct perf_counter_context *ctx,
591                         struct perf_counter *counter,
592                         int cpu)
593 {
594         struct task_struct *task = ctx->task;
595
596         if (!task) {
597                 /*
598                  * Per cpu counters are installed via an smp call and
599                  * the install is always successful.
600                  */
601                 smp_call_function_single(cpu, __perf_install_in_context,
602                                          counter, 1);
603                 return;
604         }
605
606         counter->task = task;
607 retry:
608         task_oncpu_function_call(task, __perf_install_in_context,
609                                  counter);
610
611         spin_lock_irq(&ctx->lock);
612         /*
613          * If the context is active we need to retry the smp call.
614          */
615         if (ctx->is_active && list_empty(&counter->list_entry)) {
616                 spin_unlock_irq(&ctx->lock);
617                 goto retry;
618         }
619
620         /*
621          * The lock prevents this context from being scheduled in, so we
622          * can add the counter safely if the call above did not
623          * succeed.
624          */
625         if (list_empty(&counter->list_entry))
626                 add_counter_to_ctx(counter, ctx);
627         spin_unlock_irq(&ctx->lock);
628 }
629
630 /*
631  * Cross CPU call to enable a performance counter
632  */
633 static void __perf_counter_enable(void *info)
634 {
635         struct perf_counter *counter = info;
636         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
637         struct perf_counter_context *ctx = counter->ctx;
638         struct perf_counter *leader = counter->group_leader;
639         unsigned long flags;
640         int err;
641
642         /*
643          * If this is a per-task counter, we need to check whether this
644          * counter's task is the current task on this cpu.
645          */
646         if (ctx->task && cpuctx->task_ctx != ctx)
647                 return;
648
649         curr_rq_lock_irq_save(&flags);
650         spin_lock(&ctx->lock);
651         update_context_time(ctx, 1);
652
653         counter->prev_state = counter->state;
654         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
655                 goto unlock;
656         counter->state = PERF_COUNTER_STATE_INACTIVE;
657         counter->tstamp_enabled = ctx->time_now - counter->total_time_enabled;
658
659         /*
660          * If the counter is in a group and isn't the group leader,
661          * then don't put it on unless the group is on.
662          */
663         if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
664                 goto unlock;
665
666         if (!group_can_go_on(counter, cpuctx, 1))
667                 err = -EEXIST;
668         else
669                 err = counter_sched_in(counter, cpuctx, ctx,
670                                        smp_processor_id());
671
672         if (err) {
673                 /*
674                  * If this counter can't go on and it's part of a
675                  * group, then the whole group has to come off.
676                  */
677                 if (leader != counter)
678                         group_sched_out(leader, cpuctx, ctx);
679                 if (leader->hw_event.pinned) {
680                         update_group_times(leader);
681                         leader->state = PERF_COUNTER_STATE_ERROR;
682                 }
683         }
684
685  unlock:
686         spin_unlock(&ctx->lock);
687         curr_rq_unlock_irq_restore(&flags);
688 }
689
690 /*
691  * Enable a counter.
692  */
693 static void perf_counter_enable(struct perf_counter *counter)
694 {
695         struct perf_counter_context *ctx = counter->ctx;
696         struct task_struct *task = ctx->task;
697
698         if (!task) {
699                 /*
700                  * Enable the counter on the cpu that it's on
701                  */
702                 smp_call_function_single(counter->cpu, __perf_counter_enable,
703                                          counter, 1);
704                 return;
705         }
706
707         spin_lock_irq(&ctx->lock);
708         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
709                 goto out;
710
711         /*
712          * If the counter is in error state, clear that first.
713          * That way, if we see the counter in error state below, we
714          * know that it has gone back into error state, as distinct
715          * from the task having been scheduled away before the
716          * cross-call arrived.
717          */
718         if (counter->state == PERF_COUNTER_STATE_ERROR)
719                 counter->state = PERF_COUNTER_STATE_OFF;
720
721  retry:
722         spin_unlock_irq(&ctx->lock);
723         task_oncpu_function_call(task, __perf_counter_enable, counter);
724
725         spin_lock_irq(&ctx->lock);
726
727         /*
728          * If the context is active and the counter is still off,
729          * we need to retry the cross-call.
730          */
731         if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
732                 goto retry;
733
734         /*
735          * Since we have the lock this context can't be scheduled
736          * in, so we can change the state safely.
737          */
738         if (counter->state == PERF_COUNTER_STATE_OFF) {
739                 counter->state = PERF_COUNTER_STATE_INACTIVE;
740                 counter->tstamp_enabled = ctx->time_now -
741                         counter->total_time_enabled;
742         }
743  out:
744         spin_unlock_irq(&ctx->lock);
745 }
746
747 /*
748  * Enable a counter and all its children.
749  */
750 static void perf_counter_enable_family(struct perf_counter *counter)
751 {
752         struct perf_counter *child;
753
754         perf_counter_enable(counter);
755
756         /*
757          * Lock the mutex to protect the list of children
758          */
759         mutex_lock(&counter->mutex);
760         list_for_each_entry(child, &counter->child_list, child_list)
761                 perf_counter_enable(child);
762         mutex_unlock(&counter->mutex);
763 }
764
765 void __perf_counter_sched_out(struct perf_counter_context *ctx,
766                               struct perf_cpu_context *cpuctx)
767 {
768         struct perf_counter *counter;
769         u64 flags;
770
771         spin_lock(&ctx->lock);
772         ctx->is_active = 0;
773         if (likely(!ctx->nr_counters))
774                 goto out;
775         update_context_time(ctx, 0);
776
777         flags = hw_perf_save_disable();
778         if (ctx->nr_active) {
779                 list_for_each_entry(counter, &ctx->counter_list, list_entry)
780                         group_sched_out(counter, cpuctx, ctx);
781         }
782         hw_perf_restore(flags);
783  out:
784         spin_unlock(&ctx->lock);
785 }
786
787 /*
788  * Called from the scheduler to remove the counters of the current task,
789  * with interrupts disabled.
790  *
791  * We stop each counter and update the counter value in counter->count.
792  *
793  * This does not protect us against NMI, but disable()
794  * sets the disabled bit in the control field of the counter _before_
795  * accessing the counter control register. If an NMI hits, then it will
796  * not restart the counter.
797  */
798 void perf_counter_task_sched_out(struct task_struct *task, int cpu)
799 {
800         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
801         struct perf_counter_context *ctx = &task->perf_counter_ctx;
802         struct pt_regs *regs;
803
804         if (likely(!cpuctx->task_ctx))
805                 return;
806
807         regs = task_pt_regs(task);
808         perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
809         __perf_counter_sched_out(ctx, cpuctx);
810
811         cpuctx->task_ctx = NULL;
812 }
813
814 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
815 {
816         __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
817 }
818
819 static int
820 group_sched_in(struct perf_counter *group_counter,
821                struct perf_cpu_context *cpuctx,
822                struct perf_counter_context *ctx,
823                int cpu)
824 {
825         struct perf_counter *counter, *partial_group;
826         int ret;
827
828         if (group_counter->state == PERF_COUNTER_STATE_OFF)
829                 return 0;
830
831         ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
832         if (ret)
833                 return ret < 0 ? ret : 0;
834
835         group_counter->prev_state = group_counter->state;
836         if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
837                 return -EAGAIN;
838
839         /*
840          * Schedule in siblings as one group (if any):
841          */
842         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
843                 counter->prev_state = counter->state;
844                 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
845                         partial_group = counter;
846                         goto group_error;
847                 }
848         }
849
850         return 0;
851
852 group_error:
853         /*
854          * Groups can be scheduled in as one unit only, so undo any
855          * partial group before returning:
856          */
857         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
858                 if (counter == partial_group)
859                         break;
860                 counter_sched_out(counter, cpuctx, ctx);
861         }
862         counter_sched_out(group_counter, cpuctx, ctx);
863
864         return -EAGAIN;
865 }
866
867 static void
868 __perf_counter_sched_in(struct perf_counter_context *ctx,
869                         struct perf_cpu_context *cpuctx, int cpu)
870 {
871         struct perf_counter *counter;
872         u64 flags;
873         int can_add_hw = 1;
874
875         spin_lock(&ctx->lock);
876         ctx->is_active = 1;
877         if (likely(!ctx->nr_counters))
878                 goto out;
879
880         /*
881          * Add any time since the last sched_out to the lost time
882          * so it doesn't get included in the total_time_enabled and
883          * total_time_running measures for counters in the context.
884          */
885         ctx->time_lost = get_context_time(ctx, 0) - ctx->time_now;
886
887         flags = hw_perf_save_disable();
888
889         /*
890          * First go through the list and put on any pinned groups
891          * in order to give them the best chance of going on.
892          */
893         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
894                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
895                     !counter->hw_event.pinned)
896                         continue;
897                 if (counter->cpu != -1 && counter->cpu != cpu)
898                         continue;
899
900                 if (group_can_go_on(counter, cpuctx, 1))
901                         group_sched_in(counter, cpuctx, ctx, cpu);
902
903                 /*
904                  * If this pinned group hasn't been scheduled,
905                  * put it in error state.
906                  */
907                 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
908                         update_group_times(counter);
909                         counter->state = PERF_COUNTER_STATE_ERROR;
910                 }
911         }
912
913         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
914                 /*
915                  * Ignore counters in OFF or ERROR state, and
916                  * ignore pinned counters since we did them already.
917                  */
918                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
919                     counter->hw_event.pinned)
920                         continue;
921
922                 /*
923                  * Listen to the 'cpu' scheduling filter constraint
924                  * of counters:
925                  */
926                 if (counter->cpu != -1 && counter->cpu != cpu)
927                         continue;
928
929                 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
930                         if (group_sched_in(counter, cpuctx, ctx, cpu))
931                                 can_add_hw = 0;
932                 }
933         }
934         hw_perf_restore(flags);
935  out:
936         spin_unlock(&ctx->lock);
937 }
938
939 /*
940  * Called from the scheduler to add the counters of the current task,
941  * with interrupts disabled.
942  *
943  * We restore the counter value and then enable it.
944  *
945  * This does not protect us against NMI, but enable()
946  * sets the enabled bit in the control field of the counter _before_
947  * accessing the counter control register. If an NMI hits, then it will
948  * keep the counter running.
949  */
950 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
951 {
952         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
953         struct perf_counter_context *ctx = &task->perf_counter_ctx;
954
955         __perf_counter_sched_in(ctx, cpuctx, cpu);
956         cpuctx->task_ctx = ctx;
957 }
958
959 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
960 {
961         struct perf_counter_context *ctx = &cpuctx->ctx;
962
963         __perf_counter_sched_in(ctx, cpuctx, cpu);
964 }
965
966 int perf_counter_task_disable(void)
967 {
968         struct task_struct *curr = current;
969         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
970         struct perf_counter *counter;
971         unsigned long flags;
972         u64 perf_flags;
973         int cpu;
974
975         if (likely(!ctx->nr_counters))
976                 return 0;
977
978         curr_rq_lock_irq_save(&flags);
979         cpu = smp_processor_id();
980
981         /* force the update of the task clock: */
982         __task_delta_exec(curr, 1);
983
984         perf_counter_task_sched_out(curr, cpu);
985
986         spin_lock(&ctx->lock);
987
988         /*
989          * Disable all the counters:
990          */
991         perf_flags = hw_perf_save_disable();
992
993         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
994                 if (counter->state != PERF_COUNTER_STATE_ERROR) {
995                         update_group_times(counter);
996                         counter->state = PERF_COUNTER_STATE_OFF;
997                 }
998         }
999
1000         hw_perf_restore(perf_flags);
1001
1002         spin_unlock(&ctx->lock);
1003
1004         curr_rq_unlock_irq_restore(&flags);
1005
1006         return 0;
1007 }
1008
1009 int perf_counter_task_enable(void)
1010 {
1011         struct task_struct *curr = current;
1012         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
1013         struct perf_counter *counter;
1014         unsigned long flags;
1015         u64 perf_flags;
1016         int cpu;
1017
1018         if (likely(!ctx->nr_counters))
1019                 return 0;
1020
1021         curr_rq_lock_irq_save(&flags);
1022         cpu = smp_processor_id();
1023
1024         /* force the update of the task clock: */
1025         __task_delta_exec(curr, 1);
1026
1027         perf_counter_task_sched_out(curr, cpu);
1028
1029         spin_lock(&ctx->lock);
1030
1031         /*
1032          * Disable all the counters at the hardware level while we re-enable them:
1033          */
1034         perf_flags = hw_perf_save_disable();
1035
1036         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1037                 if (counter->state > PERF_COUNTER_STATE_OFF)
1038                         continue;
1039                 counter->state = PERF_COUNTER_STATE_INACTIVE;
1040                 counter->tstamp_enabled = ctx->time_now -
1041                         counter->total_time_enabled;
1042                 counter->hw_event.disabled = 0;
1043         }
1044         hw_perf_restore(perf_flags);
1045
1046         spin_unlock(&ctx->lock);
1047
1048         perf_counter_task_sched_in(curr, cpu);
1049
1050         curr_rq_unlock_irq_restore(&flags);
1051
1052         return 0;
1053 }
1054
1055 /*
1056  * Round-robin a context's counters:
1057  */
1058 static void rotate_ctx(struct perf_counter_context *ctx)
1059 {
1060         struct perf_counter *counter;
1061         u64 perf_flags;
1062
1063         if (!ctx->nr_counters)
1064                 return;
1065
1066         spin_lock(&ctx->lock);
1067         /*
1068          * Rotate the first entry last (works just fine for group counters too):
1069          */
1070         perf_flags = hw_perf_save_disable();
1071         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1072                 list_move_tail(&counter->list_entry, &ctx->counter_list);
1073                 break;
1074         }
1075         hw_perf_restore(perf_flags);
1076
1077         spin_unlock(&ctx->lock);
1078 }
1079
1080 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1081 {
1082         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1083         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
1084         const int rotate_percpu = 0;
1085
1086         if (rotate_percpu)
1087                 perf_counter_cpu_sched_out(cpuctx);
1088         perf_counter_task_sched_out(curr, cpu);
1089
1090         if (rotate_percpu)
1091                 rotate_ctx(&cpuctx->ctx);
1092         rotate_ctx(ctx);
1093
1094         if (rotate_percpu)
1095                 perf_counter_cpu_sched_in(cpuctx, cpu);
1096         perf_counter_task_sched_in(curr, cpu);
1097 }
1098
1099 /*
1100  * Cross CPU call to read the hardware counter
1101  */
1102 static void __read(void *info)
1103 {
1104         struct perf_counter *counter = info;
1105         struct perf_counter_context *ctx = counter->ctx;
1106         unsigned long flags;
1107
1108         curr_rq_lock_irq_save(&flags);
1109         if (ctx->is_active)
1110                 update_context_time(ctx, 1);
1111         counter->hw_ops->read(counter);
1112         update_counter_times(counter);
1113         curr_rq_unlock_irq_restore(&flags);
1114 }
1115
1116 static u64 perf_counter_read(struct perf_counter *counter)
1117 {
1118         /*
1119          * If the counter is enabled and currently active on a CPU, update the
1120          * value in the counter structure:
1121          */
1122         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1123                 smp_call_function_single(counter->oncpu,
1124                                          __read, counter, 1);
1125         } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1126                 update_counter_times(counter);
1127         }
1128
1129         return atomic64_read(&counter->count);
1130 }
1131
1132 static void put_context(struct perf_counter_context *ctx)
1133 {
1134         if (ctx->task)
1135                 put_task_struct(ctx->task);
1136 }
1137
1138 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1139 {
1140         struct perf_cpu_context *cpuctx;
1141         struct perf_counter_context *ctx;
1142         struct task_struct *task;
1143
1144         /*
1145          * If cpu is not a wildcard then this is a percpu counter:
1146          */
1147         if (cpu != -1) {
1148                 /* Must be root to operate on a CPU counter: */
1149                 if (!capable(CAP_SYS_ADMIN))
1150                         return ERR_PTR(-EACCES);
1151
1152                 if (cpu < 0 || cpu > num_possible_cpus())
1153                         return ERR_PTR(-EINVAL);
1154
1155                 /*
1156                  * We could be clever and allow attaching a counter to an
1157                  * offline CPU and activate it when the CPU comes up, but
1158                  * that's for later.
1159                  */
1160                 if (!cpu_isset(cpu, cpu_online_map))
1161                         return ERR_PTR(-ENODEV);
1162
1163                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1164                 ctx = &cpuctx->ctx;
1165
1166                 return ctx;
1167         }
1168
1169         rcu_read_lock();
1170         if (!pid)
1171                 task = current;
1172         else
1173                 task = find_task_by_vpid(pid);
1174         if (task)
1175                 get_task_struct(task);
1176         rcu_read_unlock();
1177
1178         if (!task)
1179                 return ERR_PTR(-ESRCH);
1180
1181         ctx = &task->perf_counter_ctx;
1182         ctx->task = task;
1183
1184         /* Reuse ptrace permission checks for now. */
1185         if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
1186                 put_context(ctx);
1187                 return ERR_PTR(-EACCES);
1188         }
1189
1190         return ctx;
1191 }
1192
1193 static void free_counter_rcu(struct rcu_head *head)
1194 {
1195         struct perf_counter *counter;
1196
1197         counter = container_of(head, struct perf_counter, rcu_head);
1198         kfree(counter);
1199 }
1200
1201 static void perf_pending_sync(struct perf_counter *counter);
1202
1203 static void free_counter(struct perf_counter *counter)
1204 {
1205         perf_pending_sync(counter);
1206
1207         if (counter->destroy)
1208                 counter->destroy(counter);
1209
1210         call_rcu(&counter->rcu_head, free_counter_rcu);
1211 }
1212
1213 /*
1214  * Called when the last reference to the file is gone.
1215  */
1216 static int perf_release(struct inode *inode, struct file *file)
1217 {
1218         struct perf_counter *counter = file->private_data;
1219         struct perf_counter_context *ctx = counter->ctx;
1220
1221         file->private_data = NULL;
1222
1223         mutex_lock(&ctx->mutex);
1224         mutex_lock(&counter->mutex);
1225
1226         perf_counter_remove_from_context(counter);
1227
1228         mutex_unlock(&counter->mutex);
1229         mutex_unlock(&ctx->mutex);
1230
1231         free_counter(counter);
1232         put_context(ctx);
1233
1234         return 0;
1235 }
1236
1237 /*
1238  * Read the performance counter - simple non-blocking version for now
1239  */
1240 static ssize_t
1241 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1242 {
1243         u64 values[3];
1244         int n;
1245
1246         /*
1247          * Return end-of-file for a read on a counter that is in
1248          * error state (i.e. because it was pinned but it couldn't be
1249          * scheduled on to the CPU at some point).
1250          */
1251         if (counter->state == PERF_COUNTER_STATE_ERROR)
1252                 return 0;
1253
1254         mutex_lock(&counter->mutex);
1255         values[0] = perf_counter_read(counter);
1256         n = 1;
1257         if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1258                 values[n++] = counter->total_time_enabled +
1259                         atomic64_read(&counter->child_total_time_enabled);
1260         if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1261                 values[n++] = counter->total_time_running +
1262                         atomic64_read(&counter->child_total_time_running);
1263         mutex_unlock(&counter->mutex);
1264
1265         if (count < n * sizeof(u64))
1266                 return -EINVAL;
1267         count = n * sizeof(u64);
1268
1269         if (copy_to_user(buf, values, count))
1270                 return -EFAULT;
1271
1272         return count;
1273 }
1274
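/*
 * Editorial sketch, not part of the original file: given the layout that
 * perf_read_hw() produces above, a user-space reader that set both
 * PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING in
 * hw_event.read_format would consume the three u64 values roughly like
 * this ('counter_fd' is a hypothetical descriptor for the counter):
 *
 *        u64 buf[3];
 *
 *        if (read(counter_fd, buf, sizeof(buf)) == sizeof(buf)) {
 *                u64 count        = buf[0];
 *                u64 time_enabled = buf[1];
 *                u64 time_running = buf[2];
 *        }
 */
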
1275 static ssize_t
1276 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1277 {
1278         struct perf_counter *counter = file->private_data;
1279
1280         return perf_read_hw(counter, buf, count);
1281 }
1282
1283 static unsigned int perf_poll(struct file *file, poll_table *wait)
1284 {
1285         struct perf_counter *counter = file->private_data;
1286         struct perf_mmap_data *data;
1287         unsigned int events;
1288
1289         rcu_read_lock();
1290         data = rcu_dereference(counter->data);
1291         if (data)
1292                 events = atomic_xchg(&data->wakeup, 0);
1293         else
1294                 events = POLL_HUP;
1295         rcu_read_unlock();
1296
1297         poll_wait(file, &counter->waitq, wait);
1298
1299         return events;
1300 }
1301
1302 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1303 {
1304         struct perf_counter *counter = file->private_data;
1305         int err = 0;
1306
1307         switch (cmd) {
1308         case PERF_COUNTER_IOC_ENABLE:
1309                 perf_counter_enable_family(counter);
1310                 break;
1311         case PERF_COUNTER_IOC_DISABLE:
1312                 perf_counter_disable_family(counter);
1313                 break;
1314         default:
1315                 err = -ENOTTY;
1316         }
1317         return err;
1318 }
1319
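/*
 * Editorial sketch, not part of the original file: user space drives the
 * two commands handled above through the normal ioctl() interface on the
 * counter file descriptor; perf_counter_{enable,disable}_family() then
 * propagate the request to all child counters as well.  'counter_fd' is a
 * hypothetical descriptor obtained from the counter-creation syscall:
 *
 *        ioctl(counter_fd, PERF_COUNTER_IOC_DISABLE, 0);
 *        ... run code that should not be counted ...
 *        ioctl(counter_fd, PERF_COUNTER_IOC_ENABLE, 0);
 */
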
1320 /*
1321  * Callers need to ensure there can be no nesting of this function, otherwise
1322  * the seqlock logic goes bad. We cannot serialize this because the arch
1323  * code calls this from NMI context.
1324  */
1325 void perf_counter_update_userpage(struct perf_counter *counter)
1326 {
1327         struct perf_mmap_data *data;
1328         struct perf_counter_mmap_page *userpg;
1329
1330         rcu_read_lock();
1331         data = rcu_dereference(counter->data);
1332         if (!data)
1333                 goto unlock;
1334
1335         userpg = data->user_page;
1336
1337         /*
1338          * Disable preemption so as to not let the corresponding user-space
1339          * spin too long if we get preempted.
1340          */
1341         preempt_disable();
1342         ++userpg->lock;
1343         barrier();
1344         userpg->index = counter->hw.idx;
1345         userpg->offset = atomic64_read(&counter->count);
1346         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1347                 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1348
1349         barrier();
1350         ++userpg->lock;
1351         preempt_enable();
1352 unlock:
1353         rcu_read_unlock();
1354 }
1355
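/*
 * Editorial sketch, not part of the original file: the lock/index/offset
 * updates above follow a seqcount-style protocol, so the matching
 * user-space read of the mmap'ed page retries whenever 'lock' is odd or
 * changes across the read.  Here barrier() stands in for a plain compiler
 * barrier in user space, and the arch-specific step of folding in the
 * live hardware counter selected by 'index' is omitted.
 */
#if 0   /* illustrative user-space code only */
static u64 read_mmap_counter(volatile struct perf_counter_mmap_page *pg)
{
        u32 seq;
        u64 count;

        do {
                seq = pg->lock;         /* odd means an update is in progress */
                barrier();
                count = pg->offset;     /* pg->index would be read here too */
                barrier();
        } while (pg->lock != seq || (seq & 1));

        /*
         * While the counter is active, 'count' still has to be combined
         * with the hardware counter selected by pg->index (arch-specific,
         * e.g. RDPMC on x86).
         */
        return count;
}
#endif
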
1356 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1357 {
1358         struct perf_counter *counter = vma->vm_file->private_data;
1359         struct perf_mmap_data *data;
1360         int ret = VM_FAULT_SIGBUS;
1361
1362         rcu_read_lock();
1363         data = rcu_dereference(counter->data);
1364         if (!data)
1365                 goto unlock;
1366
1367         if (vmf->pgoff == 0) {
1368                 vmf->page = virt_to_page(data->user_page);
1369         } else {
1370                 int nr = vmf->pgoff - 1;
1371
1372                 if ((unsigned)nr > data->nr_pages)
1373                         goto unlock;
1374
1375                 vmf->page = virt_to_page(data->data_pages[nr]);
1376         }
1377         get_page(vmf->page);
1378         ret = 0;
1379 unlock:
1380         rcu_read_unlock();
1381
1382         return ret;
1383 }
1384
1385 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1386 {
1387         struct perf_mmap_data *data;
1388         unsigned long size;
1389         int i;
1390
1391         WARN_ON(atomic_read(&counter->mmap_count));
1392
1393         size = sizeof(struct perf_mmap_data);
1394         size += nr_pages * sizeof(void *);
1395
1396         data = kzalloc(size, GFP_KERNEL);
1397         if (!data)
1398                 goto fail;
1399
1400         data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1401         if (!data->user_page)
1402                 goto fail_user_page;
1403
1404         for (i = 0; i < nr_pages; i++) {
1405                 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1406                 if (!data->data_pages[i])
1407                         goto fail_data_pages;
1408         }
1409
1410         data->nr_pages = nr_pages;
1411
1412         rcu_assign_pointer(counter->data, data);
1413
1414         return 0;
1415
1416 fail_data_pages:
1417         for (i--; i >= 0; i--)
1418                 free_page((unsigned long)data->data_pages[i]);
1419
1420         free_page((unsigned long)data->user_page);
1421
1422 fail_user_page:
1423         kfree(data);
1424
1425 fail:
1426         return -ENOMEM;
1427 }
1428
1429 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1430 {
1431         struct perf_mmap_data *data = container_of(rcu_head,
1432                         struct perf_mmap_data, rcu_head);
1433         int i;
1434
1435         free_page((unsigned long)data->user_page);
1436         for (i = 0; i < data->nr_pages; i++)
1437                 free_page((unsigned long)data->data_pages[i]);
1438         kfree(data);
1439 }
1440
1441 static void perf_mmap_data_free(struct perf_counter *counter)
1442 {
1443         struct perf_mmap_data *data = counter->data;
1444
1445         WARN_ON(atomic_read(&counter->mmap_count));
1446
1447         rcu_assign_pointer(counter->data, NULL);
1448         call_rcu(&data->rcu_head, __perf_mmap_data_free);
1449 }
1450
1451 static void perf_mmap_open(struct vm_area_struct *vma)
1452 {
1453         struct perf_counter *counter = vma->vm_file->private_data;
1454
1455         atomic_inc(&counter->mmap_count);
1456 }
1457
1458 static void perf_mmap_close(struct vm_area_struct *vma)
1459 {
1460         struct perf_counter *counter = vma->vm_file->private_data;
1461
1462         if (atomic_dec_and_mutex_lock(&counter->mmap_count,
1463                                       &counter->mmap_mutex)) {
1464                 vma->vm_mm->locked_vm -= counter->data->nr_pages + 1;
1465                 perf_mmap_data_free(counter);
1466                 mutex_unlock(&counter->mmap_mutex);
1467         }
1468 }
1469
1470 static struct vm_operations_struct perf_mmap_vmops = {
1471         .open  = perf_mmap_open,
1472         .close = perf_mmap_close,
1473         .fault = perf_mmap_fault,
1474 };
1475
1476 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1477 {
1478         struct perf_counter *counter = file->private_data;
1479         unsigned long vma_size;
1480         unsigned long nr_pages;
1481         unsigned long locked, lock_limit;
1482         int ret = 0;
1483
1484         if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1485                 return -EINVAL;
1486
1487         vma_size = vma->vm_end - vma->vm_start;
1488         nr_pages = (vma_size / PAGE_SIZE) - 1;
1489
1490         /*
1491          * If we have data pages ensure they're a power-of-two number, so we
1492          * can do bitmasks instead of modulo.
1493          */
1494         if (nr_pages != 0 && !is_power_of_2(nr_pages))
1495                 return -EINVAL;
1496
1497         if (vma_size != PAGE_SIZE * (1 + nr_pages))
1498                 return -EINVAL;
1499
1500         if (vma->vm_pgoff != 0)
1501                 return -EINVAL;
1502
1503         mutex_lock(&counter->mmap_mutex);
1504         if (atomic_inc_not_zero(&counter->mmap_count)) {
1505                 if (nr_pages != counter->data->nr_pages)
1506                         ret = -EINVAL;
1507                 goto unlock;
1508         }
1509
1510         locked = vma->vm_mm->locked_vm;
1511         locked += nr_pages + 1;
1512
1513         lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1514         lock_limit >>= PAGE_SHIFT;
1515
1516         if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1517                 ret = -EPERM;
1518                 goto unlock;
1519         }
1520
1521         WARN_ON(counter->data);
1522         ret = perf_mmap_data_alloc(counter, nr_pages);
1523         if (ret)
1524                 goto unlock;
1525
1526         atomic_set(&counter->mmap_count, 1);
1527         vma->vm_mm->locked_vm += nr_pages + 1;
1528 unlock:
1529         mutex_unlock(&counter->mmap_mutex);
1530
1531         vma->vm_flags &= ~VM_MAYWRITE;
1532         vma->vm_flags |= VM_RESERVED;
1533         vma->vm_ops = &perf_mmap_vmops;
1534
1535         return ret;
1536 }
1537
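/*
 * Editorial sketch, not part of the original file: the checks above mean
 * user space maps the counter read-only and shared, at file offset 0,
 * with one control page followed by a power-of-two number of data pages,
 * e.g. ('counter_fd' and 'page_size' are hypothetical):
 *
 *        len  = (1 + 8) * page_size;        (one control page + 2^3 data pages)
 *        base = mmap(NULL, len, PROT_READ, MAP_SHARED, counter_fd, 0);
 *
 * 'base' then points at the struct perf_counter_mmap_page control page,
 * with the event data area starting one page later.
 */
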
1538 static int perf_fasync(int fd, struct file *filp, int on)
1539 {
1540         struct perf_counter *counter = filp->private_data;
1541         struct inode *inode = filp->f_path.dentry->d_inode;
1542         int retval;
1543
1544         mutex_lock(&inode->i_mutex);
1545         retval = fasync_helper(fd, filp, on, &counter->fasync);
1546         mutex_unlock(&inode->i_mutex);
1547
1548         if (retval < 0)
1549                 return retval;
1550
1551         return 0;
1552 }
1553
1554 static const struct file_operations perf_fops = {
1555         .release                = perf_release,
1556         .read                   = perf_read,
1557         .poll                   = perf_poll,
1558         .unlocked_ioctl         = perf_ioctl,
1559         .compat_ioctl           = perf_ioctl,
1560         .mmap                   = perf_mmap,
1561         .fasync                 = perf_fasync,
1562 };
1563
1564 /*
1565  * Perf counter wakeup
1566  *
1567  * If there's data, ensure we set the poll() state and publish everything
1568  * to user-space before waking everybody up.
1569  */
1570
1571 void perf_counter_wakeup(struct perf_counter *counter)
1572 {
1573         struct perf_mmap_data *data;
1574
1575         rcu_read_lock();
1576         data = rcu_dereference(counter->data);
1577         if (data) {
1578                 atomic_set(&data->wakeup, POLL_IN);
1579                 /*
1580                  * Ensure all data writes are issued before updating the
1581                  * user-space data head information. The matching rmb()
1582                  * will be in userspace after reading this value.
1583                  */
1584                 smp_wmb();
1585                 data->user_page->data_head = atomic_read(&data->head);
1586         }
1587         rcu_read_unlock();
1588
1589         wake_up_all(&counter->waitq);
1590         kill_fasync(&counter->fasync, SIGIO, POLL_IN);
1591 }
1592
1593 static void perf_pending_wakeup(struct perf_pending_entry *entry)
1594 {
1595         struct perf_counter *counter = container_of(entry,
1596                         struct perf_counter, pending);
1597
1598         perf_counter_wakeup(counter);
1599 }
1600
1601 /*
1602  * Pending wakeups
1603  *
1604  * Handle the case where we need to wake up from NMI (or rq->lock) context.
1605  *
1606  * The NMI bit means we cannot possibly take locks. Therefore, maintain a
1607  * singly linked list and use cmpxchg() to add entries locklessly.
1608  */
1609
1610 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
1611
1612 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
1613         PENDING_TAIL,
1614 };
1615
1616 static void perf_pending_queue(struct perf_pending_entry *entry,
1617                                void (*func)(struct perf_pending_entry *))
1618 {
1619         struct perf_pending_entry **head;
1620
1621         if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
1622                 return;
1623
1624         entry->func = func;
1625
1626         head = &get_cpu_var(perf_pending_head);
1627
1628         do {
1629                 entry->next = *head;
1630         } while (cmpxchg(head, entry->next, entry) != entry->next);
1631
1632         set_perf_counter_pending();
1633
1634         put_cpu_var(perf_pending_head);
1635 }
1636
1637 static int __perf_pending_run(void)
1638 {
1639         struct perf_pending_entry *list;
1640         int nr = 0;
1641
1642         list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
1643         while (list != PENDING_TAIL) {
1644                 void (*func)(struct perf_pending_entry *);
1645                 struct perf_pending_entry *entry = list;
1646
1647                 list = list->next;
1648
1649                 func = entry->func;
1650                 entry->next = NULL;
1651                 /*
1652                  * Ensure we observe the unqueue before we issue the wakeup,
1653                  * so that we won't be waiting forever.
1654                  * -- see perf_not_pending().
1655                  */
1656                 smp_wmb();
1657
1658                 func(entry);
1659                 nr++;
1660         }
1661
1662         return nr;
1663 }
1664
1665 static inline int perf_not_pending(struct perf_counter *counter)
1666 {
1667         /*
1668          * If we flush the pending list on whichever CPU we happen to run
1669          * on, there is a chance we don't need to wait at all.
1670          */
1671         get_cpu();
1672         __perf_pending_run();
1673         put_cpu();
1674
1675         /*
1676          * Ensure we see the proper queue state before going to sleep
1677          * so that we do not miss the wakeup. -- see __perf_pending_run()
1678          */
1679         smp_rmb();
1680         return counter->pending.next == NULL;
1681 }
1682
1683 static void perf_pending_sync(struct perf_counter *counter)
1684 {
1685         wait_event(counter->waitq, perf_not_pending(counter));
1686 }
1687
1688 void perf_counter_do_pending(void)
1689 {
1690         __perf_pending_run();
1691 }
1692
1693 /*
1694  * Callchain support -- arch specific
1695  */
1696
1697 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1698 {
1699         return NULL;
1700 }
1701
1702 /*
1703  * Output
1704  */
1705
1706 struct perf_output_handle {
1707         struct perf_counter     *counter;
1708         struct perf_mmap_data   *data;
1709         unsigned int            offset;
1710         unsigned int            head;
1711         int                     wakeup;
1712         int                     nmi;
1713 };
1714
1715 static inline void __perf_output_wakeup(struct perf_output_handle *handle)
1716 {
1717         if (handle->nmi) {
1718                 perf_pending_queue(&handle->counter->pending,
1719                                    perf_pending_wakeup);
1720         } else
1721                 perf_counter_wakeup(handle->counter);
1722 }
1723
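/*
 * Reserve 'size' bytes in the counter's mmap buffer.  The cmpxchg() loop
 * atomically advances data->head past the reservation.  The RCU read-side
 * lock taken here stays held on success and is dropped by
 * perf_output_end(); on failure it is released before returning -ENOSPC.
 */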
1724 static int perf_output_begin(struct perf_output_handle *handle,
1725                              struct perf_counter *counter, unsigned int size,
1726                              int nmi)
1727 {
1728         struct perf_mmap_data *data;
1729         unsigned int offset, head;
1730
1731         rcu_read_lock();
1732         data = rcu_dereference(counter->data);
1733         if (!data)
1734                 goto out;
1735
1736         handle->counter = counter;
1737         handle->nmi     = nmi;
1738
1739         if (!data->nr_pages)
1740                 goto fail;
1741
1742         do {
1743                 offset = head = atomic_read(&data->head);
1744                 head += size;
1745         } while (atomic_cmpxchg(&data->head, offset, head) != offset);
1746
1747         handle->data    = data;
1748         handle->offset  = offset;
1749         handle->head    = head;
1750         handle->wakeup  = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
1751
1752         return 0;
1753
1754 fail:
1755         __perf_output_wakeup(handle);
1756 out:
1757         rcu_read_unlock();
1758
1759         return -ENOSPC;
1760 }
1761
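/*
 * Copy 'len' bytes into the previously reserved area, one page at a time.
 * nr_pages is a power of two, so masking the page index with
 * (nr_pages - 1) wraps writes around the ring buffer.
 */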
1762 static void perf_output_copy(struct perf_output_handle *handle,
1763                              void *buf, unsigned int len)
1764 {
1765         unsigned int pages_mask;
1766         unsigned int offset;
1767         unsigned int size;
1768         void **pages;
1769
1770         offset          = handle->offset;
1771         pages_mask      = handle->data->nr_pages - 1;
1772         pages           = handle->data->data_pages;
1773
1774         do {
1775                 unsigned int page_offset;
1776                 int nr;
1777
1778                 nr          = (offset >> PAGE_SHIFT) & pages_mask;
1779                 page_offset = offset & (PAGE_SIZE - 1);
1780                 size        = min_t(unsigned int, PAGE_SIZE - page_offset, len);
1781
1782                 memcpy(pages[nr] + page_offset, buf, size);
1783
1784                 len         -= size;
1785                 buf         += size;
1786                 offset      += size;
1787         } while (len);
1788
1789         handle->offset = offset;
1790
1791         WARN_ON_ONCE(handle->offset > handle->head);
1792 }
1793
1794 #define perf_output_put(handle, x) \
1795         perf_output_copy((handle), &(x), sizeof(x))
1796
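/*
 * Finish an output operation: if hw_event.wakeup_events is set, wake the
 * reader once that many records have been written, otherwise wake up
 * whenever the write crossed into a new page.  Also drops the RCU read
 * lock taken in perf_output_begin().
 */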
1797 static void perf_output_end(struct perf_output_handle *handle)
1798 {
1799         int wakeup_events = handle->counter->hw_event.wakeup_events;
1800
1801         if (wakeup_events) {
1802                 int events = atomic_inc_return(&handle->data->events);
1803                 if (events >= wakeup_events) {
1804                         atomic_sub(wakeup_events, &handle->data->events);
1805                         __perf_output_wakeup(handle);
1806                 }
1807         } else if (handle->wakeup)
1808                 __perf_output_wakeup(handle);
1809         rcu_read_unlock();
1810 }
1811
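/*
 * Emit a PERF_EVENT_COUNTER_OVERFLOW record.  The optional fields (IP,
 * pid/tid, group member readings, callchain) selected by
 * hw_event.record_type are accounted into header.size first, then the
 * record is written out field by field.
 */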
1812 static void perf_counter_output(struct perf_counter *counter,
1813                                 int nmi, struct pt_regs *regs)
1814 {
1815         int ret;
1816         u64 record_type = counter->hw_event.record_type;
1817         struct perf_output_handle handle;
1818         struct perf_event_header header;
1819         u64 ip;
1820         struct {
1821                 u32 pid, tid;
1822         } tid_entry;
1823         struct {
1824                 u64 event;
1825                 u64 counter;
1826         } group_entry;
1827         struct perf_callchain_entry *callchain = NULL;
1828         int callchain_size = 0;
1829
1830         header.type = PERF_EVENT_COUNTER_OVERFLOW;
1831         header.size = sizeof(header);
1832
1833         if (record_type & PERF_RECORD_IP) {
1834                 ip = instruction_pointer(regs);
1835                 header.type |= __PERF_EVENT_IP;
1836                 header.size += sizeof(ip);
1837         }
1838
1839         if (record_type & PERF_RECORD_TID) {
1840                 /* namespace issues */
1841                 tid_entry.pid = current->group_leader->pid;
1842                 tid_entry.tid = current->pid;
1843
1844                 header.type |= __PERF_EVENT_TID;
1845                 header.size += sizeof(tid_entry);
1846         }
1847
1848         if (record_type & PERF_RECORD_GROUP) {
1849                 header.type |= __PERF_EVENT_GROUP;
1850                 header.size += sizeof(u64) +
1851                         counter->nr_siblings * sizeof(group_entry);
1852         }
1853
1854         if (record_type & PERF_RECORD_CALLCHAIN) {
1855                 callchain = perf_callchain(regs);
1856
1857                 if (callchain) {
1858                         callchain_size = (1 + callchain->nr) * sizeof(u64);
1859
1860                         header.type |= __PERF_EVENT_CALLCHAIN;
1861                         header.size += callchain_size;
1862                 }
1863         }
1864
1865         ret = perf_output_begin(&handle, counter, header.size, nmi);
1866         if (ret)
1867                 return;
1868
1869         perf_output_put(&handle, header);
1870
1871         if (record_type & PERF_RECORD_IP)
1872                 perf_output_put(&handle, ip);
1873
1874         if (record_type & PERF_RECORD_TID)
1875                 perf_output_put(&handle, tid_entry);
1876
1877         if (record_type & PERF_RECORD_GROUP) {
1878                 struct perf_counter *leader, *sub;
1879                 u64 nr = counter->nr_siblings;
1880
1881                 perf_output_put(&handle, nr);
1882
1883                 leader = counter->group_leader;
1884                 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
1885                         if (sub != counter)
1886                                 sub->hw_ops->read(sub);
1887
1888                         group_entry.event = sub->hw_event.config;
1889                         group_entry.counter = atomic64_read(&sub->count);
1890
1891                         perf_output_put(&handle, group_entry);
1892                 }
1893         }
1894
1895         if (callchain)
1896                 perf_output_copy(&handle, callchain, callchain_size);
1897
1898         perf_output_end(&handle);
1899 }
1900
1901 /*
1902  * mmap tracking
1903  */
1904
1905 struct perf_mmap_event {
1906         struct file     *file;
1907         char            *file_name;
1908         int             file_size;
1909
1910         struct {
1911                 struct perf_event_header        header;
1912
1913                 u32                             pid;
1914                 u32                             tid;
1915                 u64                             start;
1916                 u64                             len;
1917                 u64                             pgoff;
1918         } event;
1919 };
1920
1921 static void perf_counter_mmap_output(struct perf_counter *counter,
1922                                      struct perf_mmap_event *mmap_event)
1923 {
1924         struct perf_output_handle handle;
1925         int size = mmap_event->event.header.size;
1926         int ret = perf_output_begin(&handle, counter, size, 0);
1927
1928         if (ret)
1929                 return;
1930
1931         perf_output_put(&handle, mmap_event->event);
1932         perf_output_copy(&handle, mmap_event->file_name,
1933                                    mmap_event->file_size);
1934         perf_output_end(&handle);
1935 }
1936
1937 static int perf_counter_mmap_match(struct perf_counter *counter,
1938                                    struct perf_mmap_event *mmap_event)
1939 {
1940         if (counter->hw_event.mmap &&
1941             mmap_event->event.header.type == PERF_EVENT_MMAP)
1942                 return 1;
1943
1944         if (counter->hw_event.munmap &&
1945             mmap_event->event.header.type == PERF_EVENT_MUNMAP)
1946                 return 1;
1947
1948         return 0;
1949 }
1950
1951 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
1952                                   struct perf_mmap_event *mmap_event)
1953 {
1954         struct perf_counter *counter;
1955
1956         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
1957                 return;
1958
1959         rcu_read_lock();
1960         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
1961                 if (perf_counter_mmap_match(counter, mmap_event))
1962                         perf_counter_mmap_output(counter, mmap_event);
1963         }
1964         rcu_read_unlock();
1965 }
1966
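/*
 * Resolve a file name for the (un)map event - dentry_path() into a
 * PATH_MAX buffer for file backed mappings, or a "//anon" / "//enomem" /
 * "//toolong" placeholder otherwise - then broadcast the event to all
 * matching counters in the per-CPU context and in the current task's
 * context.
 */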
1967 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
1968 {
1969         struct perf_cpu_context *cpuctx;
1970         struct file *file = mmap_event->file;
1971         unsigned int size;
1972         char tmp[16];
1973         char *buf = NULL;
1974         char *name;
1975
1976         if (file) {
1977                 buf = kzalloc(PATH_MAX, GFP_KERNEL);
1978                 if (!buf) {
1979                         name = strncpy(tmp, "//enomem", sizeof(tmp));
1980                         goto got_name;
1981                 }
1982                 name = dentry_path(file->f_dentry, buf, PATH_MAX);
1983                 if (IS_ERR(name)) {
1984                         name = strncpy(tmp, "//toolong", sizeof(tmp));
1985                         goto got_name;
1986                 }
1987         } else {
1988                 name = strncpy(tmp, "//anon", sizeof(tmp));
1989                 goto got_name;
1990         }
1991
1992 got_name:
1993         size = ALIGN(strlen(name), sizeof(u64));
1994
1995         mmap_event->file_name = name;
1996         mmap_event->file_size = size;
1997
1998         mmap_event->event.header.size = sizeof(mmap_event->event) + size;
1999
2000         cpuctx = &get_cpu_var(perf_cpu_context);
2001         perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2002         put_cpu_var(perf_cpu_context);
2003
2004         perf_counter_mmap_ctx(&current->perf_counter_ctx, mmap_event);
2005
2006         kfree(buf);
2007 }
2008
2009 void perf_counter_mmap(unsigned long addr, unsigned long len,
2010                        unsigned long pgoff, struct file *file)
2011 {
2012         struct perf_mmap_event mmap_event = {
2013                 .file   = file,
2014                 .event  = {
2015                         .header = { .type = PERF_EVENT_MMAP, },
2016                         .pid    = current->group_leader->pid,
2017                         .tid    = current->pid,
2018                         .start  = addr,
2019                         .len    = len,
2020                         .pgoff  = pgoff,
2021                 },
2022         };
2023
2024         perf_counter_mmap_event(&mmap_event);
2025 }
2026
2027 void perf_counter_munmap(unsigned long addr, unsigned long len,
2028                          unsigned long pgoff, struct file *file)
2029 {
2030         struct perf_mmap_event mmap_event = {
2031                 .file   = file,
2032                 .event  = {
2033                         .header = { .type = PERF_EVENT_MUNMAP, },
2034                         .pid    = current->group_leader->pid,
2035                         .tid    = current->pid,
2036                         .start  = addr,
2037                         .len    = len,
2038                         .pgoff  = pgoff,
2039                 },
2040         };
2041
2042         perf_counter_mmap_event(&mmap_event);
2043 }
2044
2045 /*
2046  * Generic counter overflow handling.
2047  */
2048
2049 int perf_counter_overflow(struct perf_counter *counter,
2050                           int nmi, struct pt_regs *regs)
2051 {
2052         perf_counter_output(counter, nmi, regs);
2053         return 0;
2054 }
2055
2056 /*
2057  * Generic software counter infrastructure
2058  */
2059
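/*
 * Fold the difference between hw.count and hw.prev_count into the
 * counter's user-visible count, and consume the same delta from
 * period_left.  The cmpxchg() retry loop makes the update safe against a
 * concurrent update from NMI context.
 */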
2060 static void perf_swcounter_update(struct perf_counter *counter)
2061 {
2062         struct hw_perf_counter *hwc = &counter->hw;
2063         u64 prev, now;
2064         s64 delta;
2065
2066 again:
2067         prev = atomic64_read(&hwc->prev_count);
2068         now = atomic64_read(&hwc->count);
2069         if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
2070                 goto again;
2071
2072         delta = now - prev;
2073
2074         atomic64_add(delta, &counter->count);
2075         atomic64_sub(delta, &hwc->period_left);
2076 }
2077
2078 static void perf_swcounter_set_period(struct perf_counter *counter)
2079 {
2080         struct hw_perf_counter *hwc = &counter->hw;
2081         s64 left = atomic64_read(&hwc->period_left);
2082         s64 period = hwc->irq_period;
2083
2084         if (unlikely(left <= -period)) {
2085                 left = period;
2086                 atomic64_set(&hwc->period_left, left);
2087         }
2088
2089         if (unlikely(left <= 0)) {
2090                 left += period;
2091                 atomic64_add(period, &hwc->period_left);
2092         }
2093
2094         atomic64_set(&hwc->prev_count, -left);
2095         atomic64_set(&hwc->count, -left);
2096 }
2097
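/*
 * hrtimer callback used by the sampling software counters: read the
 * counter, fall back to the user registers when kernel IPs are excluded
 * or we are not in interrupt context, report an overflow, then re-arm
 * the timer for the next irq_period.
 */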
2098 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
2099 {
2100         enum hrtimer_restart ret = HRTIMER_RESTART;
2101         struct perf_counter *counter;
2102         struct pt_regs *regs;
2103
2104         counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
2105         counter->hw_ops->read(counter);
2106
2107         regs = get_irq_regs();
2108         /*
2109          * In case we exclude kernel IPs or are somehow not in interrupt
2110          * context, provide the next best thing, the user IP.
2111          */
2112         if ((counter->hw_event.exclude_kernel || !regs) &&
2113                         !counter->hw_event.exclude_user)
2114                 regs = task_pt_regs(current);
2115
2116         if (regs) {
2117                 if (perf_counter_overflow(counter, 0, regs))
2118                         ret = HRTIMER_NORESTART;
2119         }
2120
2121         hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
2122
2123         return ret;
2124 }
2125
2126 static void perf_swcounter_overflow(struct perf_counter *counter,
2127                                     int nmi, struct pt_regs *regs)
2128 {
2129         perf_swcounter_update(counter);
2130         perf_swcounter_set_period(counter);
2131         if (perf_counter_overflow(counter, nmi, regs))
2132                 /* soft-disable the counter */
2133                 ;
2134
2135 }
2136
2137 static int perf_swcounter_match(struct perf_counter *counter,
2138                                 enum perf_event_types type,
2139                                 u32 event, struct pt_regs *regs)
2140 {
2141         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
2142                 return 0;
2143
2144         if (perf_event_raw(&counter->hw_event))
2145                 return 0;
2146
2147         if (perf_event_type(&counter->hw_event) != type)
2148                 return 0;
2149
2150         if (perf_event_id(&counter->hw_event) != event)
2151                 return 0;
2152
2153         if (counter->hw_event.exclude_user && user_mode(regs))
2154                 return 0;
2155
2156         if (counter->hw_event.exclude_kernel && !user_mode(regs))
2157                 return 0;
2158
2159         return 1;
2160 }
2161
2162 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
2163                                int nmi, struct pt_regs *regs)
2164 {
2165         int neg = atomic64_add_negative(nr, &counter->hw.count);
2166         if (counter->hw.irq_period && !neg)
2167                 perf_swcounter_overflow(counter, nmi, regs);
2168 }
2169
2170 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
2171                                      enum perf_event_types type, u32 event,
2172                                      u64 nr, int nmi, struct pt_regs *regs)
2173 {
2174         struct perf_counter *counter;
2175
2176         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2177                 return;
2178
2179         rcu_read_lock();
2180         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2181                 if (perf_swcounter_match(counter, type, event, regs))
2182                         perf_swcounter_add(counter, nr, nmi, regs);
2183         }
2184         rcu_read_unlock();
2185 }
2186
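/*
 * One recursion counter per execution context (task, softirq, hardirq,
 * NMI): a software event raised while we are already processing one in
 * the same context is dropped rather than recursed into.
 */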
2187 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
2188 {
2189         if (in_nmi())
2190                 return &cpuctx->recursion[3];
2191
2192         if (in_irq())
2193                 return &cpuctx->recursion[2];
2194
2195         if (in_softirq())
2196                 return &cpuctx->recursion[1];
2197
2198         return &cpuctx->recursion[0];
2199 }
2200
2201 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
2202                                    u64 nr, int nmi, struct pt_regs *regs)
2203 {
2204         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
2205         int *recursion = perf_swcounter_recursion_context(cpuctx);
2206
2207         if (*recursion)
2208                 goto out;
2209
2210         (*recursion)++;
2211         barrier();
2212
2213         perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs);
2214         if (cpuctx->task_ctx) {
2215                 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
2216                                 nr, nmi, regs);
2217         }
2218
2219         barrier();
2220         (*recursion)--;
2221
2222 out:
2223         put_cpu_var(perf_cpu_context);
2224 }
2225
2226 void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)
2227 {
2228         __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs);
2229 }
2230
2231 static void perf_swcounter_read(struct perf_counter *counter)
2232 {
2233         perf_swcounter_update(counter);
2234 }
2235
2236 static int perf_swcounter_enable(struct perf_counter *counter)
2237 {
2238         perf_swcounter_set_period(counter);
2239         return 0;
2240 }
2241
2242 static void perf_swcounter_disable(struct perf_counter *counter)
2243 {
2244         perf_swcounter_update(counter);
2245 }
2246
2247 static const struct hw_perf_counter_ops perf_ops_generic = {
2248         .enable         = perf_swcounter_enable,
2249         .disable        = perf_swcounter_disable,
2250         .read           = perf_swcounter_read,
2251 };
2252
2253 /*
2254  * Software counter: cpu wall time clock
2255  */
2256
2257 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
2258 {
2259         int cpu = raw_smp_processor_id();
2260         s64 prev;
2261         u64 now;
2262
2263         now = cpu_clock(cpu);
2264         prev = atomic64_read(&counter->hw.prev_count);
2265         atomic64_set(&counter->hw.prev_count, now);
2266         atomic64_add(now - prev, &counter->count);
2267 }
2268
2269 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
2270 {
2271         struct hw_perf_counter *hwc = &counter->hw;
2272         int cpu = raw_smp_processor_id();
2273
2274         atomic64_set(&hwc->prev_count, cpu_clock(cpu));
2275         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2276         hwc->hrtimer.function = perf_swcounter_hrtimer;
2277         if (hwc->irq_period) {
2278                 __hrtimer_start_range_ns(&hwc->hrtimer,
2279                                 ns_to_ktime(hwc->irq_period), 0,
2280                                 HRTIMER_MODE_REL, 0);
2281         }
2282
2283         return 0;
2284 }
2285
2286 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
2287 {
2288         hrtimer_cancel(&counter->hw.hrtimer);
2289         cpu_clock_perf_counter_update(counter);
2290 }
2291
2292 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
2293 {
2294         cpu_clock_perf_counter_update(counter);
2295 }
2296
2297 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
2298         .enable         = cpu_clock_perf_counter_enable,
2299         .disable        = cpu_clock_perf_counter_disable,
2300         .read           = cpu_clock_perf_counter_read,
2301 };
2302
2303 /*
2304  * Software counter: task time clock
2305  */
2306
2307 /*
2308  * Called from within the scheduler:
2309  */
2310 static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
2311 {
2312         struct task_struct *curr = counter->task;
2313         u64 delta;
2314
2315         delta = __task_delta_exec(curr, update);
2316
2317         return curr->se.sum_exec_runtime + delta;
2318 }
2319
2320 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
2321 {
2322         u64 prev;
2323         s64 delta;
2324
2325         prev = atomic64_read(&counter->hw.prev_count);
2326
2327         atomic64_set(&counter->hw.prev_count, now);
2328
2329         delta = now - prev;
2330
2331         atomic64_add(delta, &counter->count);
2332 }
2333
2334 static int task_clock_perf_counter_enable(struct perf_counter *counter)
2335 {
2336         struct hw_perf_counter *hwc = &counter->hw;
2337
2338         atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
2339         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2340         hwc->hrtimer.function = perf_swcounter_hrtimer;
2341         if (hwc->irq_period) {
2342                 __hrtimer_start_range_ns(&hwc->hrtimer,
2343                                 ns_to_ktime(hwc->irq_period), 0,
2344                                 HRTIMER_MODE_REL, 0);
2345         }
2346
2347         return 0;
2348 }
2349
2350 static void task_clock_perf_counter_disable(struct perf_counter *counter)
2351 {
2352         hrtimer_cancel(&counter->hw.hrtimer);
2353         task_clock_perf_counter_update(counter,
2354                         task_clock_perf_counter_val(counter, 0));
2355 }
2356
2357 static void task_clock_perf_counter_read(struct perf_counter *counter)
2358 {
2359         task_clock_perf_counter_update(counter,
2360                         task_clock_perf_counter_val(counter, 1));
2361 }
2362
2363 static const struct hw_perf_counter_ops perf_ops_task_clock = {
2364         .enable         = task_clock_perf_counter_enable,
2365         .disable        = task_clock_perf_counter_disable,
2366         .read           = task_clock_perf_counter_read,
2367 };
2368
2369 /*
2370  * Software counter: cpu migrations
2371  */
2372
2373 static inline u64 get_cpu_migrations(struct perf_counter *counter)
2374 {
2375         struct task_struct *curr = counter->ctx->task;
2376
2377         if (curr)
2378                 return curr->se.nr_migrations;
2379         return cpu_nr_migrations(smp_processor_id());
2380 }
2381
2382 static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
2383 {
2384         u64 prev, now;
2385         s64 delta;
2386
2387         prev = atomic64_read(&counter->hw.prev_count);
2388         now = get_cpu_migrations(counter);
2389
2390         atomic64_set(&counter->hw.prev_count, now);
2391
2392         delta = now - prev;
2393
2394         atomic64_add(delta, &counter->count);
2395 }
2396
2397 static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
2398 {
2399         cpu_migrations_perf_counter_update(counter);
2400 }
2401
2402 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
2403 {
2404         if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
2405                 atomic64_set(&counter->hw.prev_count,
2406                              get_cpu_migrations(counter));
2407         return 0;
2408 }
2409
2410 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
2411 {
2412         cpu_migrations_perf_counter_update(counter);
2413 }
2414
2415 static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
2416         .enable         = cpu_migrations_perf_counter_enable,
2417         .disable        = cpu_migrations_perf_counter_disable,
2418         .read           = cpu_migrations_perf_counter_read,
2419 };
2420
2421 #ifdef CONFIG_EVENT_PROFILE
2422 void perf_tpcounter_event(int event_id)
2423 {
2424         struct pt_regs *regs = get_irq_regs();
2425
2426         if (!regs)
2427                 regs = task_pt_regs(current);
2428
2429         __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs);
2430 }
2431
2432 extern int ftrace_profile_enable(int);
2433 extern void ftrace_profile_disable(int);
2434
2435 static void tp_perf_counter_destroy(struct perf_counter *counter)
2436 {
2437         ftrace_profile_disable(perf_event_id(&counter->hw_event));
2438 }
2439
2440 static const struct hw_perf_counter_ops *
2441 tp_perf_counter_init(struct perf_counter *counter)
2442 {
2443         int event_id = perf_event_id(&counter->hw_event);
2444         int ret;
2445
2446         ret = ftrace_profile_enable(event_id);
2447         if (ret)
2448                 return NULL;
2449
2450         counter->destroy = tp_perf_counter_destroy;
2451         counter->hw.irq_period = counter->hw_event.irq_period;
2452
2453         return &perf_ops_generic;
2454 }
2455 #else
2456 static const struct hw_perf_counter_ops *
2457 tp_perf_counter_init(struct perf_counter *counter)
2458 {
2459         return NULL;
2460 }
2461 #endif
2462
2463 static const struct hw_perf_counter_ops *
2464 sw_perf_counter_init(struct perf_counter *counter)
2465 {
2466         struct perf_counter_hw_event *hw_event = &counter->hw_event;
2467         const struct hw_perf_counter_ops *hw_ops = NULL;
2468         struct hw_perf_counter *hwc = &counter->hw;
2469
2470         /*
2471          * Software counters (currently) can't in general distinguish
2472          * between user, kernel and hypervisor events.
2473          * However, context switches and cpu migrations are considered
2474          * to be kernel events, and page faults are never hypervisor
2475          * events.
2476          */
2477         switch (perf_event_id(&counter->hw_event)) {
2478         case PERF_COUNT_CPU_CLOCK:
2479                 hw_ops = &perf_ops_cpu_clock;
2480
2481                 if (hw_event->irq_period && hw_event->irq_period < 10000)
2482                         hw_event->irq_period = 10000;
2483                 break;
2484         case PERF_COUNT_TASK_CLOCK:
2485                 /*
2486                  * If the user instantiates this as a per-cpu counter,
2487                  * use the cpu_clock counter instead.
2488                  */
2489                 if (counter->ctx->task)
2490                         hw_ops = &perf_ops_task_clock;
2491                 else
2492                         hw_ops = &perf_ops_cpu_clock;
2493
2494                 if (hw_event->irq_period && hw_event->irq_period < 10000)
2495                         hw_event->irq_period = 10000;
2496                 break;
2497         case PERF_COUNT_PAGE_FAULTS:
2498         case PERF_COUNT_PAGE_FAULTS_MIN:
2499         case PERF_COUNT_PAGE_FAULTS_MAJ:
2500         case PERF_COUNT_CONTEXT_SWITCHES:
2501                 hw_ops = &perf_ops_generic;
2502                 break;
2503         case PERF_COUNT_CPU_MIGRATIONS:
2504                 if (!counter->hw_event.exclude_kernel)
2505                         hw_ops = &perf_ops_cpu_migrations;
2506                 break;
2507         }
2508
2509         if (hw_ops)
2510                 hwc->irq_period = hw_event->irq_period;
2511
2512         return hw_ops;
2513 }
2514
2515 /*
2516  * Allocate and initialize a counter structure
2517  */
2518 static struct perf_counter *
2519 perf_counter_alloc(struct perf_counter_hw_event *hw_event,
2520                    int cpu,
2521                    struct perf_counter_context *ctx,
2522                    struct perf_counter *group_leader,
2523                    gfp_t gfpflags)
2524 {
2525         const struct hw_perf_counter_ops *hw_ops;
2526         struct perf_counter *counter;
2527         long err;
2528
2529         counter = kzalloc(sizeof(*counter), gfpflags);
2530         if (!counter)
2531                 return ERR_PTR(-ENOMEM);
2532
2533         /*
2534          * Single counters are their own group leaders, with an
2535          * empty sibling list:
2536          */
2537         if (!group_leader)
2538                 group_leader = counter;
2539
2540         mutex_init(&counter->mutex);
2541         INIT_LIST_HEAD(&counter->list_entry);
2542         INIT_LIST_HEAD(&counter->event_entry);
2543         INIT_LIST_HEAD(&counter->sibling_list);
2544         init_waitqueue_head(&counter->waitq);
2545
2546         mutex_init(&counter->mmap_mutex);
2547
2548         INIT_LIST_HEAD(&counter->child_list);
2549
2550         counter->cpu                    = cpu;
2551         counter->hw_event               = *hw_event;
2552         counter->group_leader           = group_leader;
2553         counter->hw_ops                 = NULL;
2554         counter->ctx                    = ctx;
2555
2556         counter->state = PERF_COUNTER_STATE_INACTIVE;
2557         if (hw_event->disabled)
2558                 counter->state = PERF_COUNTER_STATE_OFF;
2559
2560         hw_ops = NULL;
2561
2562         if (perf_event_raw(hw_event)) {
2563                 hw_ops = hw_perf_counter_init(counter);
2564                 goto done;
2565         }
2566
2567         switch (perf_event_type(hw_event)) {
2568         case PERF_TYPE_HARDWARE:
2569                 hw_ops = hw_perf_counter_init(counter);
2570                 break;
2571
2572         case PERF_TYPE_SOFTWARE:
2573                 hw_ops = sw_perf_counter_init(counter);
2574                 break;
2575
2576         case PERF_TYPE_TRACEPOINT:
2577                 hw_ops = tp_perf_counter_init(counter);
2578                 break;
2579         }
2580 done:
2581         err = 0;
2582         if (!hw_ops)
2583                 err = -EINVAL;
2584         else if (IS_ERR(hw_ops))
2585                 err = PTR_ERR(hw_ops);
2586
2587         if (err) {
2588                 kfree(counter);
2589                 return ERR_PTR(err);
2590         }
2591
2592         counter->hw_ops = hw_ops;
2593
2594         return counter;
2595 }
2596
2597 /**
2598  * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
2599  *
2600  * @hw_event_uptr:      event type attributes for monitoring/sampling
2601  * @pid:                target pid
2602  * @cpu:                target cpu
2603  * @group_fd:           group leader counter fd
2604  */
2605 SYSCALL_DEFINE5(perf_counter_open,
2606                 const struct perf_counter_hw_event __user *, hw_event_uptr,
2607                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
2608 {
2609         struct perf_counter *counter, *group_leader;
2610         struct perf_counter_hw_event hw_event;
2611         struct perf_counter_context *ctx;
2612         struct file *counter_file = NULL;
2613         struct file *group_file = NULL;
2614         int fput_needed = 0;
2615         int fput_needed2 = 0;
2616         int ret;
2617
2618         /* for future expandability... */
2619         if (flags)
2620                 return -EINVAL;
2621
2622         if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
2623                 return -EFAULT;
2624
2625         /*
2626          * Get the target context (task or percpu):
2627          */
2628         ctx = find_get_context(pid, cpu);
2629         if (IS_ERR(ctx))
2630                 return PTR_ERR(ctx);
2631
2632         /*
2633          * Look up the group leader (we will attach this counter to it):
2634          */
2635         group_leader = NULL;
2636         if (group_fd != -1) {
2637                 ret = -EINVAL;
2638                 group_file = fget_light(group_fd, &fput_needed);
2639                 if (!group_file)
2640                         goto err_put_context;
2641                 if (group_file->f_op != &perf_fops)
2642                         goto err_put_context;
2643
2644                 group_leader = group_file->private_data;
2645                 /*
2646                  * Do not allow a recursive hierarchy (this new sibling
2647                  * being attached to a counter that is itself a sibling):
2648                  */
2649                 if (group_leader->group_leader != group_leader)
2650                         goto err_put_context;
2651                 /*
2652                  * Do not allow attaching to a group in a different
2653                  * task or CPU context:
2654                  */
2655                 if (group_leader->ctx != ctx)
2656                         goto err_put_context;
2657                 /*
2658                  * Only a group leader can be exclusive or pinned
2659                  */
2660                 if (hw_event.exclusive || hw_event.pinned)
2661                         goto err_put_context;
2662         }
2663
2664         counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
2665                                      GFP_KERNEL);
2666         ret = PTR_ERR(counter);
2667         if (IS_ERR(counter))
2668                 goto err_put_context;
2669
2670         ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
2671         if (ret < 0)
2672                 goto err_free_put_context;
2673
2674         counter_file = fget_light(ret, &fput_needed2);
2675         if (!counter_file)
2676                 goto err_free_put_context;
2677
2678         counter->filp = counter_file;
2679         mutex_lock(&ctx->mutex);
2680         perf_install_in_context(ctx, counter, cpu);
2681         mutex_unlock(&ctx->mutex);
2682
2683         fput_light(counter_file, fput_needed2);
2684
2685 out_fput:
2686         fput_light(group_file, fput_needed);
2687
2688         return ret;
2689
2690 err_free_put_context:
2691         kfree(counter);
2692
2693 err_put_context:
2694         put_context(ctx);
2695
2696         goto out_fput;
2697 }
2698
2699 /*
2700  * Initialize the perf_counter context in a task_struct:
2701  */
2702 static void
2703 __perf_counter_init_context(struct perf_counter_context *ctx,
2704                             struct task_struct *task)
2705 {
2706         memset(ctx, 0, sizeof(*ctx));
2707         spin_lock_init(&ctx->lock);
2708         mutex_init(&ctx->mutex);
2709         INIT_LIST_HEAD(&ctx->counter_list);
2710         INIT_LIST_HEAD(&ctx->event_list);
2711         ctx->task = task;
2712 }
2713
2714 /*
2715  * inherit a counter from parent task to child task:
2716  */
2717 static struct perf_counter *
2718 inherit_counter(struct perf_counter *parent_counter,
2719               struct task_struct *parent,
2720               struct perf_counter_context *parent_ctx,
2721               struct task_struct *child,
2722               struct perf_counter *group_leader,
2723               struct perf_counter_context *child_ctx)
2724 {
2725         struct perf_counter *child_counter;
2726
2727         /*
2728          * Instead of creating recursive hierarchies of counters,
2729          * we link inherited counters back to the original parent,
2730          * which is guaranteed to have a filp that we use as the
2731          * reference count:
2732          */
2733         if (parent_counter->parent)
2734                 parent_counter = parent_counter->parent;
2735
2736         child_counter = perf_counter_alloc(&parent_counter->hw_event,
2737                                            parent_counter->cpu, child_ctx,
2738                                            group_leader, GFP_KERNEL);
2739         if (IS_ERR(child_counter))
2740                 return child_counter;
2741
2742         /*
2743          * Link it up in the child's context:
2744          */
2745         child_counter->task = child;
2746         add_counter_to_ctx(child_counter, child_ctx);
2747
2748         child_counter->parent = parent_counter;
2749         /*
2750          * inherit into child's child as well:
2751          */
2752         child_counter->hw_event.inherit = 1;
2753
2754         /*
2755          * Get a reference to the parent filp - we will fput it
2756          * when the child counter exits. This is safe to do because
2757          * we are in the parent and we know that the filp still
2758          * exists and has a nonzero count:
2759          */
2760         atomic_long_inc(&parent_counter->filp->f_count);
2761
2762         /*
2763          * Link this into the parent counter's child list
2764          */
2765         mutex_lock(&parent_counter->mutex);
2766         list_add_tail(&child_counter->child_list, &parent_counter->child_list);
2767
2768         /*
2769          * Make the child state follow the state of the parent counter,
2770          * not its hw_event.disabled bit.  We hold the parent's mutex,
2771          * so we won't race with perf_counter_{en,dis}able_family.
2772          */
2773         if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
2774                 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
2775         else
2776                 child_counter->state = PERF_COUNTER_STATE_OFF;
2777
2778         mutex_unlock(&parent_counter->mutex);
2779
2780         return child_counter;
2781 }
2782
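/*
 * Inherit a whole counter group into the child: clone the group leader
 * first, then each of its siblings with the cloned leader as their
 * group leader.
 */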
2783 static int inherit_group(struct perf_counter *parent_counter,
2784               struct task_struct *parent,
2785               struct perf_counter_context *parent_ctx,
2786               struct task_struct *child,
2787               struct perf_counter_context *child_ctx)
2788 {
2789         struct perf_counter *leader;
2790         struct perf_counter *sub;
2791         struct perf_counter *child_ctr;
2792
2793         leader = inherit_counter(parent_counter, parent, parent_ctx,
2794                                  child, NULL, child_ctx);
2795         if (IS_ERR(leader))
2796                 return PTR_ERR(leader);
2797         list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
2798                 child_ctr = inherit_counter(sub, parent, parent_ctx,
2799                                             child, leader, child_ctx);
2800                 if (IS_ERR(child_ctr))
2801                         return PTR_ERR(child_ctr);
2802         }
2803         return 0;
2804 }
2805
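/*
 * Fold a dead child counter's count and time-enabled/running totals back
 * into its parent, unlink it from the parent's child list and drop the
 * reference on the parent's filp that was taken in inherit_counter().
 */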
2806 static void sync_child_counter(struct perf_counter *child_counter,
2807                                struct perf_counter *parent_counter)
2808 {
2809         u64 parent_val, child_val;
2810
2811         parent_val = atomic64_read(&parent_counter->count);
2812         child_val = atomic64_read(&child_counter->count);
2813
2814         /*
2815          * Add back the child's count to the parent's count:
2816          */
2817         atomic64_add(child_val, &parent_counter->count);
2818         atomic64_add(child_counter->total_time_enabled,
2819                      &parent_counter->child_total_time_enabled);
2820         atomic64_add(child_counter->total_time_running,
2821                      &parent_counter->child_total_time_running);
2822
2823         /*
2824          * Remove this counter from the parent's list
2825          */
2826         mutex_lock(&parent_counter->mutex);
2827         list_del_init(&child_counter->child_list);
2828         mutex_unlock(&parent_counter->mutex);
2829
2830         /*
2831          * Release the parent counter, if this was the last
2832          * reference to it.
2833          */
2834         fput(parent_counter->filp);
2835 }
2836
2837 static void
2838 __perf_counter_exit_task(struct task_struct *child,
2839                          struct perf_counter *child_counter,
2840                          struct perf_counter_context *child_ctx)
2841 {
2842         struct perf_counter *parent_counter;
2843         struct perf_counter *sub, *tmp;
2844
2845         /*
2846          * If we do not self-reap then we have to wait for the
2847          * child task to unschedule (it will happen for sure),
2848          * so that its counter is at its final count. (This
2849          * condition triggers rarely - child tasks usually get
2850          * off their CPU before the parent has a chance to
2851          * get this far into the reaping action)
2852          */
2853         if (child != current) {
2854                 wait_task_inactive(child, 0);
2855                 list_del_init(&child_counter->list_entry);
2856                 update_counter_times(child_counter);
2857         } else {
2858                 struct perf_cpu_context *cpuctx;
2859                 unsigned long flags;
2860                 u64 perf_flags;
2861
2862                 /*
2863                  * Disable and unlink this counter.
2864                  *
2865                  * Be careful about zapping the list - IRQ/NMI context
2866                  * could still be processing it:
2867                  */
2868                 curr_rq_lock_irq_save(&flags);
2869                 perf_flags = hw_perf_save_disable();
2870
2871                 cpuctx = &__get_cpu_var(perf_cpu_context);
2872
2873                 group_sched_out(child_counter, cpuctx, child_ctx);
2874                 update_counter_times(child_counter);
2875
2876                 list_del_init(&child_counter->list_entry);
2877
2878                 child_ctx->nr_counters--;
2879
2880                 hw_perf_restore(perf_flags);
2881                 curr_rq_unlock_irq_restore(&flags);
2882         }
2883
2884         parent_counter = child_counter->parent;
2885         /*
2886          * It can happen that the parent exits first, and has counters
2887          * that are still around due to the child reference. These
2888          * counters need to be zapped - but otherwise linger.
2889          */
2890         if (parent_counter) {
2891                 sync_child_counter(child_counter, parent_counter);
2892                 list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
2893                                          list_entry) {
2894                         if (sub->parent) {
2895                                 sync_child_counter(sub, sub->parent);
2896                                 free_counter(sub);
2897                         }
2898                 }
2899                 free_counter(child_counter);
2900         }
2901 }
2902
2903 /*
2904  * When a child task exits, feed back counter values to parent counters.
2905  *
2906  * Note: we may be running in child context, but the PID is not hashed
2907  * anymore so new counters will not be added.
2908  */
2909 void perf_counter_exit_task(struct task_struct *child)
2910 {
2911         struct perf_counter *child_counter, *tmp;
2912         struct perf_counter_context *child_ctx;
2913
2914         child_ctx = &child->perf_counter_ctx;
2915
2916         if (likely(!child_ctx->nr_counters))
2917                 return;
2918
2919         list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
2920                                  list_entry)
2921                 __perf_counter_exit_task(child, child_counter, child_ctx);
2922 }
2923
2924 /*
2925  * Initialize the perf_counter context in task_struct
2926  */
2927 void perf_counter_init_task(struct task_struct *child)
2928 {
2929         struct perf_counter_context *child_ctx, *parent_ctx;
2930         struct perf_counter *counter;
2931         struct task_struct *parent = current;
2932
2933         child_ctx  =  &child->perf_counter_ctx;
2934         parent_ctx = &parent->perf_counter_ctx;
2935
2936         __perf_counter_init_context(child_ctx, child);
2937
2938         /*
2939          * This is executed from the parent task context, so inherit
2940          * counters that have been marked for cloning:
2941          */
2942
2943         if (likely(!parent_ctx->nr_counters))
2944                 return;
2945
2946         /*
2947          * Lock the parent list. No need to lock the child - not PID
2948          * hashed yet and not running, so nobody can access it.
2949          */
2950         mutex_lock(&parent_ctx->mutex);
2951
2952         /*
2953          * We don't have to disable NMIs - we are only looking at
2954          * the list, not manipulating it:
2955          */
2956         list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
2957                 if (!counter->hw_event.inherit)
2958                         continue;
2959
2960                 if (inherit_group(counter, parent,
2961                                   parent_ctx, child, child_ctx))
2962                         break;
2963         }
2964
2965         mutex_unlock(&parent_ctx->mutex);
2966 }
2967
2968 static void __cpuinit perf_counter_init_cpu(int cpu)
2969 {
2970         struct perf_cpu_context *cpuctx;
2971
2972         cpuctx = &per_cpu(perf_cpu_context, cpu);
2973         __perf_counter_init_context(&cpuctx->ctx, NULL);
2974
2975         mutex_lock(&perf_resource_mutex);
2976         cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
2977         mutex_unlock(&perf_resource_mutex);
2978
2979         hw_perf_counter_setup(cpu);
2980 }
2981
2982 #ifdef CONFIG_HOTPLUG_CPU
2983 static void __perf_counter_exit_cpu(void *info)
2984 {
2985         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
2986         struct perf_counter_context *ctx = &cpuctx->ctx;
2987         struct perf_counter *counter, *tmp;
2988
2989         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
2990                 __perf_counter_remove_from_context(counter);
2991 }
2992 static void perf_counter_exit_cpu(int cpu)
2993 {
2994         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
2995         struct perf_counter_context *ctx = &cpuctx->ctx;
2996
2997         mutex_lock(&ctx->mutex);
2998         smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
2999         mutex_unlock(&ctx->mutex);
3000 }
3001 #else
3002 static inline void perf_counter_exit_cpu(int cpu) { }
3003 #endif
3004
3005 static int __cpuinit
3006 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
3007 {
3008         unsigned int cpu = (long)hcpu;
3009
3010         switch (action) {
3011
3012         case CPU_UP_PREPARE:
3013         case CPU_UP_PREPARE_FROZEN:
3014                 perf_counter_init_cpu(cpu);
3015                 break;
3016
3017         case CPU_DOWN_PREPARE:
3018         case CPU_DOWN_PREPARE_FROZEN:
3019                 perf_counter_exit_cpu(cpu);
3020                 break;
3021
3022         default:
3023                 break;
3024         }
3025
3026         return NOTIFY_OK;
3027 }
3028
3029 static struct notifier_block __cpuinitdata perf_cpu_nb = {
3030         .notifier_call          = perf_cpu_notify,
3031 };
3032
3033 static int __init perf_counter_init(void)
3034 {
3035         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
3036                         (void *)(long)smp_processor_id());
3037         register_cpu_notifier(&perf_cpu_nb);
3038
3039         return 0;
3040 }
3041 early_initcall(perf_counter_init);
3042
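/*
 * sysfs knobs (assumed to appear under the cpu sysdev class, i.e.
 * /sys/devices/system/cpu/perf_counters/): reserve_percpu and overcommit,
 * both serialized by perf_resource_mutex.
 */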
3043 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
3044 {
3045         return sprintf(buf, "%d\n", perf_reserved_percpu);
3046 }
3047
3048 static ssize_t
3049 perf_set_reserve_percpu(struct sysdev_class *class,
3050                         const char *buf,
3051                         size_t count)
3052 {
3053         struct perf_cpu_context *cpuctx;
3054         unsigned long val;
3055         int err, cpu, mpt;
3056
3057         err = strict_strtoul(buf, 10, &val);
3058         if (err)
3059                 return err;
3060         if (val > perf_max_counters)
3061                 return -EINVAL;
3062
3063         mutex_lock(&perf_resource_mutex);
3064         perf_reserved_percpu = val;
3065         for_each_online_cpu(cpu) {
3066                 cpuctx = &per_cpu(perf_cpu_context, cpu);
3067                 spin_lock_irq(&cpuctx->ctx.lock);
3068                 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
3069                           perf_max_counters - perf_reserved_percpu);
3070                 cpuctx->max_pertask = mpt;
3071                 spin_unlock_irq(&cpuctx->ctx.lock);
3072         }
3073         mutex_unlock(&perf_resource_mutex);
3074
3075         return count;
3076 }
3077
3078 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
3079 {
3080         return sprintf(buf, "%d\n", perf_overcommit);
3081 }
3082
3083 static ssize_t
3084 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
3085 {
3086         unsigned long val;
3087         int err;
3088
3089         err = strict_strtoul(buf, 10, &val);
3090         if (err)
3091                 return err;
3092         if (val > 1)
3093                 return -EINVAL;
3094
3095         mutex_lock(&perf_resource_mutex);
3096         perf_overcommit = val;
3097         mutex_unlock(&perf_resource_mutex);
3098
3099         return count;
3100 }
3101
3102 static SYSDEV_CLASS_ATTR(
3103                                 reserve_percpu,
3104                                 0644,
3105                                 perf_show_reserve_percpu,
3106                                 perf_set_reserve_percpu
3107                         );
3108
3109 static SYSDEV_CLASS_ATTR(
3110                                 overcommit,
3111                                 0644,
3112                                 perf_show_overcommit,
3113                                 perf_set_overcommit
3114                         );
3115
3116 static struct attribute *perfclass_attrs[] = {
3117         &attr_reserve_percpu.attr,
3118         &attr_overcommit.attr,
3119         NULL
3120 };
3121
3122 static struct attribute_group perfclass_attr_group = {
3123         .attrs                  = perfclass_attrs,
3124         .name                   = "perf_counters",
3125 };
3126
3127 static int __init perf_counter_sysfs_init(void)
3128 {
3129         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
3130                                   &perfclass_attr_group);
3131 }
3132 device_initcall(perf_counter_sysfs_init);