perf: Better order flexible and pinned scheduling
authorFrederic Weisbecker <fweisbec@gmail.com>
Sun, 17 Jan 2010 11:56:05 +0000 (12:56 +0100)
committerFrederic Weisbecker <fweisbec@gmail.com>
Sun, 17 Jan 2010 12:11:05 +0000 (13:11 +0100)
When a task gets scheduled in, we don't touch the cpu bound events,
so the priority order becomes:

cpu pinned, cpu flexible, task pinned, task flexible.

So schedule out the cpu flexible groups when a new task context gets in,
and schedule the groups in the correct order:

task pinned, cpu flexible, task flexible.

Cpu pinned groups don't need to be touched at this time.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
kernel/perf_event.c

index a90ae694cbc1928d698cc2eb50f72dd09c0eec90..edc46b92b5088c511e6539720bb0cc6a69596b76 100644 (file)
@@ -1362,6 +1362,14 @@ ctx_sched_in(struct perf_event_context *ctx,
        raw_spin_unlock(&ctx->lock);
 }
 
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+                            enum event_type_t event_type)
+{
+       struct perf_event_context *ctx = &cpuctx->ctx;
+
+       ctx_sched_in(ctx, cpuctx, event_type);
+}
+
 static void task_ctx_sched_in(struct task_struct *task,
                              enum event_type_t event_type)
 {
@@ -1388,15 +1396,27 @@ static void task_ctx_sched_in(struct task_struct *task,
  */
 void perf_event_task_sched_in(struct task_struct *task)
 {
-       task_ctx_sched_in(task, EVENT_ALL);
-}
+       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+       struct perf_event_context *ctx = task->perf_event_ctxp;
 
-static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-                            enum event_type_t event_type)
-{
-       struct perf_event_context *ctx = &cpuctx->ctx;
+       if (likely(!ctx))
+               return;
 
-       ctx_sched_in(ctx, cpuctx, event_type);
+       if (cpuctx->task_ctx == ctx)
+               return;
+
+       /*
+        * We want to keep the following priority order:
+        * cpu pinned (that don't need to move), task pinned,
+        * cpu flexible, task flexible.
+        */
+       cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+
+       ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
+       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
+       ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
+
+       cpuctx->task_ctx = ctx;
 }
 
 #define MAX_INTERRUPTS (~0ULL)