sched: Feature to disable APERF/MPERF cpu_power
authorPeter Zijlstra <a.p.zijlstra@chello.nl>
Thu, 3 Sep 2009 11:20:03 +0000 (13:20 +0200)
committerIngo Molnar <mingo@elte.hu>
Tue, 15 Sep 2009 14:51:28 +0000 (16:51 +0200)
I suspect a feedback loop between cpuidle and the aperf/mperf
cpu_power bits, where idle C-states lower the ratio, which
leads to lower cpu_power and then less load, which generates
more idle time, etc..

Put in a knob to disable it.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c
kernel/sched_features.h

index c210321adcb92e502a8083ade161c42f204f4a80..e8e603bf8761e4785d9442eb0882383134875809 100644 (file)
@@ -3602,11 +3602,19 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
        unsigned long power = SCHED_LOAD_SCALE;
        struct sched_group *sdg = sd->groups;
 
-       power *= arch_scale_freq_power(sd, cpu);
+       if (sched_feat(ARCH_POWER))
+               power *= arch_scale_freq_power(sd, cpu);
+       else
+               power *= default_scale_freq_power(sd, cpu);
+
        power >>= SCHED_LOAD_SHIFT;
 
        if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
-               power *= arch_scale_smt_power(sd, cpu);
+               if (sched_feat(ARCH_POWER))
+                       power *= arch_scale_smt_power(sd, cpu);
+               else
+                       power *= default_scale_smt_power(sd, cpu);
+
                power >>= SCHED_LOAD_SHIFT;
        }
 
index e98c2e8de1d507c0889406cafc81a2ce3e0153d4..294e10edd3c8bc67e135cce272c772e9ca67379e 100644 (file)
@@ -82,6 +82,11 @@ SCHED_FEAT(LAST_BUDDY, 1)
  */
 SCHED_FEAT(CACHE_HOT_BUDDY, 1)
 
+/*
+ * Use arch dependent cpu power functions
+ */
+SCHED_FEAT(ARCH_POWER, 0)
+
 SCHED_FEAT(HRTICK, 0)
 SCHED_FEAT(DOUBLE_TICK, 0)
 SCHED_FEAT(LB_BIAS, 1)