sched/fair: Add per-CPU min capacity to sched_group_capacity
author     Morten Rasmussen <morten.rasmussen@arm.com>
           Fri, 14 Oct 2016 13:41:09 +0000 (14:41 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Wed, 16 Nov 2016 09:29:06 +0000 (10:29 +0100)
struct sched_group_capacity currently represents the compute capacity
sum of all CPUs in the sched_group.

Unless it is divided by the group_weight to get the average capacity
per CPU, it hides differences in CPU capacity for mixed-capacity systems
(e.g. due to high RT/IRQ utilization on some CPUs, or ARM big.LITTLE).

But even the average may not be sufficient if the group covers CPUs of
different capacities.
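
To illustrate with made-up numbers (not from this patch): in a group of two
big CPUs at capacity 1024 and two LITTLE CPUs at capacity 446, both the sum
and the average hide the LITTLE CPUs:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/*
	 * Hypothetical 2 big + 2 LITTLE group; capacities are in
	 * SCHED_CAPACITY_SCALE units (1024) and made up for illustration.
	 */
	unsigned long cap[] = { 1024, 1024, 446, 446 };
	unsigned long sum = 0, min = ULONG_MAX;
	unsigned int i, n = sizeof(cap) / sizeof(cap[0]);

	for (i = 0; i < n; i++) {
		sum += cap[i];
		if (cap[i] < min)
			min = cap[i];
	}

	/* Prints: sum=2940 avg=735 min=446 */
	printf("sum=%lu avg=%lu min=%lu\n", sum, sum / n, min);

	/*
	 * A task with utilization ~600 looks fine against the average
	 * (735) but does not fit on a LITTLE CPU (446); only the
	 * per-CPU minimum exposes that.
	 */
	return 0;
}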

Instead, by extending struct sched_group_capacity to indicate the minimum
per-CPU capacity in the group, a suitable group for a given task utilization
can more easily be found, so that CPUs with reduced capacity can be avoided
for tasks with high utilization (not implemented by this patch).
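
For example, a later change could use the new field along these lines; this
is only a sketch, not part of this patch, assuming the kernel-internal
struct sched_group / sched_group_capacity types from kernel/sched/sched.h,
a task_util()-style estimate of the task's utilization, and an arbitrary
~25% headroom (the helper name is made up):

/* Sketch only: skip groups whose weakest CPU cannot serve task_util. */
static inline bool group_fits_task_util(struct sched_group *sg,
					unsigned long task_util)
{
	return sg->sgc->min_capacity * 1024 > task_util * 1280;
}

A wake-up or find_idlest_group()-style search could then prefer groups for
which this holds, and fall back to the group with the highest min_capacity
otherwise.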

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dietmar.eggemann@arm.com
Cc: freedom.tan@mediatek.com
Cc: keita.kobayashi.ym@renesas.com
Cc: mgalbraith@suse.de
Cc: sgurrappadi@nvidia.com
Cc: vincent.guittot@linaro.org
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1476452472-24740-4-git-send-email-morten.rasmussen@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f3cfa0dd5b34950db22931b3e496d5bf77124844..6bf1fd3514d5ec9edee9df74573ff54f5634914f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5708,7 +5708,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                printk(KERN_CONT " %*pbl",
                       cpumask_pr_args(sched_group_cpus(group)));
                if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
-                       printk(KERN_CONT " (cpu_capacity = %d)",
+                       printk(KERN_CONT " (cpu_capacity = %lu)",
                                group->sgc->capacity);
                }
 
@@ -6185,6 +6185,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                 * die on a /0 trap.
                 */
                sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+               sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
 
                /*
                 * Make sure the first group of this domain contains the
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1ad37064c0c2752bdb041fb9fc5a9139af58fbb2..faf8f18616e6c7c5d044e6b5d9bd8e0b78b00132 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6909,13 +6909,14 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 
        cpu_rq(cpu)->cpu_capacity = capacity;
        sdg->sgc->capacity = capacity;
+       sdg->sgc->min_capacity = capacity;
 }
 
 void update_group_capacity(struct sched_domain *sd, int cpu)
 {
        struct sched_domain *child = sd->child;
        struct sched_group *group, *sdg = sd->groups;
-       unsigned long capacity;
+       unsigned long capacity, min_capacity;
        unsigned long interval;
 
        interval = msecs_to_jiffies(sd->balance_interval);
@@ -6928,6 +6929,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
        }
 
        capacity = 0;
+       min_capacity = ULONG_MAX;
 
        if (child->flags & SD_OVERLAP) {
                /*
@@ -6952,11 +6954,12 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
                         */
                        if (unlikely(!rq->sd)) {
                                capacity += capacity_of(cpu);
-                               continue;
+                       } else {
+                               sgc = rq->sd->groups->sgc;
+                               capacity += sgc->capacity;
                        }
 
-                       sgc = rq->sd->groups->sgc;
-                       capacity += sgc->capacity;
+                       min_capacity = min(capacity, min_capacity);
                }
        } else  {
                /*
@@ -6966,12 +6969,16 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 
                group = child->groups;
                do {
-                       capacity += group->sgc->capacity;
+                       struct sched_group_capacity *sgc = group->sgc;
+
+                       capacity += sgc->capacity;
+                       min_capacity = min(sgc->min_capacity, min_capacity);
                        group = group->next;
                } while (group != child->groups);
        }
 
        sdg->sgc->capacity = capacity;
+       sdg->sgc->min_capacity = min_capacity;
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 055f935d4421838a3da328c6bab87ff7e84c90d4..345c1ccaba3448f0ffa307356367c07c864a2ad1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -892,7 +892,8 @@ struct sched_group_capacity {
         * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
         * for a single CPU.
         */
-       unsigned int capacity;
+       unsigned long capacity;
+       unsigned long min_capacity; /* Min per-CPU capacity in group */
        unsigned long next_update;
        int imbalance; /* XXX unrelated to capacity but shared group state */
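
As an aside, the aggregation done by the non-SD_OVERLAP branch of the
update_group_capacity() hunk above (sum for capacity, minimum for
min_capacity across child groups) can be modelled outside the kernel; a
minimal user-space sketch with made-up child group values:

#include <limits.h>
#include <stdio.h>

/* Simplified stand-in for struct sched_group_capacity. */
struct sgc {
	unsigned long capacity;
	unsigned long min_capacity;
};

/* Aggregate child groups as the non-SD_OVERLAP branch above does. */
static struct sgc aggregate(const struct sgc *child, unsigned int nr)
{
	struct sgc parent = { .capacity = 0, .min_capacity = ULONG_MAX };
	unsigned int i;

	for (i = 0; i < nr; i++) {
		parent.capacity += child[i].capacity;
		if (child[i].min_capacity < parent.min_capacity)
			parent.min_capacity = child[i].min_capacity;
	}
	return parent;
}

int main(void)
{
	/* Hypothetical big and LITTLE child groups (values made up). */
	struct sgc child[] = {
		{ .capacity = 2048, .min_capacity = 1024 },	/* 2 big    */
		{ .capacity =  892, .min_capacity =  446 },	/* 2 LITTLE */
	};
	struct sgc top = aggregate(child, 2);

	/* Prints: capacity=2940 min_capacity=446 */
	printf("capacity=%lu min_capacity=%lu\n",
	       top.capacity, top.min_capacity);
	return 0;
}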