sched/fair: Use load instead of runnable load in wakeup path
author: Vincent Guittot <vincent.guittot@linaro.org>
Fri, 18 Oct 2019 13:26:36 +0000 (15:26 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Mon, 21 Oct 2019 07:40:55 +0000 (09:40 +0200)
Runnable load was originally introduced to take into account the case where
blocked load biases the wakeup path, which may end up selecting an overloaded
CPU with a large number of runnable tasks instead of an underutilized
CPU with a huge blocked load.

The wakeup path now starts by looking for idle CPUs before comparing
runnable load, so it is worth aligning the wakeup path with the
load_balance() logic and using load instead of runnable load.
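
For illustration only (not part of the patch): a minimal standalone sketch
of the two signals being compared, assuming the PELT fields of this era,
where cpu_load() reads cfs_rq->avg.load_avg (runnable plus blocked
contributions) and cpu_runnable_load() reads cfs_rq->avg.runnable_load_avg
(runnable tasks only). The struct, helper names and values below are made
up for the example:

#include <stdio.h>

/* Simplified stand-in for the per-CPU cfs_rq PELT averages. */
struct cfs_rq_model {
	unsigned long load_avg;			/* runnable + blocked load */
	unsigned long runnable_load_avg;	/* runnable load only */
};

static unsigned long model_cpu_load(const struct cfs_rq_model *cfs)
{
	return cfs->load_avg;
}

static unsigned long model_cpu_runnable_load(const struct cfs_rq_model *cfs)
{
	return cfs->runnable_load_avg;
}

int main(void)
{
	/* CPU0: several runnable tasks, nothing blocked. */
	struct cfs_rq_model cpu0 = { .load_avg = 2048, .runnable_load_avg = 2048 };

	/* CPU1: one runnable task plus a large blocked contribution. */
	struct cfs_rq_model cpu1 = { .load_avg = 3072, .runnable_load_avg = 512 };

	/*
	 * Comparing load would pick CPU0 (2048 < 3072), the overloaded CPU,
	 * because CPU1's blocked load inflates its figure; runnable load was
	 * introduced so the underutilized CPU1 (512 < 2048) wins instead.
	 * Now that idle CPUs are searched first, using load for the
	 * remaining comparison matches what load_balance() does.
	 */
	printf("load:          cpu0=%lu cpu1=%lu\n",
	       model_cpu_load(&cpu0), model_cpu_load(&cpu1));
	printf("runnable load: cpu0=%lu cpu1=%lu\n",
	       model_cpu_runnable_load(&cpu0), model_cpu_runnable_load(&cpu1));

	return 0;
}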

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: hdanton@sina.com
Cc: parth@linux.ibm.com
Cc: pauld@redhat.com
Cc: quentin.perret@arm.com
Cc: riel@surriel.com
Cc: srikar@linux.vnet.ibm.com
Cc: valentin.schneider@arm.com
Link: https://lkml.kernel.org/r/1571405198-27570-10-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c

index 1fd6f3917fe712443aa3d84fc2f53bc485abbc9e..b0703b461460354d50df8057339504f41ef287c1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1474,7 +1474,12 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
               group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
 }
 
-static unsigned long cpu_runnable_load(struct rq *rq);
+static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
+
+static unsigned long cpu_runnable_load(struct rq *rq)
+{
+       return cfs_rq_runnable_load_avg(&rq->cfs);
+}
 
 /* Cached statistics for all CPUs within a node */
 struct numa_stats {
@@ -5370,11 +5375,6 @@ static int sched_idle_cpu(int cpu)
                        rq->nr_running);
 }
 
-static unsigned long cpu_runnable_load(struct rq *rq)
-{
-       return cfs_rq_runnable_load_avg(&rq->cfs);
-}
-
 static unsigned long cpu_load(struct rq *rq)
 {
        return cfs_rq_load_avg(&rq->cfs);
@@ -5475,7 +5475,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
        s64 this_eff_load, prev_eff_load;
        unsigned long task_load;
 
-       this_eff_load = cpu_runnable_load(cpu_rq(this_cpu));
+       this_eff_load = cpu_load(cpu_rq(this_cpu));
 
        if (sync) {
                unsigned long current_load = task_h_load(current);
@@ -5493,7 +5493,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
                this_eff_load *= 100;
        this_eff_load *= capacity_of(prev_cpu);
 
-       prev_eff_load = cpu_runnable_load(cpu_rq(prev_cpu));
+       prev_eff_load = cpu_load(cpu_rq(prev_cpu));
        prev_eff_load -= task_load;
        if (sched_feat(WA_BIAS))
                prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5581,7 +5581,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                max_spare_cap = 0;
 
                for_each_cpu(i, sched_group_span(group)) {
-                       load = cpu_runnable_load(cpu_rq(i));
+                       load = cpu_load(cpu_rq(i));
                        runnable_load += load;
 
                        avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
@@ -5722,7 +5722,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
                                continue;
                        }
 
-                       load = cpu_runnable_load(cpu_rq(i));
+                       load = cpu_load(cpu_rq(i));
                        if (load < min_load) {
                                min_load = load;
                                least_loaded_cpu = i;