sched/fair: Fix unnecessary increase of balance interval
author Vincent Guittot <vincent.guittot@linaro.org>
Fri, 14 Dec 2018 16:01:57 +0000 (17:01 +0100)
committer Ingo Molnar <mingo@kernel.org>
Sun, 27 Jan 2019 11:29:37 +0000 (12:29 +0100)
In case of active balancing, we increase the balance interval to cover
pinned-task cases not covered by the all_pinned logic. Nevertheless, the
active migration triggered by asym packing should be treated as the normal
unbalanced case and reset the interval to its default value; otherwise,
active migration for asym_packing can easily be delayed for hundreds of ms
because of this pinned-task detection mechanism.

The same applies to the other conditions tested in need_active_balance(),
such as a misfit task and the case where the capacity of src_cpu is reduced
compared to that of dst_cpu (see the comments in need_active_balance() for
details).
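To make the delay concrete, below is a small self-contained C sketch of the
back-off behaviour described above. It mirrors the interval handling in
load_balance() (reset to min_interval on a normal imbalance, double up to
max_interval after an active balance); struct sd_intervals, update_interval()
and the 8/512 ms bounds are illustrative stand-ins, not kernel code:

    #include <stdio.h>

    /* Hypothetical stand-in for struct sched_domain's interval fields. */
    struct sd_intervals {
            unsigned long min_interval;     /* ms */
            unsigned long max_interval;     /* ms */
            unsigned long balance_interval; /* ms */
    };

    /*
     * Mimics the back-off in load_balance(): reset on a normal imbalance,
     * double (capped at max_interval) after an active balance.
     */
    static void update_interval(struct sd_intervals *sd, int active_balance)
    {
            if (!active_balance)
                    sd->balance_interval = sd->min_interval;
            else if (sd->balance_interval < sd->max_interval)
                    sd->balance_interval *= 2;
    }

    int main(void)
    {
            struct sd_intervals sd = { 8, 512, 8 };

            /*
             * A few asym_packing active balances in a row: without the fix,
             * each one doubles the interval instead of resetting it.
             */
            for (int i = 0; i < 6; i++) {
                    update_interval(&sd, 1);
                    printf("after active balance %d: %lu ms\n", i + 1,
                           sd.balance_interval);
            }
            return 0;
    }

Six back-to-back active balances push the interval from 8 ms to 512 ms, which
is exactly the "hundreds of ms" delay the commit message refers to.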

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: valentin.schneider@arm.com
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c

index 30450901200eac65a533b8d815af8723775c009f..e2ff4b69dcf653e8b5271c1ed5b40bb78eb482b2 100644 (file)
@@ -8826,21 +8826,25 @@ static struct rq *find_busiest_queue(struct lb_env *env,
  */
 #define MAX_PINNED_INTERVAL    512
 
-static int need_active_balance(struct lb_env *env)
+static inline bool
+asym_active_balance(struct lb_env *env)
 {
-       struct sched_domain *sd = env->sd;
+       /*
+        * ASYM_PACKING needs to force migrate tasks from busy but
+        * lower priority CPUs in order to pack all tasks in the
+        * highest priority CPUs.
+        */
+       return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
+              sched_asym_prefer(env->dst_cpu, env->src_cpu);
+}
 
-       if (env->idle != CPU_NOT_IDLE) {
+static inline bool
+voluntary_active_balance(struct lb_env *env)
+{
+       struct sched_domain *sd = env->sd;
 
-               /*
-                * ASYM_PACKING needs to force migrate tasks from busy but
-                * lower priority CPUs in order to pack all tasks in the
-                * highest priority CPUs.
-                */
-               if ((sd->flags & SD_ASYM_PACKING) &&
-                   sched_asym_prefer(env->dst_cpu, env->src_cpu))
-                       return 1;
-       }
+       if (asym_active_balance(env))
+               return 1;
 
        /*
         * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
@@ -8858,6 +8862,16 @@ static int need_active_balance(struct lb_env *env)
        if (env->src_grp_type == group_misfit_task)
                return 1;
 
+       return 0;
+}
+
+static int need_active_balance(struct lb_env *env)
+{
+       struct sched_domain *sd = env->sd;
+
+       if (voluntary_active_balance(env))
+               return 1;
+
        return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
 }
 
@@ -9119,7 +9133,7 @@ more_balance:
        } else
                sd->nr_balance_failed = 0;
 
-       if (likely(!active_balance)) {
+       if (likely(!active_balance) || voluntary_active_balance(&env)) {
                /* We were unbalanced, so reset the balancing interval */
                sd->balance_interval = sd->min_interval;
        } else {
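The net effect of the refactor: the conditions under which the scheduler
voluntarily chooses active migration (asym packing, a src_cpu with reduced
capacity, a misfit task) are grouped in voluntary_active_balance(), and
load_balance() now resets balance_interval to min_interval for those cases
just as it does for a normal imbalance. Only active balancing forced by
repeated failures (nr_balance_failed > cache_nice_tries + 2) keeps backing
the interval off.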