sched/fair: Reduce the window for duplicated update
authorVincent Guittot <vincent.guittot@linaro.org>
Wed, 24 Feb 2021 13:30:07 +0000 (14:30 +0100)
committerIngo Molnar <mingo@kernel.org>
Sat, 6 Mar 2021 11:40:22 +0000 (12:40 +0100)
Update last_blocked_load_update_tick at the start of the update, before
taking the rq lock's critical section's work, to shrink the window in
which another CPU can observe a stale tick and start the same blocked
load update a second time.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210224133007.28644-8-vincent.guittot@linaro.org
kernel/sched/fair.c

index e87e1b3bcdca93fab7a6ae1df8fee7221d6c1d59..f1b55f9a085d3e7720fe63276d5736907f85ee11 100644 (file)
@@ -7852,16 +7852,20 @@ static inline bool others_have_blocked(struct rq *rq)
        return false;
 }
 
-static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
+static inline void update_blocked_load_tick(struct rq *rq)
 {
-       rq->last_blocked_load_update_tick = jiffies;
+       WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
+}
 
+static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
+{
        if (!has_blocked)
                rq->has_blocked_load = 0;
 }
 #else
 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
 static inline bool others_have_blocked(struct rq *rq) { return false; }
+static inline void update_blocked_load_tick(struct rq *rq) {}
 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
 #endif
 
@@ -8022,6 +8026,7 @@ static void update_blocked_averages(int cpu)
        struct rq_flags rf;
 
        rq_lock_irqsave(rq, &rf);
+       update_blocked_load_tick(rq);
        update_rq_clock(rq);
 
        decayed |= __update_blocked_others(rq, &done);
@@ -8363,7 +8368,7 @@ static bool update_nohz_stats(struct rq *rq)
        if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
                return false;
 
-       if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+       if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
                return true;
 
        update_blocked_averages(cpu);