sched/core: Simplify update_rq_clock() in __schedule()
author     Peter Zijlstra <peterz@infradead.org>
           Tue, 21 Feb 2017 13:40:35 +0000 (14:40 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Thu, 16 Mar 2017 08:46:24 +0000 (09:46 +0100)
Instead of relying on deactivate_task() to call update_rq_clock() and
then handling the case where it did not run (the task_on_rq_queued()
check), do update_rq_clock() unconditionally early in __schedule() and
pass the {EN,DE}QUEUE_NOCLOCK flags so the subsequent dequeue/enqueue
paths skip any further updates.

This also avoids a double update on the deactivate_task() +
try_to_wake_up_local() path.
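
For illustration, the flag mechanism boils down to the dequeue/enqueue
helpers updating the clock only when the caller has not already done so.
Below is a minimal standalone sketch of that pattern (a model written for
this changelog, not the kernel implementation; the struct layout, flag
values and function bodies are simplified stand-ins):

  /*
   * Standalone model: one clock update in "__schedule()" plus the
   * *_NOCLOCK flags makes the later dequeue/enqueue skip their own.
   */
  #include <stdio.h>

  #define DEQUEUE_NOCLOCK 0x08    /* illustrative value only */
  #define ENQUEUE_NOCLOCK 0x08    /* illustrative value only */

  struct rq {
          int clock;              /* stand-in for the real rq clock */
          int nr_updates;         /* how many times the clock was read */
  };

  static void update_rq_clock(struct rq *rq)
  {
          rq->clock++;            /* stand-in for sched_clock_cpu() work */
          rq->nr_updates++;
  }

  static void dequeue_task(struct rq *rq, int flags)
  {
          if (!(flags & DEQUEUE_NOCLOCK))
                  update_rq_clock(rq);
          /* class ->dequeue_task() would run here */
  }

  static void enqueue_task(struct rq *rq, int flags)
  {
          if (!(flags & ENQUEUE_NOCLOCK))
                  update_rq_clock(rq);
          /* class ->enqueue_task() would run here */
  }

  int main(void)
  {
          struct rq rq = { 0, 0 };

          update_rq_clock(&rq);                   /* __schedule(): once */
          dequeue_task(&rq, DEQUEUE_NOCLOCK);     /* deactivate_task()  */
          enqueue_task(&rq, ENQUEUE_NOCLOCK);     /* ttwu_activate()    */

          printf("clock read %d time(s)\n", rq.nr_updates);  /* prints 1 */
          return 0;
  }

In this model only the single explicit update_rq_clock() runs; dropping
the NOCLOCK flags would raise the count to three, which is the kind of
redundant update the patch avoids.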

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c

index dead90d680fd9aca3873c175d59b7aa8aea2e1a6..179a6c928bf1af6e7f5d430d38cce1e1412c84fe 100644
@@ -2114,7 +2114,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
                        delayacct_blkio_end();
                        atomic_dec(&rq->nr_iowait);
                }
-               ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+               ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
        }
 
        ttwu_do_wakeup(rq, p, 0, rf);
@@ -3393,13 +3393,14 @@ static void __sched notrace __schedule(bool preempt)
 
        /* Promote REQ to ACT */
        rq->clock_update_flags <<= 1;
+       update_rq_clock(rq);
 
        switch_count = &prev->nivcsw;
        if (!preempt && prev->state) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
                        prev->state = TASK_RUNNING;
                } else {
-                       deactivate_task(rq, prev, DEQUEUE_SLEEP);
+                       deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
                        prev->on_rq = 0;
 
                        if (prev->in_iowait) {
@@ -3423,9 +3424,6 @@ static void __sched notrace __schedule(bool preempt)
                switch_count = &prev->nvcsw;
        }
 
-       if (task_on_rq_queued(prev))
-               update_rq_clock(rq);
-
        next = pick_next_task(rq, prev, &rf);
        clear_tsk_need_resched(prev);
        clear_preempt_need_resched();