sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
author Kirill Tkhai <ktkhai@parallels.com>
Fri, 12 Sep 2014 11:03:34 +0000 (15:03 +0400)
committer Ingo Molnar <mingo@kernel.org>
Fri, 19 Sep 2014 10:35:21 +0000 (12:35 +0200)
If a task is queued but not running on its rq, we can migrate it
directly, without the migration thread and without a context switch.
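
In short, set_cpus_allowed_ptr() now distinguishes three cases: a task
that is currently running (or in the middle of waking up) still needs the
stopper thread to push it off its CPU; a task that is queued on a rq but
not running can be dequeued, retargeted and enqueued directly under the
rq locks; and a task that is not on any rq needs no action, since its
next wake-up will place it on an allowed CPU. A minimal user-space sketch
of that branch structure follows (plain C; the enum and
choose_migration_path() are invented for illustration and are not kernel
APIs):

#include <stdio.h>

enum task_state { RUNNING, WAKING, QUEUED, SLEEPING };

static const char *choose_migration_path(enum task_state s)
{
	switch (s) {
	case RUNNING:	/* on a CPU: only the stopper can displace it */
	case WAKING:	/* mid wake-up: the stopper path is needed too */
		return "stop_one_cpu(migration_cpu_stop)";
	case QUEUED:	/* queued, not running: move under the rq locks */
		return "move_queued_task()";
	default:	/* not on any rq: the next wake-up places it */
		return "nothing to do";
	}
}

int main(void)
{
	enum task_state s;

	for (s = RUNNING; s <= SLEEPING; s++)
		printf("%d -> %s\n", s, choose_migration_path(s));
	return 0;
}

Only the QUEUED case is new; the patch adds move_queued_task() for it
and reuses the same helper in __migrate_task().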

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1410519814.3569.7.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c

index 5536397a0309cf69f60782d4303844662716c618..4b1ddebed54a5a9336bf1e6a879aeabec7cd95a8 100644 (file)
@@ -4629,6 +4629,33 @@ void init_idle(struct task_struct *idle, int cpu)
 }
 
 #ifdef CONFIG_SMP
+/*
+ * move_queued_task - move a queued task to a new rq.
+ *
+ * Returns (locked) new rq. Old rq's lock is released.
+ */
+static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
+{
+       struct rq *rq = task_rq(p);
+
+       lockdep_assert_held(&rq->lock);
+
+       dequeue_task(rq, p, 0);
+       p->on_rq = TASK_ON_RQ_MIGRATING;
+       set_task_cpu(p, new_cpu);
+       raw_spin_unlock(&rq->lock);
+
+       rq = cpu_rq(new_cpu);
+
+       raw_spin_lock(&rq->lock);
+       BUG_ON(task_cpu(p) != new_cpu);
+       p->on_rq = TASK_ON_RQ_QUEUED;
+       enqueue_task(rq, p, 0);
+       check_preempt_curr(rq, p, 0);
+
+       return rq;
+}
+
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
        if (p->sched_class && p->sched_class->set_cpus_allowed)
@@ -4685,14 +4712,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
                goto out;
 
        dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-       if (task_on_rq_queued(p) || p->state == TASK_WAKING) {
+       if (task_running(rq, p) || p->state == TASK_WAKING) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
                task_rq_unlock(rq, p, &flags);
                stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
                tlb_migrate_finish(p->mm);
                return 0;
-       }
+       } else if (task_on_rq_queued(p))
+               rq = move_queued_task(p, dest_cpu);
 out:
        task_rq_unlock(rq, p, &flags);
 
@@ -4735,19 +4763,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
         * If we're not on a rq, the next wake-up will ensure we're
         * placed properly.
         */
-       if (task_on_rq_queued(p)) {
-               dequeue_task(rq, p, 0);
-               p->on_rq = TASK_ON_RQ_MIGRATING;
-               set_task_cpu(p, dest_cpu);
-               raw_spin_unlock(&rq->lock);
-
-               rq = cpu_rq(dest_cpu);
-               raw_spin_lock(&rq->lock);
-               BUG_ON(task_rq(p) != rq);
-               p->on_rq = TASK_ON_RQ_QUEUED;
-               enqueue_task(rq, p, 0);
-               check_preempt_curr(rq, p, 0);
-       }
+       if (task_on_rq_queued(p))
+               rq = move_queued_task(p, dest_cpu);
 done:
        ret = 1;
 fail:
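
A closing note on the locking in move_queued_task(): set_task_cpu() runs
while the old rq lock is still held, the old lock is then dropped, and
the new rq's lock is taken before the task is enqueued again. The
BUG_ON() re-checks task_cpu() after that unlocked window, and p->on_rq
is set to TASK_ON_RQ_MIGRATING across it so observers can tell the task
is in flight rather than queued. A compilable sketch of the same
hand-off pattern, in plain C with pthreads (all structs and names here
are invented for illustration):

#include <assert.h>
#include <pthread.h>

struct fake_rq {
	pthread_mutex_t lock;
	int cpu;
};

struct fake_task {
	int cpu;	/* which fake_rq the task currently belongs to */
};

/* Drop the old lock, take the new one, then re-validate what may have
 * changed while nothing was held -- the shape of move_queued_task(). */
static struct fake_rq *lock_hand_off(struct fake_task *t,
				     struct fake_rq *old_rq,
				     struct fake_rq *new_rq)
{
	t->cpu = new_rq->cpu;			/* retarget under the old lock */
	pthread_mutex_unlock(&old_rq->lock);	/* unlocked window begins */
	pthread_mutex_lock(&new_rq->lock);	/* take the destination lock */
	assert(t->cpu == new_rq->cpu);		/* mirrors the BUG_ON() */
	return new_rq;
}

int main(void)
{
	struct fake_rq a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct fake_rq b = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct fake_task t = { 0 };

	pthread_mutex_lock(&a.lock);
	pthread_mutex_unlock(&lock_hand_off(&t, &a, &b)->lock);
	return 0;
}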