sched: Fix task affinity for select_task_rq_fair
authorPeter Zijlstra <a.p.zijlstra@chello.nl>
Fri, 11 Sep 2009 10:45:38 +0000 (12:45 +0200)
committerIngo Molnar <mingo@elte.hu>
Tue, 15 Sep 2009 14:01:07 +0000 (16:01 +0200)
While merging select_task_rq_fair() and sched_balance_self() I made
a mistake that leads to testing the wrong task affinity.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched_fair.c

index 43dc6d1d9e88588bb0fa1ad43f8494e77f6eeab5..8b3eddbcf9a48909192700647f9d78949ef247e2 100644 (file)
@@ -1318,7 +1318,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  */
 static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 {
-       struct task_struct *t = current;
        struct sched_domain *tmp, *sd = NULL;
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
@@ -1393,13 +1392,13 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
                        continue;
                }
 
-               group = find_idlest_group(sd, t, cpu);
+               group = find_idlest_group(sd, p, cpu);
                if (!group) {
                        sd = sd->child;
                        continue;
                }
 
-               new_cpu = find_idlest_cpu(group, t, cpu);
+               new_cpu = find_idlest_cpu(group, p, cpu);
                if (new_cpu == -1 || new_cpu == cpu) {
                        /* Now try balancing at a lower domain level of cpu */
                        sd = sd->child;