Merge branch 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 255ce138b65232dabcc553c54340f45440ca2792..256e577faf1bf977ae00a97f742103a826714f93 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -530,7 +530,7 @@ again:
        update_rq_clock(rq);
        dl_se->dl_throttled = 0;
        dl_se->dl_yielded = 0;
-       if (p->on_rq) {
+       if (task_on_rq_queued(p)) {
                enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
                if (task_has_dl_policy(rq->curr))
                        check_preempt_curr_dl(rq, p, 0);
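
For reference, the task_on_rq_queued() conversions throughout this file replace open-coded reads of p->on_rq. A minimal sketch of the helper as it appears in kernel/sched/sched.h around this release, shown here only for context and not part of this diff:

	/* true only while the task is actually enqueued on a runqueue */
	static inline int task_on_rq_queued(struct task_struct *p)
	{
		return p->on_rq == TASK_ON_RQ_QUEUED;
	}

Since p->on_rq is no longer a plain boolean (it can also hold TASK_ON_RQ_MIGRATING), testing for TASK_ON_RQ_QUEUED explicitly keeps these checks from treating a mid-migration task as queued.
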
@@ -997,10 +997,7 @@ static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
 #ifdef CONFIG_SCHED_HRTICK
 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
 {
-       s64 delta = p->dl.dl_runtime - p->dl.runtime;
-
-       if (delta > 10000)
-               hrtick_start(rq, p->dl.runtime);
+       hrtick_start(rq, p->dl.runtime);
 }
 #endif
 
@@ -1030,7 +1027,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
                 * means a stop task can slip in, in which case we need to
                 * re-start task selection.
                 */
-               if (rq->stop && rq->stop->on_rq)
+               if (rq->stop && task_on_rq_queued(rq->stop))
                        return RETRY_TASK;
        }
 
@@ -1124,10 +1121,8 @@ static void set_curr_task_dl(struct rq *rq)
 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 {
        if (!task_running(rq, p) &&
-           (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
-           (p->nr_cpus_allowed > 1))
+           cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                return 1;
-
        return 0;
 }
 
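The tsk_cpus_allowed() accessor used above is assumed here to be the usual convenience macro from include/linux/sched.h of this era, sketched for context only:

	/* canonical accessor for a task's affinity mask */
	#define tsk_cpus_allowed(tsk)	(&(tsk)->cpus_allowed)

so the functional change in this hunk is dropping the cpu < 0 escape hatch and the nr_cpus_allowed > 1 test, not a change in which mask is consulted.
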
@@ -1158,7 +1153,7 @@ static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
 static int find_later_rq(struct task_struct *task)
 {
        struct sched_domain *sd;
-       struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
+       struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
        int this_cpu = smp_processor_id();
        int best_cpu, cpu = task_cpu(task);
 
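The __get_cpu_var() to this_cpu_cpumask_var_ptr() conversion comes from the 'consistent-ops' series being merged here. A sketch of what the new accessor is assumed to expand to in include/linux/cpumask.h, for context only:

	#ifdef CONFIG_CPUMASK_OFFSTACK
	/* offstack cpumasks are per-cpu pointers; read the pointer value */
	#define this_cpu_cpumask_var_ptr(x)	this_cpu_read(x)
	#else
	/* onstack cpumasks are per-cpu structs; take their address */
	#define this_cpu_cpumask_var_ptr(x)	this_cpu_ptr(x)
	#endif

which yields the same local_cpu_mask_dl pointer without going through the deprecated __get_cpu_var() interface.
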
@@ -1169,6 +1164,13 @@ static int find_later_rq(struct task_struct *task)
        if (task->nr_cpus_allowed == 1)
                return -1;
 
+       /*
+        * We have to consider system topology and task affinity
+        * first, then we can look for a suitable cpu.
+        */
+       cpumask_copy(later_mask, task_rq(task)->rd->span);
+       cpumask_and(later_mask, later_mask, cpu_active_mask);
+       cpumask_and(later_mask, later_mask, &task->cpus_allowed);
        best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
                        task, later_mask);
        if (best_cpu == -1)
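In set terms, the three new cpumask calls above compute, roughly:

	/* later_mask = rd->span & cpu_active_mask & task->cpus_allowed */

so that cpudl_find() only ever proposes CPUs that are active, belong to the task's root domain, and are permitted by its affinity mask.
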
@@ -1257,7 +1259,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
                        if (unlikely(task_rq(task) != rq ||
                                     !cpumask_test_cpu(later_rq->cpu,
                                                       &task->cpus_allowed) ||
-                                    task_running(rq, task) || !task->on_rq)) {
+                                    task_running(rq, task) ||
+                                    !task_on_rq_queued(task))) {
                                double_unlock_balance(rq, later_rq);
                                later_rq = NULL;
                                break;
@@ -1296,7 +1299,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
        BUG_ON(task_current(rq, p));
        BUG_ON(p->nr_cpus_allowed <= 1);
 
-       BUG_ON(!p->on_rq);
+       BUG_ON(!task_on_rq_queued(p));
        BUG_ON(!dl_task(p));
 
        return p;
@@ -1443,7 +1446,7 @@ static int pull_dl_task(struct rq *this_rq)
                     dl_time_before(p->dl.deadline,
                                    this_rq->dl.earliest_dl.curr))) {
                        WARN_ON(p == src_rq->curr);
-                       WARN_ON(!p->on_rq);
+                       WARN_ON(!task_on_rq_queued(p));
 
                        /*
                         * Then we pull iff p has actually an earlier
@@ -1569,6 +1572,8 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
        if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
                hrtimer_try_to_cancel(&p->dl.dl_timer);
 
+       __dl_clear_params(p);
+
 #ifdef CONFIG_SMP
        /*
         * Since this might be the only -deadline task on the rq,
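
The new __dl_clear_params() call resets the task's SCHED_DEADLINE parameters once it leaves the deadline class. A sketch of the helper's assumed shape in this merge window, shown for context only:

	void __dl_clear_params(struct task_struct *p)
	{
		struct sched_dl_entity *dl_se = &p->dl;

		/* forget the admitted runtime/deadline/period and bandwidth */
		dl_se->dl_runtime = 0;
		dl_se->dl_deadline = 0;
		dl_se->dl_period = 0;
		dl_se->flags = 0;
		dl_se->dl_bw = 0;
	}

so a task that later re-enters SCHED_DEADLINE starts from clean parameters rather than stale ones.
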
@@ -1596,7 +1601,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
        if (unlikely(p->dl.dl_throttled))
                return;
 
-       if (p->on_rq && rq->curr != p) {
+       if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
                if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
                        /* Only reschedule if pushing failed */
@@ -1614,7 +1619,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
                            int oldprio)
 {
-       if (p->on_rq || rq->curr == p) {
+       if (task_on_rq_queued(p) || rq->curr == p) {
 #ifdef CONFIG_SMP
                /*
                 * This might be too much, but unfortunately