sched/deadline: Remove dl_new from struct sched_dl_entity
authorLuca Abeni <luca.abeni@unitn.it>
Mon, 7 Mar 2016 11:27:04 +0000 (12:27 +0100)
committerIngo Molnar <mingo@kernel.org>
Tue, 8 Mar 2016 11:24:55 +0000 (12:24 +0100)
The dl_new field of struct sched_dl_entity is currently used to
identify new deadline tasks, so that their deadline and runtime
can be properly initialised.

However, these tasks can be easily identified by checking if
their deadline is smaller than the current time when they switch
to SCHED_DEADLINE. So, dl_new can be removed by introducing this
check in switched_to_dl(); this allows us to simplify the
SCHED_DEADLINE code.

Signed-off-by: Luca Abeni <luca.abeni@unitn.it>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1457350024-7825-2-git-send-email-luca.abeni@unitn.it
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/sched.h
kernel/sched/core.c
kernel/sched/deadline.c

index 9519b66fc21ff1396df36cffa16c40bcfc12e8d9..838a89a78332ae92c877c865d446ac80d8761e7f 100644 (file)
@@ -1333,10 +1333,6 @@ struct sched_dl_entity {
         * task has to wait for a replenishment to be performed at the
         * next firing of dl_timer.
         *
-        * @dl_new tells if a new instance arrived. If so we must
-        * start executing it with full runtime and reset its absolute
-        * deadline;
-        *
         * @dl_boosted tells if we are boosted due to DI. If so we are
         * outside bandwidth enforcement mechanism (but only until we
         * exit the critical section);
@@ -1344,7 +1340,7 @@ struct sched_dl_entity {
         * @dl_yielded tells if task gave up the cpu before consuming
         * all its available runtime during the last job.
         */
-       int dl_throttled, dl_new, dl_boosted, dl_yielded;
+       int dl_throttled, dl_boosted, dl_yielded;
 
        /*
         * Bandwidth enforcement timer. Each -deadline task has its
index 423452cae5cfa143b322cee70dbb68e10677da59..249e37dc02c862fc98dd2fab55d14135404f0f16 100644 (file)
@@ -2051,7 +2051,6 @@ void __dl_clear_params(struct task_struct *p)
        dl_se->dl_bw = 0;
 
        dl_se->dl_throttled = 0;
-       dl_se->dl_new = 1;
        dl_se->dl_yielded = 0;
 }
 
index 15abf04bf0b8903bdab6da5fa40eb7e0bd0dd094..c7a036facbe1d311e24a56d470288ecf8d43d0d3 100644 (file)
@@ -352,7 +352,15 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
 
-       WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
+       WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
+
+       /*
+        * We are racing with the deadline timer. So, do nothing because
+        * the deadline timer handler will take care of properly recharging
+        * the runtime and postponing the deadline
+        */
+       if (dl_se->dl_throttled)
+               return;
 
        /*
         * We use the regular wall clock time to set deadlines in the
@@ -361,7 +369,6 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
         */
        dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
        dl_se->runtime = pi_se->dl_runtime;
-       dl_se->dl_new = 0;
 }
 
 /*
@@ -503,15 +510,6 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
 
-       /*
-        * The arrival of a new instance needs special treatment, i.e.,
-        * the actual scheduling parameters have to be "renewed".
-        */
-       if (dl_se->dl_new) {
-               setup_new_dl_entity(dl_se, pi_se);
-               return;
-       }
-
        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
            dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
@@ -607,16 +605,6 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
                goto unlock;
        }
 
-       /*
-        * This is possible if switched_from_dl() raced against a running
-        * callback that took the above !dl_task() path and we've since then
-        * switched back into SCHED_DEADLINE.
-        *
-        * There's nothing to do except drop our task reference.
-        */
-       if (dl_se->dl_new)
-               goto unlock;
-
        /*
         * The task might have been boosted by someone else and might be in the
         * boosting/deboosting path, its not throttled.
@@ -925,7 +913,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
         * parameters of the task might need updating. Otherwise,
         * we want a replenishment of its runtime.
         */
-       if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
+       if (flags & ENQUEUE_WAKEUP)
                update_dl_entity(dl_se, pi_se);
        else if (flags & ENQUEUE_REPLENISH)
                replenish_dl_entity(dl_se, pi_se);
@@ -1726,6 +1714,9 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
  */
 static void switched_to_dl(struct rq *rq, struct task_struct *p)
 {
+       if (dl_time_before(p->dl.deadline, rq_clock(rq)))
+               setup_new_dl_entity(&p->dl, &p->dl);
+
        if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
                if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)