sched/fair: Rename check_preempt_curr() to wakeup_preempt()
Author:     Ingo Molnar <mingo@kernel.org>
AuthorDate: Tue, 19 Sep 2023 08:38:21 +0000 (10:38 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 19 Sep 2023 08:40:10 +0000 (10:40 +0200)
The name is a bit opaque - make it clear that this is about wakeup
preemption.

Also rename the ->check_preempt_curr() methods similarly.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
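
At a glance, the renamed dispatch reads as follows (a sketch lifted from the kernel/sched/core.c hunk below; this commit changes only the names, not the logic, and the rest of the function body outside the hunk is elided):

    void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
    {
            /* Same class: let the class decide whether the woken task preempts. */
            if (p->sched_class == rq->curr->sched_class)
                    rq->curr->sched_class->wakeup_preempt(rq, p, flags);
            /* Higher class (e.g. RT over fair): always reschedule. */
            else if (sched_class_above(p->sched_class, rq->curr->sched_class))
                    resched_curr(rq);
            /* ... remainder unchanged, not shown in the hunk ... */
    }
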
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stop_task.c

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5a50c4e41be9ffb6d54c8ad5b76b045b15d7f980..52ceb85b64211e318698360385bce30c316eaf3d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2211,10 +2211,10 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                p->sched_class->prio_changed(rq, p, oldprio);
 }
 
-void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
 {
        if (p->sched_class == rq->curr->sched_class)
-               rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+               rq->curr->sched_class->wakeup_preempt(rq, p, flags);
        else if (sched_class_above(p->sched_class, rq->curr->sched_class))
                resched_curr(rq);
 
@@ -2508,7 +2508,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
        rq_lock(rq, rf);
        WARN_ON_ONCE(task_cpu(p) != new_cpu);
        activate_task(rq, p, 0);
-       check_preempt_curr(rq, p, 0);
+       wakeup_preempt(rq, p, 0);
 
        return rq;
 }
@@ -3390,7 +3390,7 @@ static void __migrate_swap_task(struct task_struct *p, int cpu)
                deactivate_task(src_rq, p, 0);
                set_task_cpu(p, cpu);
                activate_task(dst_rq, p, 0);
-               check_preempt_curr(dst_rq, p, 0);
+               wakeup_preempt(dst_rq, p, 0);
 
                rq_unpin_lock(dst_rq, &drf);
                rq_unpin_lock(src_rq, &srf);
@@ -3764,7 +3764,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
        }
 
        activate_task(rq, p, en_flags);
-       check_preempt_curr(rq, p, wake_flags);
+       wakeup_preempt(rq, p, wake_flags);
 
        ttwu_do_wakeup(p);
 
@@ -3835,7 +3835,7 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
                         * it should preempt the task that is current now.
                         */
                        update_rq_clock(rq);
-                       check_preempt_curr(rq, p, wake_flags);
+                       wakeup_preempt(rq, p, wake_flags);
                }
                ttwu_do_wakeup(p);
                ret = 1;
@@ -4854,7 +4854,7 @@ void wake_up_new_task(struct task_struct *p)
 
        activate_task(rq, p, ENQUEUE_NOCLOCK);
        trace_sched_wakeup_new(p);
-       check_preempt_curr(rq, p, WF_FORK);
+       wakeup_preempt(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
        if (p->sched_class->task_woken) {
                /*
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 58b542bf2893436185aed5f3c67b9d0ebef96eb2..fb1996a674db5073d28577e48194a2fce7f3b81e 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -763,7 +763,7 @@ static inline void deadline_queue_pull_task(struct rq *rq)
 
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
-static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
+static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);
 
 static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
                                            struct rq *rq)
@@ -1175,7 +1175,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 
        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
        if (dl_task(rq->curr))
-               check_preempt_curr_dl(rq, p, 0);
+               wakeup_preempt_dl(rq, p, 0);
        else
                resched_curr(rq);
 
@@ -1939,7 +1939,7 @@ static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
  * Only called when both the current and waking task are -deadline
  * tasks.
  */
-static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
+static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
                                  int flags)
 {
        if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
@@ -2652,7 +2652,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
                        deadline_queue_push_tasks(rq);
 #endif
                if (dl_task(rq->curr))
-                       check_preempt_curr_dl(rq, p, 0);
+                       wakeup_preempt_dl(rq, p, 0);
                else
                        resched_curr(rq);
        } else {
@@ -2721,7 +2721,7 @@ DEFINE_SCHED_CLASS(dl) = {
        .dequeue_task           = dequeue_task_dl,
        .yield_task             = yield_task_dl,
 
-       .check_preempt_curr     = check_preempt_curr_dl,
+       .wakeup_preempt         = wakeup_preempt_dl,
 
        .pick_next_task         = pick_next_task_dl,
        .put_prev_task          = put_prev_task_dl,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index aeaf31e32c6769c7b65268980519ce6c6390be90..fcf0c5bc8b478d6cba5703323118ac03a7efe6b2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8007,7 +8007,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 
        /*
         * This is possible from callers such as attach_tasks(), in which we
-        * unconditionally check_preempt_curr() after an enqueue (which may have
+        * unconditionally wakeup_preempt() after an enqueue (which may have
         * lead to a throttle).  This both saves work and prevents false
         * next-buddy nomination below.
         */
@@ -8914,7 +8914,7 @@ static void attach_task(struct rq *rq, struct task_struct *p)
 
        WARN_ON_ONCE(task_rq(p) != rq);
        activate_task(rq, p, ENQUEUE_NOCLOCK);
-       check_preempt_curr(rq, p, 0);
+       wakeup_preempt(rq, p, 0);
 }
 
 /*
@@ -12369,7 +12369,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
                if (p->prio > oldprio)
                        resched_curr(rq);
        } else
-               check_preempt_curr(rq, p, 0);
+               wakeup_preempt(rq, p, 0);
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -12471,7 +12471,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
                if (task_current(rq, p))
                        resched_curr(rq);
                else
-                       check_preempt_curr(rq, p, 0);
+                       wakeup_preempt(rq, p, 0);
        }
 }
 
@@ -12830,7 +12830,7 @@ DEFINE_SCHED_CLASS(fair) = {
        .yield_task             = yield_task_fair,
        .yield_to_task          = yield_to_task_fair,
 
-       .check_preempt_curr     = check_preempt_wakeup_fair,
+       .wakeup_preempt         = check_preempt_wakeup_fair,
 
        .pick_next_task         = __pick_next_task_fair,
        .put_prev_task          = put_prev_task_fair,
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 342f58a329f528fcc6060208cfba9b7e908d3a98..26f714003c1fad674da7a7c582c108d9345e4e28 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -400,7 +400,7 @@ balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 /*
  * Idle tasks are unconditionally rescheduled:
  */
-static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
+static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
 {
        resched_curr(rq);
 }
@@ -481,7 +481,7 @@ DEFINE_SCHED_CLASS(idle) = {
        /* dequeue is not valid, we print a debug message there: */
        .dequeue_task           = dequeue_task_idle,
 
-       .check_preempt_curr     = check_preempt_curr_idle,
+       .wakeup_preempt         = wakeup_preempt_idle,
 
        .pick_next_task         = pick_next_task_idle,
        .put_prev_task          = put_prev_task_idle,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 0597ba0f85ff30870b164b8e27d2af1da56cf67d..3e442fa3f6bcedef0f8270614755711a0506edc3 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -953,7 +953,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 
                                /*
                                 * When we're idle and a woken (rt) task is
-                                * throttled check_preempt_curr() will set
+                                * throttled wakeup_preempt() will set
                                 * skip_update and the time between the wakeup
                                 * and this unthrottle will get accounted as
                                 * 'runtime'.
@@ -1715,7 +1715,7 @@ static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
+static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
 {
        if (p->prio < rq->curr->prio) {
                resched_curr(rq);
@@ -2702,7 +2702,7 @@ DEFINE_SCHED_CLASS(rt) = {
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,
 
-       .check_preempt_curr     = check_preempt_curr_rt,
+       .wakeup_preempt         = wakeup_preempt_rt,
 
        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5f217b1e8f1cd03161b1488f71ccf7e86ed49686..7e070dcf707470d0d732429e947a2398e9c5ddcf 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2236,7 +2236,7 @@ struct sched_class {
        void (*yield_task)   (struct rq *rq);
        bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
 
-       void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
+       void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);
 
        struct task_struct *(*pick_next_task)(struct rq *rq);
 
@@ -2510,7 +2510,7 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
 
-extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
+extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
 
 #ifdef CONFIG_PREEMPT_RT
 #define SCHED_NR_MIGRATE_BREAK 8
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 85590599b4d60545b7774222971f8f7baa678dac..6cf7304e6449d8074fef5419864930812466042a 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -23,7 +23,7 @@ balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 #endif /* CONFIG_SMP */
 
 static void
-check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
+wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags)
 {
        /* we're never preempted */
 }
@@ -120,7 +120,7 @@ DEFINE_SCHED_CLASS(stop) = {
        .dequeue_task           = dequeue_task_stop,
        .yield_task             = yield_task_stop,
 
-       .check_preempt_curr     = check_preempt_curr_stop,
+       .wakeup_preempt         = wakeup_preempt_stop,
 
        .pick_next_task         = pick_next_task_stop,
        .put_prev_task          = put_prev_task_stop,