Merge tag 'sysctl-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof...
[linux-block.git] / kernel / sched / core.c
index b8d2ff09b853adbe3ab52bee0f62335cc7d77e89..696c6490bd5b7801996cf585b7a93a902dc8dd03 100644
 #include <linux/topology.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/cond_resched.h>
+#include <linux/sched/cputime.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/init.h>
 #include <linux/sched/isolation.h>
 #include <linux/sched/loadavg.h>
 #include <linux/sched/mm.h>
@@ -597,10 +600,10 @@ void double_rq_lock(struct rq *rq1, struct rq *rq2)
                swap(rq1, rq2);
 
        raw_spin_rq_lock(rq1);
-       if (__rq_lockp(rq1) == __rq_lockp(rq2))
-               return;
+       if (__rq_lockp(rq1) != __rq_lockp(rq2))
+               raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
 
-       raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
+       double_rq_clock_clear_update(rq1, rq2);
 }
 #endif
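
This hunk drops the early return from double_rq_lock(): the nested lock on rq2 is taken only when the two runqueues really use distinct lock instances (SMT siblings share one rq lock under core scheduling), and both paths now fall through to double_rq_clock_clear_update(), which clears the "clock already updated" debug state so the later update_rq_clock() calls on the locked pair do not trip the double-update warning. A sketch of the resulting function, with the unchanged surrounding lines reconstructed from memory and possibly differing in detail:

/* Sketch of the resulting body; context lines reconstructed, may differ in detail. */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
        if (rq_order_less(rq2, rq1))
                swap(rq1, rq2);                 /* lock order: lower runqueue first */

        raw_spin_rq_lock(rq1);
        if (__rq_lockp(rq1) != __rq_lockp(rq2)) /* siblings share one lock under core sched */
                raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

        double_rq_clock_clear_update(rq1, rq2); /* avoid the double update_rq_clock() warning */
}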
 
@@ -2184,7 +2187,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
        if (p->sched_class == rq->curr->sched_class)
                rq->curr->sched_class->check_preempt_curr(rq, p, flags);
-       else if (p->sched_class > rq->curr->sched_class)
+       else if (sched_class_above(p->sched_class, rq->curr->sched_class))
                resched_curr(rq);
 
        /*
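
check_preempt_curr() drops the raw `>` pointer comparison between scheduling classes in favour of sched_class_above(). The reason is the layout flip asserted in the sched_init() hunk at the end of this diff: the class structs are now placed highest-priority-first, so "higher class" means "lower address". A sketch of the helper as it is assumed to be defined in kernel/sched/sched.h:

/* Assumed definition (kernel/sched/sched.h): with the highest-priority
 * class at the lowest address, "above" is a plain address comparison. */
#define sched_class_above(_a, _b)       ((_a) < (_b))
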
@@ -2402,7 +2405,7 @@ static int migration_cpu_stop(void *data)
         * __migrate_task() such that we will not miss enforcing cpus_ptr
         * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
         */
-       flush_smp_call_function_from_idle();
+       flush_smp_call_function_queue();
 
        raw_spin_lock(&p->pi_lock);
        rq_lock(rq, &rf);
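
The helper called here was renamed this cycle from flush_smp_call_function_from_idle() to flush_smp_call_function_queue(); migration_cpu_stop() simply picks up the new name, and the comment above still gives the reason for the call: pending SMP-call-function work (which can include a wakeup that changes the task's allowed CPUs) must be drained before taking pi_lock and the rq lock that serialize against set_cpus_allowed_ptr(). A sketch of the declaration under the new name, assuming it keeps the old signature:

/* Assumed declaration (kernel/sched/smp.h), same signature as before the rename: */
extern void flush_smp_call_function_queue(void);
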
@@ -5729,7 +5732,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
         * higher scheduling class, because otherwise those lose the
         * opportunity to pull in more work from other CPUs.
         */
-       if (likely(prev->sched_class <= &fair_sched_class &&
+       if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
                   rq->nr_running == rq->cfs.h_nr_running)) {
 
                p = pick_next_task_fair(rq, prev, rf);
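
The CFS fast-path guard changes for the same layout-flip reason as check_preempt_curr() above: with the classes now laid out highest-priority-first, "prev is in the fair class or below" can no longer be written as a raw `<=` on the struct addresses, so it is spelled through sched_class_above(). A small illustration; prev_in_fair_or_below() is a hypothetical helper, not part of the patch:

/* Hypothetical helper, just to spell out the new test: */
static inline bool prev_in_fair_or_below(const struct sched_class *class)
{
        /* "not above fair" == fair or idle (anything of lower priority) */
        return !sched_class_above(class, &fair_sched_class);
}
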
@@ -5792,6 +5795,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
 
 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
 
+static void queue_core_balance(struct rq *rq);
+
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -5841,7 +5846,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                }
 
                rq->core_pick = NULL;
-               return next;
+               goto out;
        }
 
        put_prev_task_balance(rq, prev, rf);
@@ -5891,7 +5896,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                         */
                        WARN_ON_ONCE(fi_before);
                        task_vruntime_update(rq, next, false);
-                       goto done;
+                       goto out_set_next;
                }
        }
 
@@ -6010,8 +6015,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                resched_curr(rq_i);
        }
 
-done:
+out_set_next:
        set_next_task(rq, next);
+out:
+       if (rq->core->core_forceidle_count && next == rq->idle)
+               queue_core_balance(rq);
+
        return next;
 }
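
The core-scheduling hunks above restructure the exit paths of pick_next_task(): the old `done:` label becomes `out_set_next:`, and a new `out:` label is shared by the early-return path and the normal path. On either path, if the core currently has a forced-idle sibling (core_forceidle_count != 0) and this CPU ended up picking its idle task, queue_core_balance() arms a balance callback to try to steal work. Since the picker is now its only caller, a forward declaration is added above pick_next_task() and a later hunk makes the function static. A sketch of the resulting tail of the function on a core-scheduling build:

        /* Sketch of the resulting tail of pick_next_task(): */
out_set_next:
        set_next_task(rq, next);
out:
        if (rq->core->core_forceidle_count && next == rq->idle)
                queue_core_balance(rq);         /* defer stealing to a balance callback */

        return next;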
 
@@ -6040,7 +6049,7 @@ static bool try_steal_cookie(int this, int that)
                if (p == src->core_pick || p == src->curr)
                        goto next;
 
-               if (!cpumask_test_cpu(this, &p->cpus_mask))
+               if (!is_cpu_allowed(p, this))
                        goto next;
 
                if (p->core_occupation > dst->idle->core_occupation)
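
try_steal_cookie() now vets the destination CPU with is_cpu_allowed() rather than a raw cpus_mask test, so cases the mask test misses (for example a migration-disabled task, or a CPU that is in the mask but not active) are no longer stolen onto `this`. An abridged sketch of what the helper checks, reconstructed from memory and possibly differing in detail:

/* Abridged sketch of is_cpu_allowed() (kernel/sched/core.c): */
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
        if (!cpumask_test_cpu(cpu, p->cpus_ptr))       /* must be in the affinity mask */
                return false;

        if (is_migration_disabled(p))                  /* migrate_disable(): must finish where it is */
                return cpu_online(cpu);

        if (!(p->flags & PF_KTHREAD))                  /* user tasks need an active CPU */
                return cpu_active(cpu) && task_cpu_possible(cpu, p);

        return cpu_online(cpu) && !cpu_dying(cpu);     /* kernel threads: online and not going down */
}
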
@@ -6106,7 +6115,7 @@ static void sched_core_balance(struct rq *rq)
 
 static DEFINE_PER_CPU(struct callback_head, core_balance_head);
 
-void queue_core_balance(struct rq *rq)
+static void queue_core_balance(struct rq *rq)
 {
        if (!sched_core_enabled(rq))
                return;
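
queue_core_balance() loses its external linkage because the forceidle change above made pick_next_task() its only caller (it used to be called from the idle class's set_next_task path). The unchanged remainder of the function, sketched from memory, filters out the uninteresting cases and defers the real work to sched_core_balance() via a balance callback:

/* Sketch of the unchanged remainder of queue_core_balance(); may differ in detail: */
        if (!rq->core->core_cookie)             /* no cookied task on this core */
                return;

        if (!rq->nr_running)                    /* genuinely idle, not forced idle */
                return;

        queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu),
                               sched_core_balance);
}
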
@@ -6416,7 +6425,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
                migrate_disable_switch(rq, prev);
                psi_sched_switch(prev, next, !task_on_rq_queued(prev));
 
-               trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev_state, prev, next);
+               trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
 
                /* Also unlocks the rq: */
                rq = context_switch(rq, prev, next, &rf);
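
The sched_switch tracepoint's argument order changed this cycle: prev_state moved from the second position to the end, so consumers that attach to the raw tracepoint and only read the long-standing (preempt, prev, next) arguments keep working; __schedule() is updated here to pass the arguments in the new order. A sketch of the assumed new prototype:

/* Assumed TP_PROTO after the change (include/trace/events/sched.h): */
        TP_PROTO(bool preempt,
                 struct task_struct *prev,
                 struct task_struct *next,
                 unsigned int prev_state),
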
@@ -8449,6 +8458,18 @@ static void __init preempt_dynamic_init(void)
        }
 }
 
+#define PREEMPT_MODEL_ACCESSOR(mode) \
+       bool preempt_model_##mode(void)                                          \
+       {                                                                        \
+               WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
+               return preempt_dynamic_mode == preempt_dynamic_##mode;           \
+       }                                                                        \
+       EXPORT_SYMBOL_GPL(preempt_model_##mode)
+
+PREEMPT_MODEL_ACCESSOR(none);
+PREEMPT_MODEL_ACCESSOR(voluntary);
+PREEMPT_MODEL_ACCESSOR(full);
+
 #else /* !CONFIG_PREEMPT_DYNAMIC */
 
 static inline void preempt_dynamic_init(void) { }
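
The PREEMPT_MODEL_ACCESSOR macro generates one exported predicate per dynamic preemption mode (preempt_model_none(), preempt_model_voluntary(), preempt_model_full()), so code, including modules, can ask at run time which preemption model is active instead of relying on the CONFIG_PREEMPT* compile-time switches; the WARN_ON_ONCE fires if an accessor is called before the mode has been set up. On !CONFIG_PREEMPT_DYNAMIC builds the same names are expected to resolve to compile-time constants. A hypothetical caller, purely for illustration:

/* Hypothetical user of the accessors, not part of this patch: */
static void maybe_offer_resched_point(void)
{
        /* Under the non-preemptible models, add an explicit scheduling point. */
        if (preempt_model_none() || preempt_model_voluntary())
                cond_resched();
}
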
@@ -9491,11 +9512,11 @@ void __init sched_init(void)
        int i;
 
        /* Make sure the linker didn't screw up */
-       BUG_ON(&idle_sched_class + 1 != &fair_sched_class ||
-              &fair_sched_class + 1 != &rt_sched_class ||
-              &rt_sched_class + 1   != &dl_sched_class);
+       BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
+              &fair_sched_class != &rt_sched_class + 1 ||
+              &rt_sched_class   != &dl_sched_class + 1);
 #ifdef CONFIG_SMP
-       BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
+       BUG_ON(&dl_sched_class != &stop_sched_class + 1);
 #endif
 
        wait_bit_init();
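
The flipped sched_init() assertions document the new memory layout of the scheduling classes: the linker now emits them highest-priority-first, so each class sits one slot past the next-higher-priority one, which is what makes the pointer-comparison form of sched_class_above() used throughout this diff work. A sketch of the assumed linker-script fragment behind these assertions, with symbol names from memory and possibly differing:

/* Assumed SCHED_DATA fragment (include/asm-generic/vmlinux.lds.h) after the flip: */
#define SCHED_DATA                              \
        STRUCT_ALIGN();                         \
        __sched_class_highest = .;              \
        *(__stop_sched_class)                   \
        *(__dl_sched_class)                     \
        *(__rt_sched_class)                     \
        *(__fair_sched_class)                   \
        *(__idle_sched_class)                   \
        __sched_class_lowest = .;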