sched: Prepare for Core-wide rq->lock
author	Peter Zijlstra <peterz@infradead.org>
Tue, 2 Mar 2021 11:16:48 +0000 (12:16 +0100)
committer	Peter Zijlstra <peterz@infradead.org>
Wed, 12 May 2021 09:43:26 +0000 (11:43 +0200)
When switching on core-sched, CPUs need to agree which lock to use for
their RQ.

The new rule will be that rq->core_enabled will be toggled while
holding all rq->__locks that belong to a core. This means we need to
double-check the rq->core_enabled value after each lock acquire and
retry if it changed.
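
Concretely, the lock functions turn into an acquire-and-recheck loop;
a condensed sketch of the raw_spin_rq_lock_nested() change in the
core.c hunk below:

  for (;;) {
          lock = rq_lockp(rq);    /* &rq->__lock, or later a core-wide lock */
          raw_spin_lock_nested(lock, subclass);
          if (likely(lock == rq_lockp(rq)))
                  return;         /* rq_lockp() cannot change while we hold it */
          raw_spin_unlock(lock);  /* raced with a core-sched toggle, retry */
  }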

This also has implications for those sites that take multiple RQ
locks: they need to be careful that the second lock doesn't end up
being the first lock.

Verify the lock pointer after acquiring the first lock: if the two
runqueues are on the same core, holding any of that core's rq->__lock
instances pins the core state.
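
The new double_rq_lock() added in the core.c hunk below captures both
rules: take the lower-ordered rq first, then skip the second acquire
when the first lock already covers both runqueues:

  if (rq_order_less(rq2, rq1))
          swap(rq1, rq2);         /* always lock in CPU-number order */

  raw_spin_rq_lock(rq1);
  if (rq_lockp(rq1) == rq_lockp(rq2))
          return;                 /* same core: one lock covers both rqs */

  raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);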

While there, change the rq->__lock ordering to CPU number instead of
rq address; this greatly simplifies the next patch.
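
The ordering helper itself is a plain CPU-number comparison, as added
to sched.h below:

  static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
  {
          return rq1->cpu < rq2->cpu;
  }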

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/YJUNY0dmrJMD/BIm@hirez.programming.kicks-ass.net
kernel/sched/core.c
kernel/sched/sched.h

index 5e6f5f5750a3266ceb531af1fc26e35f64b47723..8bd2f12810e30ffa7d65d0a29efded120521857a 100644
@@ -186,12 +186,37 @@ int sysctl_sched_rt_runtime = 950000;
 
 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
 {
-       raw_spin_lock_nested(rq_lockp(rq), subclass);
+       raw_spinlock_t *lock;
+
+       if (sched_core_disabled()) {
+               raw_spin_lock_nested(&rq->__lock, subclass);
+               return;
+       }
+
+       for (;;) {
+               lock = rq_lockp(rq);
+               raw_spin_lock_nested(lock, subclass);
+               if (likely(lock == rq_lockp(rq)))
+                       return;
+               raw_spin_unlock(lock);
+       }
 }
 
 bool raw_spin_rq_trylock(struct rq *rq)
 {
-       return raw_spin_trylock(rq_lockp(rq));
+       raw_spinlock_t *lock;
+       bool ret;
+
+       if (sched_core_disabled())
+               return raw_spin_trylock(&rq->__lock);
+
+       for (;;) {
+               lock = rq_lockp(rq);
+               ret = raw_spin_trylock(lock);
+               if (!ret || (likely(lock == rq_lockp(rq))))
+                       return ret;
+               raw_spin_unlock(lock);
+       }
 }
 
 void raw_spin_rq_unlock(struct rq *rq)
@@ -199,6 +224,25 @@ void raw_spin_rq_unlock(struct rq *rq)
        raw_spin_unlock(rq_lockp(rq));
 }
 
+#ifdef CONFIG_SMP
+/*
+ * double_rq_lock - safely lock two runqueues
+ */
+void double_rq_lock(struct rq *rq1, struct rq *rq2)
+{
+       lockdep_assert_irqs_disabled();
+
+       if (rq_order_less(rq2, rq1))
+               swap(rq1, rq2);
+
+       raw_spin_rq_lock(rq1);
+       if (rq_lockp(rq1) == rq_lockp(rq2))
+               return;
+
+       raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
+}
+#endif
+
 /*
  * __task_rq_lock - lock the rq @p resides on.
  */
index dbabf282c039acad5a39c339eb8c3f34d85399cf..f8bd5c8fc90aae0006e7abafee075452a2dfd961 100644
@@ -1113,6 +1113,11 @@ static inline bool is_migration_disabled(struct task_struct *p)
 #endif
 }
 
+static inline bool sched_core_disabled(void)
+{
+       return true;
+}
+
 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 {
        return &rq->__lock;
@@ -2231,10 +2236,17 @@ unsigned long arch_scale_freq_capacity(int cpu)
 }
 #endif
 
+
 #ifdef CONFIG_SMP
-#ifdef CONFIG_PREEMPTION
 
-static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
+static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
+{
+       return rq1->cpu < rq2->cpu;
+}
+
+extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
+#ifdef CONFIG_PREEMPTION
 
 /*
  * fair double_lock_balance: Safely acquires both rq->locks in a fair
@@ -2274,14 +2286,13 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
        if (likely(raw_spin_rq_trylock(busiest)))
                return 0;
 
-       if (rq_lockp(busiest) >= rq_lockp(this_rq)) {
+       if (rq_order_less(this_rq, busiest)) {
                raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
                return 0;
        }
 
        raw_spin_rq_unlock(this_rq);
-       raw_spin_rq_lock(busiest);
-       raw_spin_rq_lock_nested(this_rq, SINGLE_DEPTH_NESTING);
+       double_rq_lock(this_rq, busiest);
 
        return 1;
 }
@@ -2333,31 +2344,6 @@ static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
        raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 }
 
-/*
- * double_rq_lock - safely lock two runqueues
- *
- * Note this does not disable interrupts like task_rq_lock,
- * you need to do so manually before calling.
- */
-static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
-       __acquires(rq1->lock)
-       __acquires(rq2->lock)
-{
-       BUG_ON(!irqs_disabled());
-       if (rq_lockp(rq1) == rq_lockp(rq2)) {
-               raw_spin_rq_lock(rq1);
-               __acquire(rq2->lock);   /* Fake it out ;) */
-       } else {
-               if (rq_lockp(rq1) < rq_lockp(rq2)) {
-                       raw_spin_rq_lock(rq1);
-                       raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
-               } else {
-                       raw_spin_rq_lock(rq2);
-                       raw_spin_rq_lock_nested(rq1, SINGLE_DEPTH_NESTING);
-               }
-       }
-}
-
 /*
  * double_rq_unlock - safely unlock two runqueues
  *
@@ -2368,11 +2354,11 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
        __releases(rq1->lock)
        __releases(rq2->lock)
 {
-       raw_spin_rq_unlock(rq1);
        if (rq_lockp(rq1) != rq_lockp(rq2))
                raw_spin_rq_unlock(rq2);
        else
                __release(rq2->lock);
+       raw_spin_rq_unlock(rq1);
 }
 
 extern void set_rq_online (struct rq *rq);