bcachefs: Fix rcu_pending for PREEMPT_RT
author Kent Overstreet <kent.overstreet@linux.dev>
Sun, 8 Jun 2025 16:17:02 +0000 (12:17 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Thu, 12 Jun 2025 03:21:30 +0000 (23:21 -0400)
PREEMPT_RT redefines how standard spinlocks work, so local_irq_save() +
spin_lock() is no longer equivalent to spin_lock_irqsave(). Fortunately,
we don't strictly need to do it that way: the per-cpu rcu_pending_pcpu
is protected by its lock no matter which cpu's instance we end up with,
so we can look it up with raw_cpu_ptr() and take the lock with
spin_lock_irqsave().
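
A minimal sketch of the pattern change, with illustrative names
(pcpu_bucket, enqueue_old, enqueue_new are not the bcachefs code):

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct pcpu_bucket {
	spinlock_t	lock;
	int		count;
};

/* Broken on PREEMPT_RT: spin_lock() may sleep, but IRQs are hard-disabled */
static void enqueue_old(struct pcpu_bucket __percpu *buckets)
{
	unsigned long flags;
	struct pcpu_bucket *b;

	local_irq_save(flags);		/* hard-disables interrupts */
	b = this_cpu_ptr(buckets);	/* safe: no migration with IRQs off */
	spin_lock(&b->lock);		/* a sleeping lock on RT: bug */
	b->count++;
	spin_unlock(&b->lock);
	local_irq_restore(flags);
}

/* RT-safe: let the spinlock API decide what "irqsave" means */
static void enqueue_new(struct pcpu_bucket __percpu *buckets)
{
	unsigned long flags;
	struct pcpu_bucket *b = raw_cpu_ptr(buckets);

	/*
	 * We may migrate before the lock is taken and touch another
	 * cpu's bucket; that's fine, the lock protects the data either
	 * way.
	 */
	spin_lock_irqsave(&b->lock, flags);
	b->count++;
	spin_unlock_irqrestore(&b->lock, flags);
}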

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/rcu_pending.c

index bef2aa1b8bcdd8c9222ab03d1a92e3f934a5c023..b1438be9d690888bcb185973219864371203fe21 100644
@@ -182,11 +182,6 @@ static inline void kfree_bulk(size_t nr, void ** p)
        while (nr--)
                kfree(*p);
 }
-
-#define local_irq_save(flags)          \
-do {                                   \
-       flags = 0;                      \
-} while (0)
 #endif
 
 static noinline void __process_finished_items(struct rcu_pending *pending,
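
The hunk above removes a no-op local_irq_save() shim from what is
evidently a fallback block for non-kernel builds: the same block that
open-codes kfree_bulk() just before it (the opening guard sits outside
the hunk). Once the enqueue path below switches to spin_lock_irqsave(),
nothing in this file calls local_irq_save(), so the shim can go.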
@@ -429,9 +424,15 @@ __rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *head,
 
        BUG_ON((ptr != NULL) != (pending->process == RCU_PENDING_KVFREE_FN));
 
-       local_irq_save(flags);
-       p = this_cpu_ptr(pending->p);
-       spin_lock(&p->lock);
+       /*
+        * We could be scheduled before taking the lock and end up using a
+        * different cpu's rcu_pending_pcpu: that's ok, it needs a lock anyway.
+        *
+        * We have to do it this way to avoid breaking PREEMPT_RT, which
+        * redefines how spinlocks work:
+        */
+       p = raw_cpu_ptr(pending->p);
+       spin_lock_irqsave(&p->lock, flags);
        rcu_gp_poll_state_t seq = __get_state_synchronize_rcu(pending->srcu);
 restart:
        if (may_sleep &&
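
Two details make the new sequence safe where the old one was not. The
lookup switches from this_cpu_ptr() to raw_cpu_ptr() because, with
neither interrupts nor preemption disabled at that point, this_cpu_ptr()
would trip the CONFIG_DEBUG_PREEMPT sanity check; raw_cpu_ptr() permits
the benign migration race described in the comment. And on PREEMPT_RT,
spin_lock_irqsave() does not hard-disable interrupts at all; roughly (a
simplified sketch of the RT definition, not a verbatim quote):

#define spin_lock_irqsave(lock, flags)	\
do {					\
	flags = 0;			\
	spin_lock(lock);		\
} while (0)

The lock itself becomes a sleeping, rt_mutex-backed lock, which is why
the old local_irq_save() + spin_lock() sequence, sleeping with hard
interrupts disabled, was broken there.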
@@ -520,9 +521,8 @@ check_expired:
                goto free_node;
        }
 
-       local_irq_save(flags);
-       p = this_cpu_ptr(pending->p);
-       spin_lock(&p->lock);
+       p = raw_cpu_ptr(pending->p);
+       spin_lock_irqsave(&p->lock, flags);
        goto restart;
 }