rcu/tree: Reduce wake up for synchronize_rcu() common case
author: Joel Fernandes (Google) <joel@joelfernandes.org>
Tue, 19 Mar 2024 18:54:57 +0000 (14:54 -0400)
committer: Paul E. McKenney <paulmck@kernel.org>
Tue, 18 Jun 2024 16:59:40 +0000 (09:59 -0700)
In the synchronize_rcu() common case, there will be fewer than
SR_MAX_USERS_WAKE_FROM_GP users per GP. Waking up the kworker just to
free the last injected wait head is pointless, since at that point all
of the users have already been awakened.

Introduce a new counter to track this and prevent the wakeup in the
common case.

[ paulmck: Remove atomic_dec_return_release in cannot-happen state. ]

Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/tree.c
kernel/rcu/tree.h

index 28c7031711a3fe9604c75272227bf1e6fb0979b8..ecd57940fc881c7880f1e292f06756c795df4336 100644 (file)
@@ -96,6 +96,7 @@ static struct rcu_state rcu_state = {
        .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
        .srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
                rcu_sr_normal_gp_cleanup_work),
+       .srs_cleanups_pending = ATOMIC_INIT(0),
 };
 
 /* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -1660,6 +1661,9 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
 
                rcu_sr_put_wait_head(rcu);
        }
+
+       /* Order list manipulations with atomic access. */
+       atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
 }
 
 /*
@@ -1667,7 +1671,7 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
  */
 static void rcu_sr_normal_gp_cleanup(void)
 {
-       struct llist_node *wait_tail, *next, *rcu;
+       struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
        int done = 0;
 
        wait_tail = rcu_state.srs_wait_tail;
@@ -1693,16 +1697,34 @@ static void rcu_sr_normal_gp_cleanup(void)
                        break;
        }
 
-       // concurrent sr_normal_gp_cleanup work might observe this update.
-       smp_store_release(&rcu_state.srs_done_tail, wait_tail);
+       /*
+        * Fast path, no more users to process except putting the second last
+        * wait head if no inflight-workers. If there are in-flight workers,
+        * they will remove the last wait head.
+        *
+        * Note that the ACQUIRE orders atomic access with list manipulation.
+        */
+       if (wait_tail->next && wait_tail->next->next == NULL &&
+           rcu_sr_is_wait_head(wait_tail->next) &&
+           !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
+               rcu_sr_put_wait_head(wait_tail->next);
+               wait_tail->next = NULL;
+       }
+
+       /* Concurrent sr_normal_gp_cleanup work might observe this update. */
        ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
+       smp_store_release(&rcu_state.srs_done_tail, wait_tail);
 
        /*
         * We schedule a work in order to perform a final processing
         * of outstanding users(if still left) and releasing wait-heads
         * added by rcu_sr_normal_gp_init() call.
         */
-       queue_work(sync_wq, &rcu_state.srs_cleanup_work);
+       if (wait_tail->next) {
+               atomic_inc(&rcu_state.srs_cleanups_pending);
+               if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
+                       atomic_dec(&rcu_state.srs_cleanups_pending);
+       }
 }
 
 /*
index bae7925c497feecc5116b46b23ac8194e77a66d4..affcb92a358c364a2c8fafd64db791a8a2a02c97 100644 (file)
@@ -420,6 +420,7 @@ struct rcu_state {
        struct llist_node *srs_done_tail; /* ready for GP users. */
        struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
        struct work_struct srs_cleanup_work;
+       atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */
 };
 
 /* Values for rcu_state structure's gp_flags field. */