rcu: Support direct wake-up of synchronize_rcu() users
authorUladzislau Rezki (Sony) <urezki@gmail.com>
Fri, 8 Mar 2024 17:34:07 +0000 (18:34 +0100)
committerUladzislau Rezki (Sony) <urezki@gmail.com>
Mon, 15 Apr 2024 17:47:51 +0000 (19:47 +0200)
This patch introduces a small enhancement which allows a
direct wake-up of synchronize_rcu() callers. It occurs after
completion of a grace period, and is thus done by the gp-kthread.

The number of clients is limited by a hard-coded maximum allowed
threshold. The remaining part, if it still exists, is deferred to
the main worker.

Link: https://lore.kernel.org/lkml/Zd0ZtNu+Rt0qXkfS@lothringen/
Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
kernel/rcu/tree.c
kernel/rcu/tree.h

index 2e1c5be6d64b11ad14484e5504c60e85e5083546..2a270abade4df40c5b47471a60e2de217677eb9b 100644 (file)
@@ -1645,7 +1645,8 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
  */
 static void rcu_sr_normal_gp_cleanup(void)
 {
-       struct llist_node *wait_tail;
+       struct llist_node *wait_tail, *next, *rcu;
+       int done = 0;
 
        wait_tail = rcu_state.srs_wait_tail;
        if (wait_tail == NULL)
@@ -1653,11 +1654,32 @@ static void rcu_sr_normal_gp_cleanup(void)
 
        rcu_state.srs_wait_tail = NULL;
        ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
+       WARN_ON_ONCE(!rcu_sr_is_wait_head(wait_tail));
+
+       /*
+        * Process (a) and (d) cases. See an illustration.
+        */
+       llist_for_each_safe(rcu, next, wait_tail->next) {
+               if (rcu_sr_is_wait_head(rcu))
+                       break;
+
+               rcu_sr_normal_complete(rcu);
+               // It can be last, update a next on this step.
+               wait_tail->next = next;
+
+               if (++done == SR_MAX_USERS_WAKE_FROM_GP)
+                       break;
+       }
 
        // concurrent sr_normal_gp_cleanup work might observe this update.
        smp_store_release(&rcu_state.srs_done_tail, wait_tail);
        ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
 
+       /*
+        * We schedule a work in order to perform a final processing
+        * of outstanding users(if still left) and releasing wait-heads
+        * added by rcu_sr_normal_gp_init() call.
+        */
        schedule_work(&rcu_state.srs_cleanup_work);
 }
 
index b942b94374382ed8e23a34863cc1e7689640446f..2832787cee1d0e942346e32bad905067f933580b 100644 (file)
@@ -315,6 +315,12 @@ do {                                                                       \
        __set_current_state(TASK_RUNNING);                              \
 } while (0)
 
+/*
+ * A max threshold for synchronize_rcu() users which are
+ * awaken directly by the rcu_gp_kthread(). Left part is
+ * deferred to the main worker.
+ */
+#define SR_MAX_USERS_WAKE_FROM_GP 5
 #define SR_NORMAL_GP_WAIT_HEAD_MAX 5
 
 struct sr_wait_node {