rcu/nocb: Remove SEGCBLIST_RCU_CORE
author: Frederic Weisbecker <frederic@kernel.org>
Thu, 30 May 2024 13:45:50 +0000 (15:45 +0200)
committer: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
Mon, 29 Jul 2024 02:04:32 +0000 (07:34 +0530)
RCU core can no longer be running while a CPU is in the middle of
(de-)offloading, since this sort of transition now applies only to
offline CPUs.

The SEGCBLIST_RCU_CORE state can therefore be removed.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
include/linux/rcu_segcblist.h
kernel/rcu/rcu_segcblist.h
kernel/rcu/tree.c
kernel/rcu/tree_nocb.h

index ba95c06675e113dd639cecf6eaa9b63b005208b8..5469c54cd778594c11081a042f81d6620c677b46 100644 (file)
@@ -185,11 +185,10 @@ struct rcu_cblist {
  *  ----------------------------------------------------------------------------
  */
 #define SEGCBLIST_ENABLED      BIT(0)
-#define SEGCBLIST_RCU_CORE     BIT(1)
-#define SEGCBLIST_LOCKING      BIT(2)
-#define SEGCBLIST_KTHREAD_CB   BIT(3)
-#define SEGCBLIST_KTHREAD_GP   BIT(4)
-#define SEGCBLIST_OFFLOADED    BIT(5)
+#define SEGCBLIST_LOCKING      BIT(1)
+#define SEGCBLIST_KTHREAD_CB   BIT(2)
+#define SEGCBLIST_KTHREAD_GP   BIT(3)
+#define SEGCBLIST_OFFLOADED    BIT(4)
 
 struct rcu_segcblist {
        struct rcu_head *head;
index 4fe877f5f654097bfe97f59cbd720dda3c8e5004..7a0962dfee86031168c4e24f98113344eb5b3bb4 100644 (file)
@@ -95,15 +95,6 @@ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
        return false;
 }
 
-static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
-{
-       if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
-           !rcu_segcblist_test_flags(rsclp, SEGCBLIST_RCU_CORE))
-               return true;
-
-       return false;
-}
-
 /*
  * Are all segments following the specified segment of the specified
  * rcu_segcblist structure empty of callbacks?  (The specified
index 1a272c678533edd8006cfe9a52765c9abfe27d81..82e831b969e41bba9b8eac1f9f9a538146279a80 100644 (file)
@@ -79,9 +79,6 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
        .gpwrap = true,
-#ifdef CONFIG_RCU_NOCB_CPU
-       .cblist.flags = SEGCBLIST_RCU_CORE,
-#endif
 };
 static struct rcu_state rcu_state = {
        .level = { &rcu_state.node[0] },
index af44e75eb0cd9ea2b2e3771505f56c5939ef3351..24daf606de0c163a164897d64d1a29450d13bbd6 100644 (file)
@@ -1060,7 +1060,6 @@ static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
        WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
        WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
 
-       rcu_segcblist_set_flags(cblist, SEGCBLIST_RCU_CORE);
        wake_gp = rdp_offload_toggle(rdp, false, flags);
 
        mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
@@ -1168,13 +1167,6 @@ static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
        swait_event_exclusive(rdp->nocb_state_wq,
                              rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
 
-       /*
-        * All kthreads are ready to work, we can finally enable nocb bypass.
-        */
-       rcu_nocb_lock_irqsave(rdp, flags);
-       rcu_segcblist_clear_flags(cblist, SEGCBLIST_RCU_CORE);
-       rcu_nocb_unlock_irqrestore(rdp, flags);
-
        return 0;
 }
 
@@ -1350,7 +1342,6 @@ void __init rcu_init_nohz(void)
                        rcu_segcblist_init(&rdp->cblist);
                rcu_segcblist_offload(&rdp->cblist, true);
                rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
-               rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE);
        }
        rcu_organize_nocb_kthreads();
 }