rcu/nocb: Use separate flag to indicate offloaded ->cblist
author     Paul E. McKenney <paulmck@linux.ibm.com>
           Fri, 12 Apr 2019 22:58:34 +0000 (15:58 -0700)
committer  Paul E. McKenney <paulmck@linux.ibm.com>
           Tue, 13 Aug 2019 21:35:49 +0000 (14:35 -0700)
RCU callback processing currently uses rcu_is_nocb_cpu() to determine
whether or not the current CPU's callbacks are to be offloaded.
This works, but it is not cache-friendly: the nocb state lives in a
separate cpumask rather than in the callback list that the caller
already has in hand.  In addition, upcoming use of ->cblist for
offloaded callbacks will greatly increase the frequency of these
checks.  This commit therefore adds an ->offloaded flag to the
rcu_segcblist structure to provide a more flexible and cache-friendly
means of checking for callback offloading.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
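
[Editorial note: the following is a minimal userspace C sketch of the
pattern this commit adopts, for illustration only; all names below
(nocb_mask, segcblist, and so on) are toy stand-ins, not the kernel's
own symbols.  The point is that the old check consults a standalone
per-CPU mask, while the new check reads a flag embedded in the
structure the callback code is already touching.]

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 8

    /* Old style: offload state lives in a standalone per-CPU mask,
     * so every check pulls in a cache line unrelated to the list. */
    static bool nocb_mask[NR_CPUS];

    static bool is_nocb_cpu(int cpu)
    {
            return nocb_mask[cpu];
    }

    /* New style: the flag is embedded in the callback-list structure
     * itself, which the caller already has hot in cache. */
    struct segcblist {
            long len;
            unsigned char enabled;
            unsigned char offloaded;
    };

    static bool segcblist_is_offloaded(struct segcblist *cbl)
    {
            return cbl->offloaded;
    }

    int main(void)
    {
            struct segcblist cbl = { .len = 0, .enabled = 1, .offloaded = 1 };

            nocb_mask[3] = true;
            printf("old check, cpu 3: %d\n", is_nocb_cpu(3));
            printf("new check:        %d\n", segcblist_is_offloaded(&cbl));
            return 0;
    }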
include/linux/rcu_segcblist.h
kernel/rcu/rcu_segcblist.c
kernel/rcu/rcu_segcblist.h
kernel/rcu/tree.c
kernel/rcu/tree_plugin.h

index ed2cfd3c074337d363fca652106424df7157cd40..8b684888f71d19344bd8e073c1be66c8b5fc5dc7 100644 (file)
@@ -71,6 +71,7 @@ struct rcu_segcblist {
        long len;
        long len_lazy;
        u8 enabled;
+       u8 offloaded;
 };
 
 #define RCU_SEGCBLIST_INITIALIZER(n) \
index b305dcac34c991028208eeabf273d3a933e048b8..700779f4c0cb3231f82c8bdc89f153e463958474 100644 (file)
@@ -73,6 +73,18 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
        rsclp->enabled = 0;
 }
 
+/*
+ * Mark the specified rcu_segcblist structure as offloaded.  This
+ * structure must be empty.
+ */
+void rcu_segcblist_offload(struct rcu_segcblist *rsclp)
+{
+       WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
+       WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
+       WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp));
+       rsclp->offloaded = 1;
+}
+
 /*
  * Does the specified rcu_segcblist structure contain callbacks that
  * are ready to be invoked?
index b2de7b32da29e6d38e3ae10236cfa784256ca0a4..8f3783391075100caf02b7e6ffcd0184f2cfdb76 100644 (file)
@@ -66,6 +66,12 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
        return rsclp->enabled;
 }
 
+/* Is the specified rcu_segcblist offloaded?  */
+static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
+{
+       return rsclp->offloaded;
+}
+
 /*
  * Are all segments following the specified segment of the specified
  * rcu_segcblist structure empty of callbacks?  (The specified
@@ -78,6 +84,7 @@ static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
 
 void rcu_segcblist_init(struct rcu_segcblist *rsclp);
 void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
+void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
 bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
 bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
 struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
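
[Editorial note: a hedged sketch of how callers use the new predicate,
assuming (as the tree.c hunks below show) that they already hold their
per-CPU rcu_data pointer, so the conversion replaces a CPU-number mask
lookup with a field read on data in hand.  Names here are toy
stand-ins.]

    #include <stdbool.h>
    #include <assert.h>

    struct segcblist { unsigned char enabled, offloaded; };
    struct cpu_data  { int cpu; struct segcblist cblist; };  /* toy rcu_data */

    static bool segcblist_is_offloaded(struct segcblist *cbl)
    {
            return cbl->offloaded;
    }

    /* Shape of the converted callers: bail out early when this CPU's
     * callbacks are handled elsewhere rather than invoked locally. */
    static bool do_local_callback_work(struct cpu_data *cd)
    {
            if (segcblist_is_offloaded(&cd->cblist))
                    return false;   /* offloaded: nothing to do here */
            /* ... local callback processing would go here ... */
            return true;
    }

    int main(void)
    {
            struct cpu_data cd = { .cpu = 0, .cblist = { 1, 1 } };

            assert(!do_local_callback_work(&cd));
            cd.cblist.offloaded = 0;
            assert(do_local_callback_work(&cd));
            return 0;
    }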
index a14e5fbbea467ecd6cbadd390f5382bd320b07f3..6f5c96c4f9a3239df87f99c2fe7b0274cf09bcda 100644 (file)
@@ -2858,10 +2858,11 @@ void rcu_barrier(void)
         * corresponding CPU's preceding callbacks have been invoked.
         */
        for_each_possible_cpu(cpu) {
-               if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
-                       continue;
                rdp = per_cpu_ptr(&rcu_data, cpu);
-               if (rcu_is_nocb_cpu(cpu)) {
+               if (!cpu_online(cpu) &&
+                   !rcu_segcblist_is_offloaded(&rdp->cblist))
+                       continue;
+               if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
                        if (!rcu_nocb_cpu_needs_barrier(cpu)) {
                                rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
                                                   rcu_state.barrier_sequence);
@@ -3155,7 +3156,8 @@ void rcutree_migrate_callbacks(int cpu)
        struct rcu_node *rnp_root = rcu_get_root();
        bool needwake;
 
-       if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
+       if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
+           rcu_segcblist_empty(&rdp->cblist))
                return;  /* No callbacks to migrate. */
 
        local_irq_save(flags);
index b8a43cf9bb4e22af2106de80d7e5713dce26f83a..fc6133eed50a14e8de9d45ec3b86da948f9bf474 100644 (file)
@@ -1382,7 +1382,7 @@ static void rcu_prepare_for_idle(void)
        int tne;
 
        lockdep_assert_irqs_disabled();
-       if (rcu_is_nocb_cpu(smp_processor_id()))
+       if (rcu_segcblist_is_offloaded(&rdp->cblist))
                return;
 
        /* Handle nohz enablement switches conservatively. */
@@ -1431,8 +1431,10 @@ static void rcu_prepare_for_idle(void)
  */
 static void rcu_cleanup_after_idle(void)
 {
+       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+
        lockdep_assert_irqs_disabled();
-       if (rcu_is_nocb_cpu(smp_processor_id()))
+       if (rcu_segcblist_is_offloaded(&rdp->cblist))
                return;
        if (rcu_try_advance_all_cbs())
                invoke_rcu_core();
@@ -1694,7 +1696,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                            bool lazy, unsigned long flags)
 {
 
-       if (!rcu_is_nocb_cpu(rdp->cpu))
+       if (!rcu_segcblist_is_offloaded(&rdp->cblist))
                return false;
        __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
        if (__is_kfree_rcu_offset((unsigned long)rhp->func))
@@ -1729,7 +1731,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
                                                     unsigned long flags)
 {
        lockdep_assert_irqs_disabled();
-       if (!rcu_is_nocb_cpu(smp_processor_id()))
+       if (!rcu_segcblist_is_offloaded(&my_rdp->cblist))
                return false; /* Not NOCBs CPU, caller must migrate CBs. */
        __call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
                                rcu_segcblist_tail(&rdp->cblist),
@@ -2192,6 +2194,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
        }
        rcu_segcblist_init(&rdp->cblist);
        rcu_segcblist_disable(&rdp->cblist);
+       rcu_segcblist_offload(&rdp->cblist);
        return true;
 }
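
[Editorial note: the init_nocb_callback_list() tail above fixes an
ordering: the list is initialized, disabled for local invocation, and
only then marked offloaded, and rcu_segcblist_offload() insists on an
empty list, presumably so that no callback straddles the change of
regime.  Below is a toy userspace mirror of that sequence, with
assert() standing in for WARN_ON_ONCE(); names are illustrative, not
the kernel's.]

    #include <assert.h>

    struct segcblist {
            long len;
            long len_lazy;
            unsigned char enabled;
            unsigned char offloaded;
    };

    static void segcblist_init(struct segcblist *cbl)
    {
            cbl->len = 0;
            cbl->len_lazy = 0;
            cbl->enabled = 1;
            cbl->offloaded = 0;
    }

    static void segcblist_disable(struct segcblist *cbl)
    {
            cbl->enabled = 0;
    }

    /* Mirror of rcu_segcblist_offload(): legal only on an empty list. */
    static void segcblist_offload(struct segcblist *cbl)
    {
            assert(cbl->len == 0);
            assert(cbl->len_lazy == 0);
            cbl->offloaded = 1;
    }

    int main(void)
    {
            struct segcblist cbl;

            /* Same order as the hunk above: init, disable, offload. */
            segcblist_init(&cbl);
            segcblist_disable(&cbl);
            segcblist_offload(&cbl);
            assert(cbl.offloaded && !cbl.enabled);
            return 0;
    }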