/*
 * Read-Copy Update mechanism for mutual exclusion, realtime implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	With thanks to Esben Nielsen, Bill Huey, and Ingo Molnar
 *	for pushing me away from locks and towards counters, and
 *	to Suparna Bhattacharya for pushing me completely away
 *	from atomic instructions on the read side.
 *
 * Papers: http://www.rdrop.com/users/paulmck/RCU
 *
 * Design Document: http://lwn.net/Articles/253651/
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU/ *.txt
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/cpumask.h>
#include <linux/rcupreempt_trace.h>
/*
 * Macro that prevents the compiler from reordering accesses, but does
 * absolutely -nothing- to prevent CPUs from reordering.  This is used
 * only to mediate communication between mainline code and hardware
 * interrupt and NMI handlers.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
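/*
 * Illustrative sketch only, not used by this file: the volatile cast
 * forces the compiler to emit exactly one load or store per use, so
 * mainline code can poll a flag that an interrupt handler stores to
 * without the load being hoisted out of the loop.  The flag below is
 * hypothetical:
 *
 *	static int irq_work_done;
 *
 *	while (!ACCESS_ONCE(irq_work_done))
 *		cpu_relax();
 *
 * ACCESS_ONCE() does nothing about CPU-level reordering, so the
 * explicit smp_mb() calls elsewhere in this file are still required.
 */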
/*
 * PREEMPT_RCU data structures.
 */

/*
 * GP_STAGES specifies the number of times the state machine has
 * to go through all of the rcu_try_flip_states (see below)
 * in a single Grace Period.
 *
 * GP in GP_STAGES stands for Grace Period ;)
 */
#define GP_STAGES 2

struct rcu_data {
	spinlock_t	lock;		/* Protect rcu_data fields. */
	long		completed;	/* Number of last completed batch. */
	int		waitlistcount;
	struct tasklet_struct rcu_tasklet;
	struct rcu_head *nextlist;
	struct rcu_head **nexttail;
	struct rcu_head *waitlist[GP_STAGES];
	struct rcu_head **waittail[GP_STAGES];
	struct rcu_head *donelist;
	struct rcu_head **donetail;
	long		rcu_flipctr[2];
#ifdef CONFIG_RCU_TRACE
	struct rcupreempt_trace trace;
#endif /* #ifdef CONFIG_RCU_TRACE */
};
/*
 * States for rcu_try_flip() and friends.
 */
enum rcu_try_flip_states {

	/*
	 * Stay here if nothing is happening.  Flip the counter if something
	 * starts happening.  Denoted by "I".
	 */
	rcu_try_flip_idle_state,

	/*
	 * Wait here for all CPUs to notice that the counter has flipped. This
	 * prevents the old set of counters from ever being incremented once
	 * we leave this state, which in turn is necessary because we cannot
	 * test any individual counter for zero -- we can only check the sum.
	 * Denoted by "A".
	 */
	rcu_try_flip_waitack_state,

	/*
	 * Wait here for the sum of the old per-CPU counters to reach zero.
	 * Denoted by "Z".
	 */
	rcu_try_flip_waitzero_state,

	/*
	 * Wait here for each of the other CPUs to execute a memory barrier.
	 * This is necessary to ensure that these other CPUs really have
	 * completed executing their RCU read-side critical sections, despite
	 * their CPUs wildly reordering memory.  Denoted by "M".
	 */
	rcu_try_flip_waitmb_state,
};
struct rcu_ctrlblk {
	spinlock_t	fliplock;	/* Protect state-machine transitions. */
	long		completed;	/* Number of last completed batch. */
	enum rcu_try_flip_states rcu_try_flip_state; /* The current state of
							the rcu state machine */
};

static DEFINE_PER_CPU(struct rcu_data, rcu_data);
static struct rcu_ctrlblk rcu_ctrlblk = {
	.fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
	.completed = 0,
	.rcu_try_flip_state = rcu_try_flip_idle_state,
};
#ifdef CONFIG_RCU_TRACE
static char *rcu_try_flip_state_names[] =
	{ "idle", "waitack", "waitzero", "waitmb" };
#endif /* #ifdef CONFIG_RCU_TRACE */
/*
 * Enum and per-CPU flag to determine when each CPU has seen
 * the most recent counter flip.
 */
enum rcu_flip_flag_values {
	rcu_flip_seen,		/* Steady/initial state, last flip seen. */
				/* Only GP detector can update. */
	rcu_flipped		/* Flip just completed, need confirmation. */
				/* Only corresponding CPU can update. */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_flip_flag_values, rcu_flip_flag)
							= rcu_flip_seen;
/*
 * Enum and per-CPU flag to determine when each CPU has executed the
 * needed memory barrier to fence in memory references from its last RCU
 * read-side critical section in the just-completed grace period.
 */
enum rcu_mb_flag_values {
	rcu_mb_done,		/* Steady/initial state, no mb()s required. */
				/* Only GP detector can update. */
	rcu_mb_needed		/* Flip just completed, need an mb(). */
				/* Only corresponding CPU can update. */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_mb_flag_values, rcu_mb_flag)
							= rcu_mb_done;
/*
 * RCU_DATA_ME: find the current CPU's rcu_data structure.
 * RCU_DATA_CPU: find the specified CPU's rcu_data structure.
 */
#define RCU_DATA_ME()		(&__get_cpu_var(rcu_data))
#define RCU_DATA_CPU(cpu)	(&per_cpu(rcu_data, cpu))

/*
 * Helper macro for tracing when the appropriate rcu_data is not
 * cached in a local variable, but where the CPU number is so cached.
 */
#define RCU_TRACE_CPU(f, cpu) RCU_TRACE(f, &(RCU_DATA_CPU(cpu)->trace));

/*
 * Helper macro for tracing when the appropriate rcu_data is not
 * cached in a local variable.
 */
#define RCU_TRACE_ME(f) RCU_TRACE(f, &(RCU_DATA_ME()->trace));

/*
 * Helper macro for tracing when the appropriate rcu_data is pointed
 * to by a local variable.
 */
#define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace));
/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
	return rcu_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
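/*
 * Illustrative sketch only (a hypothetical debug use, not part of this
 * implementation): sampling the batch counter twice with a delay in
 * between is a simple way to check that grace periods are advancing
 * while callbacks are known to be pending:
 *
 *	long before = rcu_batches_completed();
 *
 *	schedule_timeout_uninterruptible(10 * HZ);
 *	if (rcu_batches_completed() == before)
 *		printk(KERN_WARNING "RCU grace periods may be stalled\n");
 */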
void __rcu_read_lock(void)
{
	int idx;
	struct task_struct *t = current;
	int nesting;

	nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
	if (nesting != 0) {

		/* An earlier rcu_read_lock() covers us, just count it. */

		t->rcu_read_lock_nesting = nesting + 1;

	} else {
		unsigned long flags;

		/*
		 * We disable interrupts for the following reasons:
		 * - If we get scheduling clock interrupt here, and we
		 *   end up acking the counter flip, it's like a promise
		 *   that we will never increment the old counter again.
		 *   Thus we will break that promise if that
		 *   scheduling clock interrupt happens between the time
		 *   we pick the .completed field and the time that we
		 *   increment our counter.
		 *
		 * - We don't want to be preempted out here.
		 *
		 * NMIs can still occur, of course, and might themselves
		 * contain rcu_read_lock().
		 */

		local_irq_save(flags);

		/*
		 * Outermost nesting of rcu_read_lock(), so increment
		 * the current counter for the current CPU.  Use volatile
		 * casts to prevent the compiler from reordering.
		 */

		idx = ACCESS_ONCE(rcu_ctrlblk.completed) & 0x1;
		ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])++;

		/*
		 * Now that the per-CPU counter has been incremented, we
		 * are protected from races with rcu_read_lock() invoked
		 * from NMI handlers on this CPU.  We can therefore safely
		 * increment the nesting counter, relieving further NMIs
		 * of the need to increment the per-CPU counter.
		 */

		ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting + 1;

		/*
		 * Now that we have prevented any NMIs from storing to
		 * ->rcu_flipctr_idx, we can safely use it to remember
		 * which counter to decrement in the matching
		 * rcu_read_unlock().
		 */

		ACCESS_ONCE(t->rcu_flipctr_idx) = idx;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
void __rcu_read_unlock(void)
{
	int idx;
	struct task_struct *t = current;
	int nesting;

	nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
	if (nesting > 1) {

		/*
		 * We are still protected by the enclosing rcu_read_lock(),
		 * so simply decrement the counter.
		 */

		t->rcu_read_lock_nesting = nesting - 1;

	} else {
		unsigned long flags;

		/*
		 * Disable local interrupts to prevent the grace-period
		 * detection state machine from seeing us half-done.
		 * NMIs can still occur, of course, and might themselves
		 * contain rcu_read_lock() and rcu_read_unlock().
		 */

		local_irq_save(flags);

		/*
		 * Outermost nesting of rcu_read_unlock(), so we must
		 * decrement the current counter for the current CPU.
		 * This must be done carefully, because NMIs can
		 * occur at any point in this code, and any rcu_read_lock()
		 * and rcu_read_unlock() pairs in the NMI handlers
		 * must interact non-destructively with this code.
		 * Lots of volatile casts, and -very- careful ordering.
		 *
		 * Changes to this code, including this one, must be
		 * inspected, validated, and tested extremely carefully!!!
		 */

		/*
		 * First, pick up the index.
		 */

		idx = ACCESS_ONCE(t->rcu_flipctr_idx);

		/*
		 * Now that we have fetched the counter index, it is
		 * safe to decrement the per-task RCU nesting counter.
		 * After this, any interrupts or NMIs will increment and
		 * decrement the per-CPU counters.
		 */
		ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting - 1;

		/*
		 * It is now safe to decrement the per-CPU counter.
		 * NMIs that occur after the nesting-count update above
		 * will route their rcu_read_lock() calls through this
		 * "else" clause, and will thus start incrementing the
		 * per-CPU counter on their own.  They will also clobber
		 * ->rcu_flipctr_idx, but that is OK, since we have
		 * already fetched it.
		 */

		ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])--;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
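/*
 * Illustrative sketch only (struct foo, global_foo, and do_something()
 * are hypothetical): a typical reader-side critical section built on
 * the primitives above.  Under CONFIG_PREEMPT_RCU the reader may be
 * preempted inside the critical section; the per-CPU counter
 * incremented in __rcu_read_lock() keeps the grace period from
 * completing until the matching rcu_read_unlock().
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(global_foo);
 *	if (p != NULL)
 *		do_something(p->a);
 *	rcu_read_unlock();
 */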
/*
 * If a global counter flip has occurred since the last time that we
 * advanced callbacks, advance them.  Hardware interrupts must be
 * disabled when calling this function.
 */
static void __rcu_advance_callbacks(struct rcu_data *rdp)
{
	int cpu;
	int i;
	int wlc = 0;

	if (rdp->completed != rcu_ctrlblk.completed) {
		if (rdp->waitlist[GP_STAGES - 1] != NULL) {
			*rdp->donetail = rdp->waitlist[GP_STAGES - 1];
			rdp->donetail = rdp->waittail[GP_STAGES - 1];
			RCU_TRACE_RDP(rcupreempt_trace_move2done, rdp);
		}
		for (i = GP_STAGES - 2; i >= 0; i--) {
			if (rdp->waitlist[i] != NULL) {
				rdp->waitlist[i + 1] = rdp->waitlist[i];
				rdp->waittail[i + 1] = rdp->waittail[i];
				wlc++;
			} else {
				rdp->waitlist[i + 1] = NULL;
				rdp->waittail[i + 1] = &rdp->waitlist[i + 1];
			}
		}
		if (rdp->nextlist != NULL) {
			rdp->waitlist[0] = rdp->nextlist;
			rdp->waittail[0] = rdp->nexttail;
			wlc++;
			rdp->nextlist = NULL;
			rdp->nexttail = &rdp->nextlist;
			RCU_TRACE_RDP(rcupreempt_trace_move2wait, rdp);
		} else {
			rdp->waitlist[0] = NULL;
			rdp->waittail[0] = &rdp->waitlist[0];
		}
		rdp->waitlistcount = wlc;
		rdp->completed = rcu_ctrlblk.completed;
	}

	/*
	 * Check to see if this CPU needs to report that it has seen
	 * the most recent counter flip, thereby declaring that all
	 * subsequent rcu_read_lock() invocations will respect this flip.
	 */

	cpu = raw_smp_processor_id();
	if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
		smp_mb();  /* Subsequent counter accesses must see new value */
		per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
		smp_mb();  /* Subsequent RCU read-side critical sections */
			   /*  seen -after- acknowledgement. */
	}
}
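/*
 * Worked example (illustrative only, assuming GP_STAGES == 2): a
 * callback queued by call_rcu() starts out on ->nextlist.  The first
 * counter flip that this CPU catches up with moves it to ->waitlist[0],
 * the second moves it to ->waitlist[1], and the third moves it to
 * ->donelist, where rcu_process_callbacks() finally invokes it.  The
 * callback therefore spends at least GP_STAGES full flip intervals on
 * the waitlists before being invoked.
 */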
/*
 * Get here when RCU is idle.  Decide whether we need to
 * move out of idle state, and return non-zero if so.
 * "Straightforward" approach for the moment, might later
 * use callback-list lengths, grace-period duration, or
 * some such to determine when to exit idle state.
 * Might also need a pre-idle test that does not acquire
 * the lock, but let's get the simple case working first...
 */
static int
rcu_try_flip_idle(void)
{
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_i1);
	if (!rcu_pending(smp_processor_id())) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_ie1);
		return 0;
	}

	RCU_TRACE_ME(rcupreempt_trace_try_flip_g1);
	rcu_ctrlblk.completed++;  /* stands in for rcu_try_flip_g2 */

	/*
	 * Need a memory barrier so that other CPUs see the new
	 * counter value before they see the subsequent change of all
	 * the rcu_flip_flag instances to rcu_flipped.
	 */

	smp_mb();	/* see above block comment. */

	/* Now ask each CPU for acknowledgement of the flip. */

	for_each_possible_cpu(cpu)
		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;

	return 1;
}
/*
 * Wait for CPUs to acknowledge the flip.
 */
static int
rcu_try_flip_waitack(void)
{
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
	for_each_possible_cpu(cpu)
		if (per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
			return 0;
		}

	/*
	 * Make sure our checks above don't bleed into subsequent
	 * waiting for the sum of the counters to reach zero.
	 */
	smp_mb();	/* see above block comment. */
	RCU_TRACE_ME(rcupreempt_trace_try_flip_a2);
	return 1;
}
/*
 * Wait for collective ``last'' counter to reach zero,
 * then tell all CPUs to do an end-of-grace-period memory barrier.
 */
static int
rcu_try_flip_waitzero(void)
{
	int cpu;
	int lastidx = !(rcu_ctrlblk.completed & 0x1);
	int sum = 0;

	/* Check to see if the sum of the "last" counters is zero. */
	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
	for_each_possible_cpu(cpu)
		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
	if (sum != 0) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
		return 0;
	}

	/*
	 * This ensures that the other CPUs see the call for
	 * memory barriers -after- the sum to zero has been
	 * detected here.
	 */
	smp_mb();  /*  ^^^^^^^^^^^^ */

	/* Call for a memory barrier from each CPU. */
	for_each_possible_cpu(cpu)
		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_z2);
	return 1;
}
/*
 * Wait for all CPUs to do their end-of-grace-period memory barrier.
 * Return 1 once all CPUs have done so.
 */
static int
rcu_try_flip_waitmb(void)
{
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
	for_each_possible_cpu(cpu)
		if (per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
			return 0;
		}

	smp_mb(); /* Ensure that the above checks precede any following flip. */
	RCU_TRACE_ME(rcupreempt_trace_try_flip_m2);
	return 1;
}
/*
 * Attempt a single flip of the counters.  Remember, a single flip does
 * -not- constitute a grace period.  Instead, the interval between
 * at least GP_STAGES consecutive flips is a grace period.
 *
 * If anyone is nuts enough to run this CONFIG_PREEMPT_RCU implementation
 * on a large SMP, they might want to use a hierarchical organization of
 * the per-CPU-counter pairs.
 */
static void rcu_try_flip(void)
{
	unsigned long flags;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_1);
	if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_e1);
		return;
	}

	/*
	 * Take the next transition(s) through the RCU grace-period
	 * flip-counter state machine.
	 */

	switch (rcu_ctrlblk.rcu_try_flip_state) {
	case rcu_try_flip_idle_state:
		if (rcu_try_flip_idle())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_waitack_state;
		break;
	case rcu_try_flip_waitack_state:
		if (rcu_try_flip_waitack())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_waitzero_state;
		break;
	case rcu_try_flip_waitzero_state:
		if (rcu_try_flip_waitzero())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_waitmb_state;
		break;
	case rcu_try_flip_waitmb_state:
		if (rcu_try_flip_waitmb())
			rcu_ctrlblk.rcu_try_flip_state =
				rcu_try_flip_idle_state;
	}
	spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
}
/*
 * Check to see if this CPU needs to do a memory barrier in order to
 * ensure that any prior RCU read-side critical sections have committed
 * their counter manipulations and critical-section memory references
 * before declaring the grace period to be completed.
 */
static void rcu_check_mb(int cpu)
{
	if (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed) {
		smp_mb();  /* Ensure RCU read-side accesses are visible. */
		per_cpu(rcu_mb_flag, cpu) = rcu_mb_done;
	}
}
void rcu_check_callbacks(int cpu, int user)
{
	unsigned long flags;
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	rcu_check_mb(cpu);
	if (rcu_ctrlblk.completed == rdp->completed)
		rcu_try_flip();
	spin_lock_irqsave(&rdp->lock, flags);
	RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
	__rcu_advance_callbacks(rdp);
	if (rdp->donelist == NULL) {
		spin_unlock_irqrestore(&rdp->lock, flags);
	} else {
		spin_unlock_irqrestore(&rdp->lock, flags);
		raise_softirq(RCU_SOFTIRQ);
	}
}
/*
 * Needed by dynticks, to make sure all RCU processing has finished
 * when we go idle:
 */
void rcu_advance_callbacks(int cpu, int user)
{
	unsigned long flags;
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	if (rcu_ctrlblk.completed == rdp->completed) {
		rcu_try_flip();
		if (rcu_ctrlblk.completed == rdp->completed)
			return;
	}
	spin_lock_irqsave(&rdp->lock, flags);
	RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
	__rcu_advance_callbacks(rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
}
static void rcu_process_callbacks(struct softirq_action *unused)
{
	unsigned long flags;
	struct rcu_head *next, *list;
	struct rcu_data *rdp = RCU_DATA_ME();

	spin_lock_irqsave(&rdp->lock, flags);
	list = rdp->donelist;
	if (list == NULL) {
		spin_unlock_irqrestore(&rdp->lock, flags);
		return;
	}
	rdp->donelist = NULL;
	rdp->donetail = &rdp->donelist;
	RCU_TRACE_RDP(rcupreempt_trace_done_remove, rdp);
	spin_unlock_irqrestore(&rdp->lock, flags);
	while (list) {
		next = list->next;
		list->func(list);
		list = next;
		RCU_TRACE_ME(rcupreempt_trace_invoke);
	}
}
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = RCU_DATA_ME();
	spin_lock(&rdp->lock);
	__rcu_advance_callbacks(rdp);
	*rdp->nexttail = head;
	rdp->nexttail = &head->next;
	RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp);
	spin_unlock(&rdp->lock);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
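/*
 * Illustrative sketch only (struct foo, its list, and foo_free_rcu()
 * are hypothetical): a typical updater unlinks an element while holding
 * its update-side lock, then uses call_rcu() to defer the free until
 * all pre-existing readers have finished:
 *
 *	static void foo_free_rcu(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	list_del_rcu(&p->list);
 *	call_rcu(&p->rcu, foo_free_rcu);
 */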
/*
 * Wait until all currently running preempt_disable() code segments
 * (including hardware-irq-disable segments) complete.  Note that
 * in -rt this does -not- necessarily result in all currently executing
 * interrupt -handlers- having completed.
 */
void __synchronize_sched(void)
{
	cpumask_t oldmask;
	int cpu;

	if (sched_getaffinity(0, &oldmask) < 0)
		oldmask = cpu_possible_map;
	for_each_online_cpu(cpu) {
		sched_setaffinity(0, cpumask_of_cpu(cpu));
		schedule();
	}
	sched_setaffinity(0, oldmask);
}
EXPORT_SYMBOL_GPL(__synchronize_sched);
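/*
 * Illustrative sketch only (handler_table and old are hypothetical):
 * synchronize_sched() is typically used when readers rely on
 * preempt_disable() or local_irq_disable() rather than rcu_read_lock().
 * Once it returns, no CPU can still be executing inside such a region
 * that began before the call, so the old entry can safely be freed:
 *
 *	old = handler_table[i];
 *	handler_table[i] = NULL;
 *	synchronize_sched();
 *	kfree(old);
 */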
/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  Assumes that notifiers would take care of handling any
 * outstanding requests from the RCU core.
 *
 * This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	return (rdp->donelist != NULL ||
		!!rdp->waitlistcount ||
		rdp->nextlist != NULL);
}
int rcu_pending(int cpu)
{
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	/* The CPU has at least one callback queued somewhere. */
	if (rdp->donelist != NULL ||
	    !!rdp->waitlistcount ||
	    rdp->nextlist != NULL)
		return 1;

	/* The RCU core needs an acknowledgement from this CPU. */
	if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) ||
	    (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed))
		return 1;

	/* This CPU has fallen behind the global grace-period number. */
	if (rdp->completed != rcu_ctrlblk.completed)
		return 1;

	/* Nothing needed from this CPU. */
	return 0;
}
void __init __rcu_init(void)
{
	int cpu;
	int i;
	struct rcu_data *rdp;

	printk(KERN_NOTICE "Preemptible RCU implementation.\n");
	for_each_possible_cpu(cpu) {
		rdp = RCU_DATA_CPU(cpu);
		spin_lock_init(&rdp->lock);
		rdp->completed = 0;
		rdp->waitlistcount = 0;
		rdp->nextlist = NULL;
		rdp->nexttail = &rdp->nextlist;
		for (i = 0; i < GP_STAGES; i++) {
			rdp->waitlist[i] = NULL;
			rdp->waittail[i] = &rdp->waitlist[i];
		}
		rdp->donelist = NULL;
		rdp->donetail = &rdp->donelist;
		rdp->rcu_flipctr[0] = 0;
		rdp->rcu_flipctr[1] = 0;
	}
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL);
}

/*
 * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
 */
void synchronize_kernel(void)
{
	synchronize_rcu();
}
#ifdef CONFIG_RCU_TRACE
long *rcupreempt_flipctr(int cpu)
{
	return &RCU_DATA_CPU(cpu)->rcu_flipctr[0];
}
EXPORT_SYMBOL_GPL(rcupreempt_flipctr);

int rcupreempt_flip_flag(int cpu)
{
	return per_cpu(rcu_flip_flag, cpu);
}
EXPORT_SYMBOL_GPL(rcupreempt_flip_flag);

int rcupreempt_mb_flag(int cpu)
{
	return per_cpu(rcu_mb_flag, cpu);
}
EXPORT_SYMBOL_GPL(rcupreempt_mb_flag);

char *rcupreempt_try_flip_state_name(void)
{
	return rcu_try_flip_state_names[rcu_ctrlblk.rcu_try_flip_state];
}
EXPORT_SYMBOL_GPL(rcupreempt_try_flip_state_name);

struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu)
{
	struct rcu_data *rdp = RCU_DATA_CPU(cpu);

	return &rdp->trace;
}
EXPORT_SYMBOL_GPL(rcupreempt_trace_cpu);

#endif /* #ifdef CONFIG_RCU_TRACE */