/* include/linux/rcutree.h (Linux kernel, linux-block.git) */
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *	   Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

17#ifndef __LINUX_RCUTREE_H
18#define __LINUX_RCUTREE_H
19
d28139c4 20void rcu_softirq_qs(void);
bcbfdd01 21void rcu_note_context_switch(bool preempt);
29845399 22int rcu_needs_cpu(void);
584dc4ce 23void rcu_cpu_stall_reset(void);
64db4cff 24
/*
 * Note a virtualization-based context switch. This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes. The caller must have disabled interrupts.
 *
 * The @cpu argument is unused here; it is retained for interface
 * compatibility with callers.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
	/* A guest entry/exit is never a preemption point. */
	rcu_note_context_switch(false);
}

584dc4ce 35void synchronize_rcu_expedited(void);
c408b215 36void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
486e2593 37
584dc4ce 38void rcu_barrier(void);
17672480 39bool rcu_eqs_special_set(int cpu);
366237e7 40void rcu_momentary_dyntick_idle(void);
a35d1690 41void kfree_rcu_scheduler_running(void);
6be7436d 42bool rcu_gp_might_be_stalled(void);
d96c52fe
PM
43unsigned long start_poll_synchronize_rcu_expedited(void);
44void cond_synchronize_rcu_expedited(unsigned long oldstate);
765a3f4f 45unsigned long get_state_synchronize_rcu(void);
7abb18bd
PM
46unsigned long start_poll_synchronize_rcu(void);
47bool poll_state_synchronize_rcu(unsigned long oldstate);
765a3f4f 48void cond_synchronize_rcu(unsigned long oldstate);
a57eb940 49
3fcd6a23 50bool rcu_is_idle_cpu(int cpu);
51952bc6 51
#ifdef CONFIG_PROVE_RCU
/* Lockdep-style sanity check on irq exit; real body only when proving RCU. */
void rcu_irq_exit_check_preempt(void);
#else
static inline void rcu_irq_exit_check_preempt(void) { }
#endif

struct task_struct;
/* Report any quiescent state that task @t has been deferring. */
void rcu_preempt_deferred_qs(struct task_struct *t);

/* Clean up RCU state for a task that is exiting. */
void exit_rcu(void);

void rcu_scheduler_starting(void);
extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
/* True if RCU is currently watching this CPU (not in an extended
 * quiescent state such as idle). */
bool rcu_is_watching(void);
#ifndef CONFIG_PREEMPTION
void rcu_all_qs(void);
#endif

4df83742
TG
72/* RCUtree hotplug events */
73int rcutree_prepare_cpu(unsigned int cpu);
74int rcutree_online_cpu(unsigned int cpu);
75int rcutree_offline_cpu(unsigned int cpu);
76int rcutree_dead_cpu(unsigned int cpu);
77int rcutree_dying_cpu(unsigned int cpu);
f64c6013 78void rcu_cpu_starting(unsigned int cpu);
4df83742 79
64db4cff 80#endif /* __LINUX_RCUTREE_H */