// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

#ifdef CONFIG_PROVE_RCU
#define __INIT_HELD(func)	.held = func,
#else
#define __INIT_HELD(func)
#endif

static const struct {
	void (*sync)(void);
	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
	void (*wait)(void);
#ifdef CONFIG_PROVE_RCU
	int  (*held)(void);
#endif
} gp_ops[] = {
	[RCU_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_held)
	},
	[RCU_SCHED_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_sched_held)
	},
	[RCU_BH_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_bh_held)
	},
};
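
/*
 * Note: since the RCU flavors were consolidated, all three entries use
 * the same synchronize_rcu()/call_rcu()/rcu_barrier() primitives; the
 * flavors now differ only in which lockdep check __INIT_HELD() wires
 * up for rcu_sync_is_idle().
 */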

enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };

#define	rss_lock	gp_wait.lock
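
/*
 * State sketch (a summary of the code below, not in the original file):
 *
 *	gp_state:	GP_IDLE --rcu_sync_enter()--> GP_PENDING
 *			GP_PENDING --sync() done--> GP_PASSED
 *			GP_PASSED --rcu_sync_func(), gp_count == 0--> GP_IDLE
 *
 *	cb_state:	CB_IDLE --rcu_sync_exit(), gp_count hits 0--> CB_PENDING
 *			CB_PENDING --another rcu_sync_exit()--> CB_REPLAY
 *			CB_REPLAY --rcu_sync_func()--> CB_PENDING (requeued)
 *			CB_PENDING --rcu_sync_func()--> CB_IDLE
 *			(any) --rcu_sync_func(), gp_count != 0--> CB_IDLE (dropped)
 */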

#ifdef CONFIG_PROVE_RCU
void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
{
	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
			 "suspicious rcu_sync_is_idle() usage");
}

EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
#endif
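
/*
 * Illustrative reader-side pattern (a sketch, not part of this file;
 * "my_rss" and the fastpath/slowpath helpers are hypothetical):
 *
 *	rcu_read_lock();
 *	if (rcu_sync_is_idle(&my_rss))
 *		do_fastpath();		// no updater active
 *	else
 *		do_slowpath();		// updater active, take the slow route
 *	rcu_read_unlock();
 *
 * rcu_sync_is_idle() invokes rcu_sync_lockdep_assert(), so under
 * CONFIG_PROVE_RCU a missing rcu_read_lock() is flagged by lockdep.
 */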

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 * @type: Flavor of RCU with which to synchronize rcu_sync structure
 */
void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
	rsp->gp_type = type;
}
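
/*
 * Illustrative setup (a sketch; "my_rss" is hypothetical):
 *
 *	static struct rcu_sync my_rss;
 *	...
 *	rcu_sync_init(&my_rss, RCU_SYNC);	// readers use rcu_read_lock()
 */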

/**
 * rcu_sync_enter_start() - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}
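
/*
 * Illustrative use (a sketch; "my_rss" is hypothetical): code that wants
 * readers held off from the very beginning, e.g. until some late boot
 * step finishes, can start in the writer-held state without paying for
 * a grace period up front:
 *
 *	rcu_sync_init(&my_rss, RCU_SYNC);
 *	rcu_sync_enter_start(&my_rss);	// readers start on the slowpath
 *	...
 *	rcu_sync_exit(&my_rss);		// first exit re-enables fastpaths
 */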

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update. After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths. A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period; however, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	bool need_wait, need_sync;

	spin_lock_irq(&rsp->rss_lock);
	need_wait = rsp->gp_count++;
	need_sync = rsp->gp_state == GP_IDLE;
	if (need_sync)
		rsp->gp_state = GP_PENDING;
	spin_unlock_irq(&rsp->rss_lock);

	WARN_ON_ONCE(need_wait && need_sync);
	if (need_sync) {
		gp_ops[rsp->gp_type].sync();
		rsp->gp_state = GP_PASSED;
		wake_up_all(&rsp->gp_wait);
	} else if (need_wait) {
		wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
	} else {
		/*
		 * Possible when there's a pending CB from a rcu_sync_exit().
		 * Nobody has yet been allowed the 'fast' path and thus we can
		 * avoid doing any sync(). The callback will get 'dropped'.
		 */
		WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
	}
}
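
/*
 * Concurrency sketch for rcu_sync_enter() (illustrative): if updaters A
 * and B arrive at about the same time,
 *
 *	A: gp_count 0->1, GP_IDLE -> GP_PENDING, calls sync()
 *	B: gp_count 1->2, sees gp_state != GP_IDLE, sleeps on gp_wait
 *	A: sync() returns, sets GP_PASSED, wake_up_all()
 *	B: wakes, proceeds
 *
 * so a single grace period covers both updaters.
 */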

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to one of the call_rcu() functions by
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of rcu_sync_exit(). It takes action based on events
 * that have taken place in the meantime, so that closely spaced
 * rcu_sync_enter() and rcu_sync_exit() pairs need not wait for a grace
 * period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period). If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
	WARN_ON_ONCE(rsp->cb_state == CB_IDLE);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * A new rcu_sync_enter() has happened; drop the callback.
		 */
		rsp->cb_state = CB_IDLE;
	} else if (rsp->cb_state == CB_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback
		 * to catch a later GP.
		 */
		rsp->cb_state = CB_PENDING;
		gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
	} else {
		/*
		 * We're at least a GP after rcu_sync_exit(); everybody will
		 * now have observed the write side critical section. Let 'em
		 * rip!
		 */
		rsp->cb_state = CB_IDLE;
		rsp->gp_state = GP_IDLE;
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
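
/*
 * Timeline sketch for the CB_REPLAY case above (illustrative):
 *
 *	rcu_sync_exit();	// CB_IDLE -> CB_PENDING, call_rcu()
 *	rcu_sync_enter();	// gp_count 0->1; gp_state still GP_PASSED,
 *				// so no new grace period is needed
 *	rcu_sync_exit();	// callback still queued: CB_PENDING -> CB_REPLAY
 *	rcu_sync_func();	// sees CB_REPLAY: requeue, back to CB_PENDING
 *	rcu_sync_func();	// gp_count == 0 and CB_PENDING: go fully idle
 */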

/**
 * rcu_sync_exit() - Allow readers back onto fast paths after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed. After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->cb_state == CB_IDLE) {
			rsp->cb_state = CB_PENDING;
			gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
		} else if (rsp->cb_state == CB_PENDING) {
			rsp->cb_state = CB_REPLAY;
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}
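
/*
 * Illustrative updater-side pattern (a sketch; "my_rss" and my_update()
 * are hypothetical):
 *
 *	rcu_sync_enter(&my_rss);	// force readers onto the slowpath
 *	my_update();			// readers observe the slowpath
 *	rcu_sync_exit(&my_rss);		// fastpaths resume after a GP
 */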

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int cb_state;

	WARN_ON_ONCE(rsp->gp_count);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->cb_state == CB_REPLAY)
		rsp->cb_state = CB_PENDING;
	cb_state = rsp->cb_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (cb_state != CB_IDLE) {
		gp_ops[rsp->gp_type].wait();
		WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
	}
}
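
/*
 * Illustrative teardown (a sketch; "my_rss" is hypothetical): every
 * rcu_sync_enter() must have been matched by rcu_sync_exit() first,
 * i.e. gp_count must already be zero:
 *
 *	rcu_sync_dtor(&my_rss);		// waits out any pending callback
 *	// now the enclosing object may be freed
 */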