/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
{
	spin_lock_init(&q->lock);
	lockdep_set_class(&q->lock, key);
	INIT_LIST_HEAD(&q->task_list);
}

EXPORT_SYMBOL(__init_waitqueue_head);
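
/*
 * Usage sketch (not part of this file; "my_wq" and "my_dev" are
 * hypothetical names): callers normally go through the
 * init_waitqueue_head() wrapper, which supplies a static lock class
 * key for lockdep, or declare the head statically:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *
 * or, for a dynamically allocated structure:
 *
 *	init_waitqueue_head(&my_dev->wq);
 */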

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

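/*
 * Exclusive waiters carry WQ_FLAG_EXCLUSIVE and are queued at the
 * tail, behind all non-exclusive waiters.  A wakeup then wakes every
 * non-exclusive waiter but stops after the requested number of
 * exclusive ones (usually one), which avoids thundering herds on
 * queues such as the one behind accept().
 */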
void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
43
44void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
45{
46 unsigned long flags;
47
48 spin_lock_irqsave(&q->lock, flags);
49 __remove_wait_queue(q, wait);
50 spin_unlock_irqrestore(&q->lock, flags);
51}
52EXPORT_SYMBOL(remove_wait_queue);
53
54
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops it from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
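
/*
 * A minimal usage sketch (not part of this file; "my_wq" and
 * "condition" are hypothetical): the canonical open-coded wait loop
 * pairs prepare_to_wait() with finish_wait() around a re-check of the
 * condition, so a wakeup arriving between the check and schedule()
 * cannot be lost:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */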

void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area), and
	 *  - all other users take the lock (i.e. we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
	else if (waitqueue_active(q))
		__wake_up_locked_key(q, mode, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
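
/*
 * autoremove_wake_function() is what DEFINE_WAIT() installs as the
 * wake callback: the waker removes the wait descriptor from the queue
 * at wakeup time, so a woken waiter's finish_wait() usually sees an
 * empty list entry and can skip taking the queue lock.
 */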

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wait, struct wait_bit_queue, wait);

	if (!wait_bit_cleared(wait_bit, key))
		return 0;

	return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the @action callbacks passed to __wait_on_bit() and
 * __wait_on_bit_lock() may return nonzero codes; a nonzero return
 * halts the wait and is propagated back to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags)) {
			ret = (*action)(q->key.flags);
			if (ret)
				break;
		}
		if (!is_sync_wait(&q->wait))
			ret = -EIOCBRETRY;
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode,
				struct wait_bit_queue *wait)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(stack_wait, word, bit);

	if (!wait)
		wait = &stack_wait;
	else {
		wait->key.flags = word;
		wait->key.bit_nr = bit;
	}

	return __wait_on_bit(wq, wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
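
/*
 * Sketch of an @action callback (hypothetical, not part of this
 * file): the simplest sleeping action just schedules away and
 * reports success:
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 * A synchronous waiter can then pass a NULL @wait, which makes the
 * helper use the on-stack descriptor, and sleep until bit 0 of
 * "my_word" clears:
 *
 *	out_of_line_wait_on_bit(&my_word, 0, my_bit_wait,
 *				TASK_UNINTERRUPTIBLE, NULL);
 */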

int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	do {
		int ret;

		prepare_to_wait_exclusive(wq, &q->wait, mode);
		if (!test_bit(q->key.bit_nr, q->key.flags))
			continue;
		ret = action(q->key.flags);
		if (!ret) {
			if (!is_sync_wait(&q->wait))
				ret = -EIOCBRETRY;
			else
				continue;
		}
		abort_exclusive_wait(wq, &q->wait, mode, &q->key);
		return ret;
	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
	finish_wait(wq, &q->wait);
	return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode,
				struct wait_bit_queue *wait)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(stack_wait, word, bit);

	if (!wait)
		wait = &stack_wait;
	else {
		wait->key.flags = word;
		wait->key.bit_nr = bit;
	}

	return __wait_on_bit_lock(wq, wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
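
/*
 * Bit-lock sketch (hypothetical names, not part of this file): the
 * acquire side loops on test_and_set_bit() with an exclusive wait in
 * between, so the caller owns the bit once this returns 0:
 *
 *	out_of_line_wait_on_bit_lock(&my_word, MY_LOCK_BIT, my_bit_wait,
 *				TASK_UNINTERRUPTIBLE, NULL);
 *
 * Releasing the bit is the clear-then-wake sequence shown after
 * wake_up_bit() below.
 */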

void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be issued prior to
 * calling this. Typically, this will be smp_mb__after_clear_bit(), but
 * in some cases where bitflags are manipulated non-atomically under a
 * lock, one may need a less regular barrier, such as fs/inode.c's
 * smp_mb(), because spin_unlock() does not guarantee a full memory
 * barrier.
 */
void wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
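
/*
 * Waker-side sketch (hypothetical names): clear the bit, order the
 * clear against the waitqueue_active() test in __wake_up_bit(), then
 * wake any waiters hashed onto this word/bit:
 *
 *	clear_bit(MY_LOCK_BIT, &my_word);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&my_word, MY_LOCK_BIT);
 */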

/*
 * The hashed waitqueues live in per-zone wait tables, so the table
 * used for a given word stays local to the memory it describes.  The
 * word's address is shifted left by the bit-index width so that the
 * bit number can be folded into the low bits before hashing.
 */
wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
	const struct zone *zone = page_zone(virt_to_page(word));
	unsigned long val = (unsigned long)word << shift | bit;

	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);