/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
{
	spin_lock_init(&q->lock);
	lockdep_set_class(&q->lock, key);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_waitqueue_head);
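/*
 * Illustration (not part of the original file): callers normally go
 * through the init_waitqueue_head() wrapper, which supplies a static
 * lockdep class key per call site. "my_wq" is a placeholder name:
 *
 *	wait_queue_head_t my_wq;
 *
 *	init_waitqueue_head(&my_wq);
 */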
void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);
void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
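/*
 * Illustration (hypothetical caller, not part of the original file):
 * the canonical open-coded wait loop that the barrier rule above makes
 * race-free. "my_wq" and "condition" are placeholders; DEFINE_WAIT()
 * supplies a descriptor wired to autoremove_wake_function():
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 *
 * Because set_current_state() is ordered after the queue add, a wakeup
 * racing with the condition test cannot be lost: the waker either sees
 * the queued waiter or the waiter sees the condition.
 */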
void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    our list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);
/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
	else if (waitqueue_active(q))
		__wake_up_locked_key(q, mode, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	/* only dequeue the waiter if it was actually woken */
	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
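/*
 * Illustration (from include/linux/wait.h of this era, abridged):
 * DEFINE_WAIT() is how most callers pick up this wake function, so a
 * woken task is removed from the queue by the waker and finish_wait()
 * usually sees an empty list:
 *
 *	#define DEFINE_WAIT(name)
 *		wait_queue_t name = {
 *			.private	= current,
 *			.func		= autoremove_wake_function,
 *			.task_list	= LIST_HEAD_INIT((name).task_list),
 *		}
 */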
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wait, struct wait_bit_queue, wait);

	/* ignore wakeups for other words/bits, or if the bit is still set */
	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);
/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the @action passed to __wait_on_bit() and __wait_on_bit_lock()
 * may return a nonzero code: a nonzero return halts the wait and is
 * propagated back to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags)) {
			ret = (*action)(q->key.flags);
			/*
			 * An async wait must not loop and block here;
			 * drop out and let the caller retry later.
			 */
			if (!is_sync_wait(&q->wait))
				break;
		}
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);
int __sched out_of_line_wait_on_bit(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
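/*
 * Illustration (hypothetical @action, not part of the original file):
 * the action decides how to block and whether to give up. A minimal
 * sketch for an interruptible wait; "my_flags_word" and MY_BIT_NR are
 * invented names:
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;
 *		schedule();
 *		return 0;
 *	}
 *
 *	err = out_of_line_wait_on_bit(&my_flags_word, MY_BIT_NR,
 *				      my_bit_wait, TASK_INTERRUPTIBLE);
 */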
int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	do {
		int ret;

		prepare_to_wait_exclusive(wq, &q->wait, mode);
		if (!test_bit(q->key.bit_nr, q->key.flags))
			continue;
		ret = action(q->key.flags);
		if (!ret && is_sync_wait(&q->wait))
			continue;
		/*
		 * The action failed (or this wait is async and must not
		 * block): give up the exclusive slot, passing a concurrent
		 * wakeup on to the next waiter instead of losing it.
		 */
		abort_exclusive_wait(wq, &q->wait, mode, &q->key);
		return ret;
	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
	finish_wait(wq, &q->wait);
	return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);
int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
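/*
 * Illustration (hypothetical bit "lock", not part of the original
 * file): the fast path tries test_and_set_bit() directly and only
 * waits when contended; on return the bit is owned by this task.
 * Names reuse the placeholders from the sketch above:
 *
 *	if (test_and_set_bit(MY_BIT_NR, &my_flags_word))
 *		out_of_line_wait_on_bit_lock(&my_flags_word, MY_BIT_NR,
 *					     my_bit_wait,
 *					     TASK_UNINTERRUPTIBLE);
 *	... critical section; the unlock side is sketched after
 *	wake_up_bit() below ...
 */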
void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);
/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be issued prior to
 * calling this. Typically, this will be smp_mb__after_clear_bit(), but
 * in some cases where bitflags are manipulated non-atomically under a
 * lock, one may need a less regular barrier, such as fs/inode.c's
 * smp_mb(), because spin_unlock() does not guarantee a full memory
 * barrier.
 */
void wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
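/*
 * Illustration (hypothetical unlock side, matching the barrier rule
 * above): clear the bit, order the clear against the
 * waitqueue_active() test inside __wake_up_bit(), then wake:
 *
 *	clear_bit(MY_BIT_NR, &my_flags_word);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&my_flags_word, MY_BIT_NR);
 */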
wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
	const struct zone *zone = page_zone(virt_to_page(word));
	/*
	 * Shifting by log2(BITS_PER_LONG) makes room for the bit number
	 * in the low bits, so different bits of the same word hash to
	 * different queues in the zone's wait table.
	 */
	unsigned long val = (unsigned long)word << shift | bit;

	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);