sched/wait: Collapse __wait_event_lock_irq()
[linux-block.git] / include/linux/wait.h
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H


#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
        unsigned int flags;
#define WQ_FLAG_EXCLUSIVE 0x01
        void *private;
        wait_queue_func_t func;
        struct list_head task_list;
};

struct wait_bit_key {
        void *flags;
        int bit_nr;
#define WAIT_ATOMIC_T_BIT_NR -1
};

struct wait_bit_queue {
        struct wait_bit_key key;
        wait_queue_t wait;
};

struct __wait_queue_head {
        spinlock_t lock;
        struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
        .private = tsk, \
        .func = default_wake_function, \
        .task_list = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
        wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
        .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
        .task_list = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
        { .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
        { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q) \
        do { \
                static struct lock_class_key __key; \
 \
                __init_waitqueue_head((q), #q, &__key); \
        } while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
        ({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
        q->flags = 0;
        q->private = p;
        q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
                                        wait_queue_func_t func)
{
        q->flags = 0;
        q->private = NULL;
        q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
        return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
        list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
                                              wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
                                         wait_queue_t *new)
{
        list_add_tail(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
                                              wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_tail(q, wait);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
                                        wait_queue_t *old)
{
        list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
                        void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m) \
        __wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
        __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
        __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
        __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define ___wait_cond_timeout(condition, ret) \
({ \
        bool __cond = (condition); \
        if (__cond && !ret) \
                ret = 1; \
        __cond || !ret; \
})

#define ___wait_signal_pending(state) \
        ((state == TASK_INTERRUPTIBLE && signal_pending(current)) || \
         (state == TASK_KILLABLE && fatal_signal_pending(current)))

#define ___wait_nop_ret int ret __always_unused

#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
do { \
        __label__ __out; \
        DEFINE_WAIT(__wait); \
 \
        for (;;) { \
                if (exclusive) \
                        prepare_to_wait_exclusive(&wq, &__wait, state); \
                else \
                        prepare_to_wait(&wq, &__wait, state); \
 \
                if (condition) \
                        break; \
 \
                if (___wait_signal_pending(state)) { \
                        ret = -ERESTARTSYS; \
                        if (exclusive) { \
                                abort_exclusive_wait(&wq, &__wait, \
                                                     state, NULL); \
                                goto __out; \
                        } \
                        break; \
                } \
 \
                cmd; \
        } \
        finish_wait(&wq, &__wait); \
__out: ; \
} while (0)

#define __wait_event(wq, condition) \
        ___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
                      ___wait_nop_ret, schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
        if (condition) \
                break; \
        __wait_event(wq, condition); \
} while (0)

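/*
 * Usage sketch (illustrative only, not part of this header; dev_wq and
 * dev_ready are hypothetical names): one context sleeps until a flag
 * set by another context becomes true.
 *
 *        static DECLARE_WAIT_QUEUE_HEAD(dev_wq);
 *        static int dev_ready;
 *
 * The waiter does:
 *
 *        wait_event(dev_wq, dev_ready);
 *
 * and the waker sets dev_ready = 1 and then calls wake_up(&dev_wq).
 */
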
#define __wait_event_timeout(wq, condition, ret) \
        ___wait_event(wq, ___wait_cond_timeout(condition, ret), \
                      TASK_UNINTERRUPTIBLE, 0, ret, \
                      ret = schedule_timeout(ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
        long __ret = timeout; \
        if (!(condition)) \
                __wait_event_timeout(wq, condition, __ret); \
        __ret; \
})

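/*
 * Usage sketch (illustrative, hypothetical names): wait up to one
 * second (HZ jiffies) for dev_ready.
 *
 *        long left = wait_event_timeout(dev_wq, dev_ready, HZ);
 *
 * left == 0 means the timeout elapsed with the condition still false;
 * left > 0 means the condition became true with 'left' jiffies to spare.
 */
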
#define __wait_event_interruptible(wq, condition, ret) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, ret, \
                      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __wait_event_interruptible(wq, condition, __ret); \
        __ret; \
})

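/*
 * Usage sketch (illustrative, hypothetical names): a process-context
 * waiter that propagates an interruption back to its caller.
 *
 *        int err = wait_event_interruptible(dev_wq, dev_ready);
 *        if (err)
 *                return err;
 *
 * err is -ERESTARTSYS when a signal arrived before the condition
 * became true, and 0 otherwise.
 */
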
#define __wait_event_interruptible_timeout(wq, condition, ret) \
        ___wait_event(wq, ___wait_cond_timeout(condition, ret), \
                      TASK_INTERRUPTIBLE, 0, ret, \
                      ret = schedule_timeout(ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
        long __ret = timeout; \
        if (!(condition)) \
                __wait_event_interruptible_timeout(wq, condition, __ret); \
        __ret; \
})

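/*
 * Usage sketch (illustrative, hypothetical names): the three possible
 * outcomes of wait_event_interruptible_timeout().
 *
 *        long ret = wait_event_interruptible_timeout(dev_wq, dev_ready, HZ);
 *
 * ret == 0 means the timeout elapsed, ret < 0 is -ERESTARTSYS (a signal
 * arrived first), and ret > 0 means the condition became true with
 * 'ret' jiffies of the timeout remaining.
 */
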
#define __wait_event_hrtimeout(wq, condition, timeout, state) \
({ \
        int __ret = 0; \
        DEFINE_WAIT(__wait); \
        struct hrtimer_sleeper __t; \
 \
        hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
                              HRTIMER_MODE_REL); \
        hrtimer_init_sleeper(&__t, current); \
        if ((timeout).tv64 != KTIME_MAX) \
                hrtimer_start_range_ns(&__t.timer, timeout, \
                                       current->timer_slack_ns, \
                                       HRTIMER_MODE_REL); \
 \
        for (;;) { \
                prepare_to_wait(&wq, &__wait, state); \
                if (condition) \
                        break; \
                if (state == TASK_INTERRUPTIBLE && \
                    signal_pending(current)) { \
                        __ret = -ERESTARTSYS; \
                        break; \
                } \
                if (!__t.task) { \
                        __ret = -ETIME; \
                        break; \
                } \
                schedule(); \
        } \
 \
        hrtimer_cancel(&__t.timer); \
        destroy_hrtimer_on_stack(&__t.timer); \
        finish_wait(&wq, &__wait); \
        __ret; \
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_hrtimeout(wq, condition, timeout, \
                                               TASK_UNINTERRUPTIBLE); \
        __ret; \
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
        long __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_hrtimeout(wq, condition, timeout, \
                                               TASK_INTERRUPTIBLE); \
        __ret; \
})

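/*
 * Usage sketch (illustrative, hypothetical names): wait with a 500 us
 * high-resolution timeout instead of a jiffies-based one.
 *
 *        int err = wait_event_interruptible_hrtimeout(dev_wq, dev_ready,
 *                                ktime_set(0, 500 * NSEC_PER_USEC));
 *
 * err is 0 on success, -ETIME if the timeout elapsed, or -ERESTARTSYS
 * if a signal arrived first.
 */
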
#define __wait_event_interruptible_exclusive(wq, condition, ret) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, ret, \
                      schedule())

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __wait_event_interruptible_exclusive(wq, condition, __ret);\
        __ret; \
})

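/*
 * Usage sketch (illustrative, hypothetical names): exclusive waiters
 * are queued with WQ_FLAG_EXCLUSIVE set, so a regular wake_up() wakes
 * at most one of them - useful when many threads compete for one slot.
 *
 *        int err = wait_event_interruptible_exclusive(slot_wq, slot_free);
 *        if (err)
 *                return err;
 */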

#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
        int __ret = 0; \
        DEFINE_WAIT(__wait); \
        if (exclusive) \
                __wait.flags |= WQ_FLAG_EXCLUSIVE; \
        do { \
                if (likely(list_empty(&__wait.task_list))) \
                        __add_wait_queue_tail(&(wq), &__wait); \
                set_current_state(TASK_INTERRUPTIBLE); \
                if (signal_pending(current)) { \
                        __ret = -ERESTARTSYS; \
                        break; \
                } \
                if (irq) \
                        spin_unlock_irq(&(wq).lock); \
                else \
                        spin_unlock(&(wq).lock); \
                schedule(); \
                if (irq) \
                        spin_lock_irq(&(wq).lock); \
                else \
                        spin_lock(&(wq).lock); \
        } while (!(condition)); \
        __remove_wait_queue(&(wq), &__wait); \
        __set_current_state(TASK_RUNNING); \
        __ret; \
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

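/*
 * Usage sketch (illustrative, hypothetical names): the caller already
 * holds wq.lock, tests and consumes the event under it, and the waker
 * uses wake_up_locked() while holding the same lock.
 *
 *        spin_lock(&dev_wq.lock);
 *        err = wait_event_interruptible_locked(dev_wq, dev_ready);
 *        if (!err)
 *                dev_ready = 0;
 *        spin_unlock(&dev_wq.lock);
 */
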
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if it is woken up, the other exclusive waiters queued behind
 * it are not considered for that wakeup.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if it is woken up, the other exclusive waiters queued behind
 * it are not considered for that wakeup.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))


#define __wait_event_killable(wq, condition, ret) \
do { \
        DEFINE_WAIT(__wait); \
 \
        for (;;) { \
                prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
                if (condition) \
                        break; \
                if (!fatal_signal_pending(current)) { \
                        schedule(); \
                        continue; \
                } \
                ret = -ERESTARTSYS; \
                break; \
        } \
        finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __wait_event_killable(wq, condition, __ret); \
        __ret; \
})

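/*
 * Usage sketch (illustrative, hypothetical names): like the
 * interruptible variant, but only a fatal signal breaks the sleep, so
 * ordinary signals do not cause a spurious -ERESTARTSYS.
 *
 *        int err = wait_event_killable(dev_wq, dev_ready);
 *        if (err)
 *                return err;
 */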

#define __wait_event_lock_irq(wq, condition, lock, cmd) \
        ___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
                      ___wait_nop_ret, \
                      spin_unlock_irq(&lock); \
                      cmd; \
                      schedule(); \
                      spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
do { \
        if (condition) \
                break; \
        __wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock) \
do { \
        if (condition) \
                break; \
        __wait_event_lock_irq(wq, condition, lock, ); \
} while (0)

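/*
 * Usage sketch (illustrative, hypothetical names): @condition is
 * evaluated with @lock held and interrupts disabled; the lock is
 * dropped only around schedule().
 *
 *        spin_lock_irq(&dev_lock);
 *        wait_event_lock_irq(dev_wq, list_empty(&dev_queue), dev_lock);
 *        spin_unlock_irq(&dev_lock);
 *
 * On return dev_lock is held again and the condition was true while
 * holding it.
 */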

#define __wait_event_interruptible_lock_irq(wq, condition, \
                                            lock, ret, cmd) \
do { \
        DEFINE_WAIT(__wait); \
 \
        for (;;) { \
                prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
                if (condition) \
                        break; \
                if (signal_pending(current)) { \
                        ret = -ERESTARTSYS; \
                        break; \
                } \
                spin_unlock_irq(&lock); \
                cmd; \
                schedule(); \
                spin_lock_irq(&lock); \
        } \
        finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
        int __ret = 0; \
 \
        if (!(condition)) \
                __wait_event_interruptible_lock_irq(wq, condition, \
                                                    lock, __ret, cmd); \
        __ret; \
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
        int __ret = 0; \
 \
        if (!(condition)) \
                __wait_event_interruptible_lock_irq(wq, condition, \
                                                    lock, __ret, ); \
        __ret; \
})

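/*
 * Usage sketch (illustrative, hypothetical names): as above, but the
 * sleep is interruptible, so -ERESTARTSYS must be handled once the
 * lock is held again.
 *
 *        spin_lock_irq(&dev_lock);
 *        err = wait_event_interruptible_lock_irq(dev_wq, dev_ready, dev_lock);
 *        spin_unlock_irq(&dev_lock);
 *        if (err)
 *                return err;
 */
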
#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
                                                    lock, ret) \
do { \
        DEFINE_WAIT(__wait); \
 \
        for (;;) { \
                prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
                if (___wait_cond_timeout(condition, ret)) \
                        break; \
                if (signal_pending(current)) { \
                        ret = -ERESTARTSYS; \
                        break; \
                } \
                spin_unlock_irq(&lock); \
                ret = schedule_timeout(ret); \
                spin_lock_irq(&lock); \
        } \
        finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies if the
 * @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
                                                  timeout) \
({ \
        int __ret = timeout; \
 \
        if (!(condition)) \
                __wait_event_interruptible_lock_irq_timeout( \
                                wq, condition, lock, __ret); \
        __ret; \
})

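/*
 * Usage sketch (illustrative, hypothetical names): the locked wait
 * combined with a jiffies timeout; the return value is handled like
 * that of wait_event_interruptible_timeout().
 *
 *        spin_lock_irq(&dev_lock);
 *        ret = wait_event_interruptible_lock_irq_timeout(dev_wq, dev_ready,
 *                                                        dev_lock, HZ);
 *        spin_unlock_irq(&dev_lock);
 */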

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy. DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
                             signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
                                           signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
                          unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
        wait_queue_t name = { \
                .private = current, \
                .func = function, \
                .task_list = LIST_HEAD_INIT((name).task_list), \
        }

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define DEFINE_WAIT_BIT(name, word, bit) \
        struct wait_bit_queue name = { \
                .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
                .wait = { \
                        .private = current, \
                        .func = wake_bit_function, \
                        .task_list = \
                                LIST_HEAD_INIT((name).wait.task_list), \
                }, \
        }

#define init_wait(wait) \
        do { \
                (wait)->private = current; \
                (wait)->func = autoremove_wake_function; \
                INIT_LIST_HEAD(&(wait)->task_list); \
                (wait)->flags = 0; \
        } while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
                              int (*action)(void *), unsigned mode)
{
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit, action, mode);
}

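/*
 * Usage sketch (illustrative; my_flags, MY_BIT_BUSY and my_bit_wait()
 * are hypothetical): sleep until another thread clears a busy bit and
 * calls wake_up_bit(&my_flags, MY_BIT_BUSY).
 *
 *        wait_on_bit(&my_flags, MY_BIT_BUSY, my_bit_wait,
 *                    TASK_UNINTERRUPTIBLE);
 *
 * where my_bit_wait() is an @action callback that typically just calls
 * schedule() and returns 0 (or a -Exxx value to abort the wait).
 */
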
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int wait_on_bit_lock(void *word, int bit,
                                   int (*action)(void *), unsigned mode)
{
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
        if (atomic_read(val) == 0)
                return 0;
        return out_of_line_wait_on_atomic_t(val, action, mode);
}
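
/*
 * Usage sketch (illustrative; obj->users and my_atomic_wait() are
 * hypothetical): wait for a reference count kept in an atomic_t to
 * drop to zero; the final put is expected to call wake_up_atomic_t().
 *
 *        wait_on_atomic_t(&obj->users, my_atomic_wait, TASK_UNINTERRUPTIBLE);
 */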

#endif