/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

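/*
 * Callers normally don't use __init_waitqueue_head() directly: the
 * init_waitqueue_head() wrapper (or DECLARE_WAIT_QUEUE_HEAD() for static
 * heads) supplies the lockdep class key. A minimal usage sketch, with a
 * hypothetical structure for illustration:
 *
 *	struct my_device {			// hypothetical example
 *		struct wait_queue_head waitq;
 *		bool data_ready;
 *	};
 *
 *	static void my_device_setup(struct my_device *dev)
 *	{
 *		init_waitqueue_head(&dev->waitq);
 *		dev->data_ready = false;
 *	}
 */
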
void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

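/*
 * Worked example (illustrative only): suppose the queue holds, in order,
 * two non-exclusive waiters A and B and two exclusive waiters C and D
 * (exclusive entries are queued at the tail):
 *
 *	A (non-excl) -> B (non-excl) -> C (excl) -> D (excl)
 *
 * A wake_up(), i.e. nr_exclusive == 1, wakes A, B and C: once C's wake
 * function returns nonzero, !--nr_exclusive becomes true and the walk
 * stops, leaving D queued for a later wakeup. wake_up_all() passes
 * nr_exclusive == 0, for which !--nr_exclusive can never become true,
 * so all four waiters are woken.
 */
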
static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&wq_head->lock, flags);
	nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key, &bookmark);
	spin_unlock_irqrestore(&wq_head->lock, flags);

	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);

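/*
 * __wake_up() is normally reached via the wake_up*() macros in
 * <linux/wait.h>; wake_up(x) expands to __wake_up(x, TASK_NORMAL, 1, NULL).
 * A waker-side sketch matching the hypothetical my_device example above:
 *
 *	static void my_device_data_arrived(struct my_device *dev)
 *	{
 *		dev->data_ready = true;		// publish the condition first
 *		wake_up(&dev->waitq);		// then wake any waiters
 *	}
 */
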
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

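/*
 * A caller that manages wq_head->lock itself can still bound its lock
 * hold times by re-walking with a bookmark, along the lines of the loop
 * in __wake_up_common_lock() above (sketch only):
 *
 *	wait_queue_entry_t bookmark;
 *
 *	bookmark.flags = 0;
 *	bookmark.private = NULL;
 *	bookmark.func = NULL;
 *	INIT_LIST_HEAD(&bookmark.entry);
 *
 *	do {
 *		spin_lock_irq(&wq_head->lock);
 *		__wake_up_locked_key_bookmark(wq_head, TASK_NORMAL, key, &bookmark);
 *		spin_unlock_irq(&wq_head->lock);
 *	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
 */
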
/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

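/*
 * Illustration: a waker that hands off work and immediately blocks for a
 * reply can use the sync variant, typically via wake_up_interruptible_sync(),
 * to hint that migrating the wakee is pointless (hypothetical names):
 *
 *	dev->reply_ready = true;			// publish condition
 *	wake_up_interruptible_sync(&dev->replyq);	// waker sleeps soon
 *	wait_event(dev->ackq, dev->acked);		// and blocks right away
 */
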
/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

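/*
 * The canonical open-coded wait loop built on prepare_to_wait(): the
 * condition is (re)checked after prepare_to_wait() has set the task
 * state, so the ordering described above ensures a concurrent wakeup
 * cannot be missed. Sketch with a hypothetical condition:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
 *		if (dev->data_ready)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&dev->waitq, &wait);
 */
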
void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * An exclusive waiter must not fail if it was selected by a
		 * wakeup; it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that a set-condition + wakeup after that
		 * can't see us; it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);

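/*
 * prepare_to_wait_event() is the workhorse behind the wait_event*()
 * macros; ___wait_event() in <linux/wait.h> loops roughly as follows
 * (simplified sketch, not the exact macro expansion):
 *
 *	init_wait_entry(&wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *	for (;;) {
 *		long ret = prepare_to_wait_event(&wq_head, &wq_entry, state);
 *
 *		if (condition)
 *			break;
 *		if (ret)	// -ERESTARTSYS: a signal arrived
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wq_entry);
 */
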
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

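/*
 * These helpers back the wait_event_interruptible_locked*() family, where
 * the caller already holds wq.lock around both the condition test and the
 * sleep. Caller-side sketch (hypothetical condition and helper):
 *
 *	spin_lock(&dev->waitq.lock);
 *	err = wait_event_interruptible_locked(dev->waitq, dev->data_ready);
 *	if (!err)
 *		consume_data_locked(dev);	// hypothetical helper
 *	spin_unlock(&dev->waitq.lock);
 */
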
/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

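/*
 * A concrete caller-side sketch of the pattern diagrammed above (this
 * shape is common in networking receive paths; names are illustrative):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	while (!data_available(sk)) {		// hypothetical condition
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *		if (!timeout || signal_pending(current))
 *			break;
 *	}
 *	remove_wait_queue(&wq_head, &wait);
 */
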
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);