// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

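/*
 * Example (editor's sketch, not part of the original file): callers rarely
 * invoke __init_waitqueue_head() directly. A static head is declared with
 * DECLARE_WAIT_QUEUE_HEAD(), and a dynamically allocated one is set up with
 * the init_waitqueue_head() macro, which supplies the lockdep name and key:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_static_wq);
 *
 *	struct foo {			// 'foo' is a hypothetical structure
 *		struct wait_queue_head wq;
 *	};
 *	// ...during initialization of a dynamically allocated 'foo':
 *	init_waitqueue_head(&foo->wq);
 */
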
void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(add_wait_queue_priority);

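/*
 * Queue layout sketch (editor's note, not in the original file): exclusive
 * entries are appended at the tail, non-exclusive entries are added at the
 * head, and priority entries stay ahead of everything else (in this kernel
 * version, __add_wait_queue() in <linux/wait.h> skips past WQ_FLAG_PRIORITY
 * entries before inserting), so a waker walking the list sees:
 *
 *	head -> [priority ...] -> [non-exclusive ...] -> [exclusive ...]
 *
 * A priority entry is also exclusive, so when its wake function succeeds it
 * can consume the event before any other waiter is considered.
 */
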
void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive is a small
 * positive number) then we wake that number of exclusive tasks, and
 * potentially all the non-exclusive tasks. Normally, exclusive tasks will
 * be at the end of the list and any non-exclusive tasks will be woken
 * first. A priority task may be at the head of the list, and can consume
 * the event without any other tasks being woken.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

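/*
 * Worked example (editor's sketch, not in the original file): with a queue
 * holding one non-exclusive waiter N followed by two exclusive waiters E1
 * and E2, a call such as
 *
 *	__wake_up_common(wq_head, TASK_NORMAL, 1, 0, NULL, NULL);
 *
 * wakes N (non-exclusive wakeups never decrement nr_exclusive), then wakes
 * E1 and stops, because E1's wake function returns nonzero and
 * --nr_exclusive reaches zero. E2 stays queued. If E1 had already been
 * woken by someone else, its wake function could return 0 and the walk
 * would continue on to E2.
 */
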
static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);

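/*
 * Editor's note: callers normally reach __wake_up() through the wrapper
 * macros in <linux/wait.h>, e.g.:
 *
 *	#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 *	#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
 *	#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 *
 * so the common wake_up() means "wake one exclusive waiter, plus any
 * non-exclusive ones", while wake_up_all() passes nr_exclusive == 0 to wake
 * every waiter on the queue.
 */
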
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	if (unlikely(!wq_head))
		return;

	__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

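/*
 * Usage sketch (editor's note, not in the original file): the WF_SYNC hint
 * fits producers that block right after waking the consumer. Here 'buf',
 * 'enqueue' and the surrounding structure are hypothetical:
 *
 *	spin_lock(&buf->lock);
 *	enqueue(buf, item);
 *	spin_unlock(&buf->lock);
 *	wake_up_interruptible_sync_poll(&buf->wq, EPOLLIN);
 *	// the producer now sleeps waiting for buffer space, so the
 *	// scheduler may run the woken consumer on this CPU rather than
 *	// migrating it to another one.
 *
 * wake_up_interruptible_sync_poll() is one of the <linux/wait.h> wrappers
 * that lands here with WF_SYNC set.
 */
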
/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			       unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

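/*
 * Canonical open-coded wait loop (editor's sketch, not in the original
 * file), built on the primitives above; 'condition' stands for whatever the
 * caller is waiting on:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current)) {
 *			err = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 *
 * Because set_current_state() in prepare_to_wait() orders the state change
 * against the condition test, a wakeup that sets the condition between the
 * test and schedule() is not lost: it puts us back in TASK_RUNNING first.
 */
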
/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	bool was_empty = false;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry)) {
		was_empty = list_empty(&wq_head->head);
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	}
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
	return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

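/*
 * Editor's note: the exclusive variant is the same loop as sketched above
 * with prepare_to_wait_exclusive(), which queues at the tail with
 * WQ_FLAG_EXCLUSIVE set; a plain wake_up() then wakes only one such waiter,
 * avoiding a thundering herd when many tasks block on the same event.
 */
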
void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);

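/*
 * Editor's sketch (not in the original file): prepare_to_wait_event() is
 * the workhorse behind the wait_event*() macro family. Slightly simplified,
 * ___wait_event() in <linux/wait.h> expands to a loop of roughly this shape:
 *
 *	init_wait_entry(&wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *	for (;;) {
 *		long intr = prepare_to_wait_event(&wq_head, &wq_entry, state);
 *		if (condition)
 *			break;
 *		if (intr) {		// -ERESTARTSYS from a pending signal
 *			ret = intr;
 *			goto out;	// entry was already dequeued for us
 *		}
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wq_entry);
 *
 * The signal path can skip finish_wait() because prepare_to_wait_event()
 * already did the list_del_init() before returning -ERESTARTSYS.
 */
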
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

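/*
 * Editor's note (an assumption about the callers, not stated in this file):
 * these helpers appear to back the wait_event_interruptible_locked*() macros
 * in <linux/wait.h>, which are used when the caller already holds wq->lock
 * around its condition check. The helper drops and retakes that lock around
 * schedule(), with or without re-enabling interrupts, hence the two variants.
 */
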
/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 * - we use the "careful" check that verifies both
	 *   the next and prev pointers, so that there cannot
	 *   be any half-pending updates in progress on other
	 *   CPU's that we haven't seen yet (and that might
	 *   still change the stack area),
	 * and
	 * - all other users take the lock (ie we can only
	 *   have _one_ other CPU that looks at or modifies
	 *   the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init_careful(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

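/*
 * Editor's note: autoremove_wake_function() is the wake function installed
 * by DEFINE_WAIT() and by init_wait_entry() above. Because it dequeues the
 * entry on a successful wakeup, a woken waiter can often skip the lock in
 * finish_wait(): list_empty_careful() sees an already-empty entry.
 */
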
static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

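/*
 * Concrete usage sketch (editor's note) fleshing out the skeleton above;
 * 'sk', 'timeo' and data_ready() are hypothetical, in the style of net/
 * callers:
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(sk_sleep(sk), &wait);
 *	while (!data_ready(sk)) {
 *		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
 *		if (!timeo || signal_pending(current))
 *			break;
 *	}
 *	remove_wait_queue(sk_sleep(sk), &wait);
 *
 * Unlike the prepare_to_wait() pattern, the entry stays queued across
 * iterations; the WQ_FLAG_WOKEN handshake replaces the task-state dance,
 * so the condition check needs no lock against the waker.
 */
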
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);