/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and at most nr_exclusive
 * exclusive tasks.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&wq_head->lock, flags);
	nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key, &bookmark);
	spin_unlock_irqrestore(&wq_head->lock, flags);

	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
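
/*
 * Illustrative note, not part of this file: the common wake_up*() macros
 * in <linux/wait.h> all funnel into __wake_up(); at the time of writing:
 *
 *	#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 *	#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
 *	#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
 *	#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 */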

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);
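
/*
 * For illustration (the wrappers live in <linux/wait.h>, reproduced here
 * as a sketch): callers that already hold wq_head->lock use e.g.
 *
 *	#define wake_up_locked(x)	__wake_up_locked((x), TASK_NORMAL, 1)
 *	#define wake_up_all_locked(x)	__wake_up_locked((x), TASK_NORMAL, 0)
 */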

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - i.e. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
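
/*
 * Illustrative note, reproduced from <linux/wait.h> as of this era of the
 * tree: the sync-wakeup convenience macros map onto the functions above,
 * e.g.:
 *
 *	#define wake_up_interruptible_sync(x) \
 *		__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 *	#define wake_up_interruptible_sync_poll(x, m) \
 *		__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
 */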

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
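
/*
 * A sketch of the classic open-coded wait loop built on these helpers
 * (illustrative only; 'wq_head' and 'condition' are assumed to be
 * supplied by the caller):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 *
 * prepare_to_wait_exclusive() below is the same dance, but flags the entry
 * WQ_FLAG_EXCLUSIVE and queues it at the tail so wake-one wakeups stop there.
 */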

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * An exclusive waiter must not fail if it was selected by a
		 * wakeup; it should "consume" the condition we were waiting
		 * for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that a set-condition plus a wakeup
		 * after it can't see us; it should wake up another exclusive
		 * waiter if we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
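
/*
 * Illustrative sketch, simplified from the ___wait_event() macro in
 * <linux/wait.h>, which is the main user of prepare_to_wait_event():
 *
 *	init_wait_entry(&wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *	for (;;) {
 *		long ret = prepare_to_wait_event(&wq_head, &wq_entry, state);
 *
 *		if (condition)
 *			break;
 *		if (ret)	// signal arrived while state is interruptible
 *			return ret;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wq_entry);
 */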

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
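
/*
 * Both helpers back the wait_event_interruptible_locked*() family. A
 * simplified caller-side sketch (illustrative; 'wq' and 'condition' are
 * caller-supplied, and the caller takes wq.lock itself):
 *
 *	DEFINE_WAIT(wait);
 *	int ret = 0;
 *
 *	spin_lock(&wq.lock);
 *	while (!condition) {
 *		ret = do_wait_intr(&wq, &wait);
 *		if (ret)
 *			break;
 *	}
 *	__remove_wait_queue(&wq, &wait);
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&wq.lock);
 */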

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 * - we use the "careful" check that verifies both
	 *   the next and prev pointers, so that there cannot
	 *   be any half-pending updates in progress on other
	 *   CPUs that we haven't seen yet (and that might
	 *   still change the stack area),
	 * and
	 * - all other users take the lock (ie we can only
	 *   have _one_ other CPU that looks at or modifies
	 *   the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
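
/*
 * For illustration: autoremove_wake_function() is the callback that
 * DEFINE_WAIT() wires up by default; reproduced here from <linux/wait.h>
 * as of this era of the tree:
 *
 *	#define DEFINE_WAIT_FUNC(name, function)			\
 *		struct wait_queue_entry name = {			\
 *			.private	= current,			\
 *			.func		= function,			\
 *			.entry		= LIST_HEAD_INIT((name).entry),	\
 *		}
 *
 *	#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
 */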

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

387/*
388 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
389 *
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100390 * add_wait_queue(&wq_head, &wait);
Peter Zijlstra61ada522014-09-24 10:18:47 +0200391 * for (;;) {
392 * if (condition)
393 * break;
394 *
395 * p->state = mode; condition = true;
396 * smp_mb(); // A smp_wmb(); // C
Ingo Molnar50816c42017-03-05 10:33:16 +0100397 * if (!wq_entry->flags & WQ_FLAG_WOKEN) wq_entry->flags |= WQ_FLAG_WOKEN;
Peter Zijlstra61ada522014-09-24 10:18:47 +0200398 * schedule() try_to_wake_up();
399 * p->state = TASK_RUNNING; ~~~~~~~~~~~~~~~~~~
Ingo Molnar50816c42017-03-05 10:33:16 +0100400 * wq_entry->flags &= ~WQ_FLAG_WOKEN; condition = true;
Peter Zijlstra61ada522014-09-24 10:18:47 +0200401 * smp_mb() // B smp_wmb(); // C
Ingo Molnar50816c42017-03-05 10:33:16 +0100402 * wq_entry->flags |= WQ_FLAG_WOKEN;
Peter Zijlstra61ada522014-09-24 10:18:47 +0200403 * }
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100404 * remove_wait_queue(&wq_head, &wait);
Peter Zijlstra61ada522014-09-24 10:18:47 +0200405 *
406 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	set_current_state(mode); /* A */
	/*
	 * The above implies an smp_mb(), which matches with the smp_wmb() from
	 * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
	 * also observe all state before the wakeup.
	 */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below implies an smp_mb(), it too pairs with the smp_wmb() from
	 * woken_wake_function() such that we must either observe the wait
	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
	 * an event.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/*
	 * Although this function is called under the waitqueue lock, LOCK
	 * doesn't imply a write barrier, and users expect write-barrier
	 * semantics from wakeup functions. The following smp_wmb() is
	 * equivalent to the smp_wmb() in try_to_wake_up() and is paired
	 * with the smp_store_mb() in wait_woken().
	 */
	smp_wmb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);