/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/poll.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

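/*
 * Illustrative sketch, not part of this file: a waiter with a custom wakeup
 * callback, registered non-exclusively. "my_wq", my_wake() and "condition"
 * are hypothetical names; the callback must match wait_queue_func_t and is
 * invoked under wq_head->lock from the waker's context. Real code loops to
 * tolerate spurious wakeups.
 *
 *	static int my_wake(struct wait_queue_entry *wq_entry, unsigned mode,
 *			   int sync, void *key)
 *	{
 *		// e.g. hand off some per-waiter work, then do the default wakeup
 *		return default_wake_function(wq_entry, mode, sync, key);
 *	}
 *
 *	DEFINE_WAIT_FUNC(wait, my_wake);
 *
 *	add_wait_queue(&my_wq, &wait);
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&my_wq, &wait);
 */
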
/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&wq_head->lock, flags);
	nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key, &bookmark);
	spin_unlock_irqrestore(&wq_head->lock, flags);

	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier
 * before accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);

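/*
 * Illustrative sketch, not part of this file: __wake_up() is normally
 * reached through the wake_up*() macros in <linux/wait.h>, paired with a
 * wait_event*() waiter. "my_wq" and "my_flag" are hypothetical names.
 *
 *	// waiter				// waker
 *	wait_event(my_wq, my_flag);		my_flag = 1;
 *						wake_up(&my_wq);
 *						// -> __wake_up(&my_wq, TASK_NORMAL, 1, NULL)
 */
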
/*
 * Same as __wake_up() but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier
 * before accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

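/*
 * Illustrative sketch, not part of this file: a waker that is itself about
 * to block can use a _sync variant from <linux/wait.h> to hint that the
 * wakee need not be migrated to another CPU. "my_wq" is hypothetical.
 *
 *	wake_up_interruptible_sync(&my_wq);
 *	// -> __wake_up_sync(&my_wq, TASK_INTERRUPTIBLE, 1)
 *	schedule();	// the waker schedules away shortly afterwards
 */
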
/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}

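/*
 * Illustrative sketch, not part of this file: callers normally go through
 * the wake_up_pollfree() wrapper in <linux/wait.h>, for the rare case where
 * a waitqueue's lifetime ends while pollers may still hold references to
 * it. "my_obj" is a hypothetical object being torn down.
 *
 *	wake_up_pollfree(&my_obj->wq);
 *	// all epoll waiters must now have unregistered themselves
 */
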
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

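/*
 * Illustrative sketch, not part of this file: the canonical open-coded wait
 * loop built on prepare_to_wait()/finish_wait(). "my_wq" and "condition"
 * are hypothetical names.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */
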
void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);

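/*
 * Illustrative sketch, not part of this file: roughly how the
 * ___wait_event() machinery in <linux/wait.h> drives this function.
 * "wq_head", "exclusive" and "condition" are hypothetical here, and error
 * propagation is simplified.
 *
 *	struct wait_queue_entry wq_entry;
 *	long ret;
 *
 *	init_wait_entry(&wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *	for (;;) {
 *		ret = prepare_to_wait_event(&wq_head, &wq_entry, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (ret < 0)	// pending signal: -ERESTARTSYS, entry already dequeued
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wq_entry);
 */
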
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

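/*
 * Illustrative sketch, not part of this file: a loop in the style of the
 * wait_event_interruptible_locked() macros, which are the intended callers
 * of do_wait_intr(). "my_wq" and "condition" are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *	int err;
 *
 *	spin_lock(&my_wq.lock);
 *	do {
 *		err = do_wait_intr(&my_wq, &wait);	// drops/retakes the lock around schedule()
 *	} while (!condition && !err);
 *	__remove_wait_queue(&my_wq, &wait);
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&my_wq.lock);
 */
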
/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				   condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);