/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/poll.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}
EXPORT_SYMBOL(__init_waitqueue_head);
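
/*
 * Illustrative initialization (a sketch; 'my_wq' and 'obj' are made-up
 * names, the wrappers live in <linux/wait.h>):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);	// static initializer
 *
 *	init_waitqueue_head(&obj->wq);		// for dynamic objects; expands
 *						// to __init_waitqueue_head()
 *						// with a per-site lockdep key
 */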

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
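
/*
 * Exclusive entries are queued at the tail, so non-exclusive waiters (added
 * at the head) are always woken first and a wakeup stops after one exclusive
 * waiter.  A sketch of the classic use (many threads each waiting to take
 * one event, e.g. accepting connections on one socket):
 *
 *	add_wait_queue_exclusive(&wq, &wait);
 *	...
 *	wake_up(&wq);	// wakes exactly one of the exclusive waiters
 */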

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold for breaking out of the wait-queue walk: it allows
 * the waker to periodically drop the wait-queue lock while walking a
 * long queue.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else {
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
	}

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}
	return nr_exclusive;
}

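/*
 * Like __wake_up_common(), but takes and drops wq_head->lock itself.  The
 * on-stack bookmark entry lets __wake_up_common() break out of the walk
 * after WAITQUEUE_WALK_BREAK_CNT waiters and resume from the same position
 * once the lock has been dropped and re-taken, bounding the time the lock
 * is held on very long wait queues.
 */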
static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&wq_head->lock, flags);
	nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key, &bookmark);
	spin_unlock_irqrestore(&wq_head->lock, flags);

	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
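
/*
 * Most callers use the wake_up*() wrappers from <linux/wait.h>; e.g.
 * wake_up(&wq) is __wake_up(&wq, TASK_NORMAL, 1, NULL).  The usual pattern
 * (a sketch; 'cond' is the caller's own condition):
 *
 *	obj->cond = true;	// store the condition first ...
 *	wake_up(&obj->wq);	// ... then wake any waiters
 */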

/*
 * Same as __wake_up() but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - i.e. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

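/*
 * Wake up all waiters with POLLHUP | POLLFREE because the waitqueue is about
 * to be freed; poll waiters (epoll, aio poll) must dequeue themselves in
 * response.  Callers are expected to go through the wake_up_pollfree()
 * wrapper in <linux/wait.h> rather than call this directly.
 */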
void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, (void *)(POLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
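
/*
 * The canonical open-coded wait loop (a sketch; 'condition' stands for the
 * caller's predicate):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 *
 * The task state is set before the condition is rechecked, so a wakeup that
 * lands between the check and schedule() is not lost.
 */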

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * An exclusive waiter must not fail if it was selected by a
		 * wakeup; it should "consume" the condition it was waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that a set-condition + wakeup done
		 * after that can't see us; it should wake up another exclusive
		 * waiter if we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
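
/*
 * prepare_to_wait_event() is the workhorse behind the wait_event*() macros;
 * ___wait_event() in <linux/wait.h> does, roughly (a simplified sketch):
 *
 *	init_wait_entry(&wait, 0);
 *	for (;;) {
 *		long err = prepare_to_wait_event(&wq, &wait, state);
 *		if (condition)
 *			break;
 *		if (err) {		// signal pending: -ERESTARTSYS
 *			ret = err;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */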

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wait queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
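
/*
 * A sketch of the intended calling convention for the two functions above
 * ('condition' is the caller's predicate; wq.lock is held across the loop):
 *
 *	DEFINE_WAIT_FUNC(wait, default_wake_function);
 *
 *	spin_lock(&wq.lock);
 *	while (!condition) {
 *		err = do_wait_intr(&wq, &wait);	// drops/retakes wq.lock
 *		if (err)
 *			break;			// signal: -ERESTARTSYS
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	__remove_wait_queue(&wq, &wait);	// still under wq.lock
 *	spin_unlock(&wq.lock);
 */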

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (i.e. we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->entry);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
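
/*
 * This is the default wake function for DEFINE_WAIT() and init_wait()
 * waiters: a successful wakeup removes the entry from the queue, so
 * finish_wait() usually finds the list empty and can skip taking the lock.
 */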

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()                       // in woken_wake_function()
 *
 *     p->state = mode;                         wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A                           try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))     <full barrier>
 *         schedule()                              if (p->state & mode)
 *     p->state = TASK_RUNNING;                       p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;       ~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B                           condition = true;
 * }                                            smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);          wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);