/* GNU Linux-libre 4.14.266-gnu1 — include/linux/wait.h (releases.git) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_WAIT_H
3 #define _LINUX_WAIT_H
4 /*
5  * Linux wait queue related types and methods
6  */
7 #include <linux/list.h>
8 #include <linux/stddef.h>
9 #include <linux/spinlock.h>
10
11 #include <asm/current.h>
12 #include <uapi/linux/wait.h>
13
typedef struct wait_queue_entry wait_queue_entry_t;

/*
 * Wakeup callback invoked for each queued entry; @key is the opaque value
 * passed to __wake_up() (e.g. a poll event mask for the _poll variants).
 */
typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* wake-one semantics, see __add_wait_queue_exclusive() */
#define WQ_FLAG_WOKEN		0x02	/* set when the entry has been woken */
#define WQ_FLAG_BOOKMARK	0x04	/* scan-resume marker, see __wake_up_locked_key_bookmark() */
23
/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;		/* WQ_FLAG_* bits above */
	void			*private;	/* waiting task (see init_waitqueue_entry()) or callback data */
	wait_queue_func_t	func;		/* called on wakeup, e.g. default_wake_function */
	struct list_head	entry;		/* linkage into wait_queue_head::head */
};
33
struct wait_queue_head {
	spinlock_t		lock;		/* protects the waiter list below */
	struct list_head	head;		/* list of queued wait_queue_entry::entry */
};
typedef struct wait_queue_head wait_queue_head_t;
39
40 struct task_struct;
41
/*
 * Macros for declaration and initialisation of the datatypes
 */

/* Static initializer for a wait_queue_entry bound to task @tsk. */
#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a wait_queue_head; the waiter list starts empty. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= { &(name).head, &(name).head } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
60
extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Runtime initialization of a wait_queue_head. The static __key gives each
 * use site of this macro its own lockdep class for the head's spinlock.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
/* On-stack heads need runtime init under lockdep to get a valid lock class. */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
78
79 static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
80 {
81         wq_entry->flags         = 0;
82         wq_entry->private       = p;
83         wq_entry->func          = default_wake_function;
84 }
85
86 static inline void
87 init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
88 {
89         wq_entry->flags         = 0;
90         wq_entry->private       = NULL;
91         wq_entry->func          = func;
92 }
93
/**
 * waitqueue_active - locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))         if (@cond)
 *        wake_up(wq_head);                      break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	/* Plain (unlocked) emptiness check -- see the barrier rules above. */
	return !list_empty(&wq_head->head);
}
128
/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side, e.g. the smp_mb() from set_current_state()
	 * shown in the waitqueue_active() comment above.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
149
150 extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
151 extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
152 extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
153
154 static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
155 {
156         list_add(&wq_entry->entry, &wq_head->head);
157 }
158
159 /*
160  * Used for wake-one threads:
161  */
162 static inline void
163 __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
164 {
165         wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
166         __add_wait_queue(wq_head, wq_entry);
167 }
168
169 static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
170 {
171         list_add_tail(&wq_entry->entry, &wq_head->head);
172 }
173
174 static inline void
175 __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
176 {
177         wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
178         __add_wait_queue_entry_tail(wq_head, wq_entry);
179 }
180
181 static inline void
182 __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
183 {
184         list_del(&wq_entry->entry);
185 }
186
187 void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
188 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
189 void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
190                 unsigned int mode, void *key, wait_queue_entry_t *bookmark);
191 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
192 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
193 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
194 void __wake_up_pollfree(struct wait_queue_head *wq_head);
195
/*
 * Wakeup helpers; the mode argument selects which sleep states are eligible
 * and nr == 0 means "wake all" (see wake_up_all()/wake_up_interruptible_all()).
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * The event mask @m is smuggled to the wakeup callbacks via the @key argument.
 */
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
218
/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
 * lifetime is tied to a task rather than to the 'struct file' being polled,
 * this function must be called before the waitqueue is freed so that
 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
 *
 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * For performance reasons, we don't always take the queue lock here.
	 * Therefore, we might race with someone removing the last entry from
	 * the queue, and proceed while they still hold the queue lock.
	 * However, rcu_read_lock() is required to be held in such cases, so we
	 * can safely proceed with an RCU-delayed free.  That is what makes the
	 * unlocked waitqueue_active() check below safe.
	 */
	if (waitqueue_active(wq_head))
		__wake_up_pollfree(wq_head);
}
243
/*
 * Evaluate @condition while folding in the remaining-timeout value held in
 * the caller's __ret: if the condition became true exactly as the timeout
 * expired (__ret == 0), force __ret to 1 so the caller still reports success.
 * The expression is true (exit the wait loop) when the condition holds or
 * the timeout has expired.
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})

/*
 * True when @state may allow signal delivery (TASK_INTERRUPTIBLE or
 * TASK_KILLABLE); a non-compile-time-constant @state is conservatively
 * treated as interruptible.
 */
#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)		\
256 extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
257
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 *
 * Core wait loop shared by all wait_event_*() flavours: queue an on-stack
 * entry (exclusively when @exclusive), then alternate between re-checking
 * @condition and running @cmd (typically some form of schedule()) in @state.
 * For interruptible/killable states a pending-signal error code from
 * prepare_to_wait_event() aborts the loop and becomes the result; note the
 * __out path then skips finish_wait(), matching prepare_to_wait_event()'s
 * behaviour on signals.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
293
/* Uninterruptible sleep; result cast to void as wait_event() returns nothing. */
#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)		/* fast path: avoid queueing when already true */ \
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)
317
#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule().
 * The condition is likewise checked before the first sleep.
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)
332
/* Interruptible sleep that also calls try_to_freeze() after each schedule(). */
#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})
357
/* __ret carries the remaining jiffies between loop iterations. */
#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})
390
#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.  Return values match
 * wait_event_timeout(), or -ERESTARTSYS if interrupted by a signal.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})
408
#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 * (WQ_FLAG_EXCLUSIVE wake-one queueing).
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: command executed before each sleep
 * @cmd2: command executed after each sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)
446
/* Signal-interruptible sleep; __ret becomes -ERESTARTSYS on a signal. */
#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})
474
/* __ret carries the remaining jiffies; a signal replaces it with -ERESTARTSYS. */
#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})
509
/*
 * Sleep in @state until @condition holds or the on-stack hrtimer fires;
 * __t.task == NULL indicates the timer expired, yielding -ETIME.
 * KTIME_MAX means "no timeout" -- the timer is never started.
 */
#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);	\
	hrtimer_init_sleeper(&__t, current);					\
	if ((timeout) != KTIME_MAX)						\
		hrtimer_start_range_ns(&__t.timer, timeout,			\
				       current->timer_slack_ns,			\
				       HRTIMER_MODE_REL);			\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})
559
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;		/* NOTE: long here, unlike wait_event_hrtimeout() */ \
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})
585
586 #define __wait_event_interruptible_exclusive(wq, condition)                     \
587         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,                  \
588                       schedule())
589
590 #define wait_event_interruptible_exclusive(wq, condition)                       \
591 ({                                                                              \
592         int __ret = 0;                                                          \
593         might_sleep();                                                          \
594         if (!(condition))                                                       \
595                 __ret = __wait_event_interruptible_exclusive(wq, condition);    \
596         __ret;                                                                  \
597 })
598
/*
 * Exclusive wait in TASK_KILLABLE: only fatal signals end the wait early.
 */
#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

/**
 * wait_event_killable_exclusive - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Exclusive-waiter variant of wait_event_killable(): sleeps in
 * TASK_KILLABLE, so only fatal signals interrupt the wait, and a single
 * wake_up() wakes at most one such waiter.
 *
 * Returns 0 if @condition evaluated to true, -ERESTARTSYS if the wait
 * was interrupted by a fatal signal.
 */
#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);		\
	__ret;									\
})
611
612
/*
 * Exclusive interruptible wait that additionally calls try_to_freeze()
 * after each wake-up, so the task cooperates with the freezer while
 * waiting.
 */
#define __wait_event_freezable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
			schedule(); try_to_freeze())

/**
 * wait_event_freezable_exclusive - sleep exclusively (freezer-friendly) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Like wait_event_interruptible_exclusive(), but attempts to freeze the
 * task after each wake-up.
 *
 * Returns 0 if @condition evaluated to true, -ERESTARTSYS if the wait
 * was interrupted by a signal.
 */
#define wait_event_freezable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable_exclusive(wq, condition);	\
	__ret;									\
})
625
/*
 * One interruptible sleep cycle for the *_locked macro family below:
 * called with the waitqueue lock held, returns 0 to keep waiting or a
 * -ERESTARTSYS-style error on signal.  The _irq variant uses the
 * irq-disabling lock functions (see the kernel-doc of the callers).
 */
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
628
/*
 * Common engine for the wait_event_interruptible*_locked* macros below.
 * Must be entered with the waitqueue lock held.  @fn (do_wait_intr or
 * do_wait_intr_irq) performs one sleep cycle; a non-zero return (signal)
 * terminates the loop early and becomes the result.  @exclusive != 0
 * queues the waiter with WQ_FLAG_EXCLUSIVE.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
({										\
	int __ret;								\
	DEFINE_WAIT(__wait);							\
	if (exclusive)								\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
	do {									\
		__ret = fn(&(wq), &__wait);					\
		if (__ret)							\
			break;							\
	} while (!(condition));							\
	__remove_wait_queue(&(wq), &__wait);					\
	__set_current_state(TASK_RUNNING);					\
	__ret;									\
})
644
645
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)				\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
672
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)			\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
699
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken up, waiters queued after it on the
 * list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
730
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken up, waiters queued after it on the
 * list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
761
762
/* Non-exclusive wait in TASK_KILLABLE: only fatal signals wake the task early. */
#define __wait_event_killable(wq, condition)					\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)					\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable(wq_head, condition);		\
	__ret;									\
})
789
/*
 * Timed killable wait: ___wait_cond_timeout() folds the remaining-jiffies
 * bookkeeping into the condition, and "__ret = schedule_timeout(__ret)"
 * keeps __ret tracking the time left across sleeps.
 */
#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_KILLABLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_killable_timeout(wq_head,			\
						condition, timeout);		\
	__ret;									\
})
826
827
/*
 * Uninterruptible wait with @lock dropped (spin_unlock_irq()) around @cmd
 * and schedule(), and re-taken before @condition is re-evaluated.  The
 * (void) cast discards the statement-expression value: the
 * uninterruptible variants have nothing meaningful to return.
 */
#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);				\
			    cmd;						\
			    schedule();						\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
} while (0)
864
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * (This is wait_event_lock_irq_cmd() with an empty @cmd -- hence the
 * deliberately empty trailing macro argument below.)
 */
#define wait_event_lock_irq(wq_head, condition, lock)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, );			\
} while (0)
891
892
/*
 * Interruptible counterpart of __wait_event_lock_irq(): same
 * drop-lock/run-@cmd/sleep/re-take-lock cycle, but in TASK_INTERRUPTIBLE
 * so the statement-expression value (0 or -ERESTARTSYS) is kept.
 */
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);					\
		      cmd;							\
		      schedule();						\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock, cmd);		\
	__ret;									\
})
933
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 *
 * (This is wait_event_interruptible_lock_irq_cmd() with an empty @cmd --
 * hence the deliberately empty trailing macro argument below.)
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock,);		\
	__ret;									\
})
964
/*
 * Timed variant of __wait_event_interruptible_lock_irq(): drops @lock
 * (spin_unlock_irq()) around the timed sleep and re-takes it before
 * @condition is rechecked; "__ret = schedule_timeout(__ret)" keeps the
 * remaining jiffies updated.
 *
 * No trailing semicolon: the expansion is a ({ ... }) statement
 * expression used in expression position by the caller below; the stray
 * ';' the old definition carried produced a double semicolon at every
 * call site and would break usage inside an unbraced if/else.
 */
#define __wait_event_interruptible_lock_irq_timeout(wq_head, condition,		\
						    lock, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      spin_unlock_irq(&lock);					\
		      __ret = schedule_timeout(__ret);				\
		      spin_lock_irq(&lock))
972
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *              true or a timeout elapses. The condition is checked under
 *              the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
						  timeout)			\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_lock_irq_timeout(		\
					wq_head, condition, lock, timeout);	\
	__ret;									\
})
1006
1007 /*
1008  * Waitqueues which are removed from the waitqueue_head at wakeup time
1009  */
/*
 * Low-level waitqueue primitives used by the ___wait_event() machinery
 * and by open-coded prepare_to_wait()/finish_wait() loops.
 * prepare_to_wait*() queue an entry and set the task state before a
 * sleep cycle (the _exclusive variant queues as an exclusive waiter;
 * the _event variant additionally reports, via its return value,
 * whether the wait should abort -- see how ___wait_event() uses it);
 * finish_wait() tears the entry down afterwards.
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1017
/*
 * Declare and initialize an on-stack wait queue entry for the current
 * task with a caller-supplied wake function.
 */
#define DEFINE_WAIT_FUNC(name, function)					\
	struct wait_queue_entry name = {					\
		.private	= current,					\
		.func		= function,					\
		.entry		= LIST_HEAD_INIT((name).entry),			\
	}

/* Common case: the wake function also removes the entry from the queue. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1026
/*
 * Re-initialize an existing wait queue entry for the current task:
 * autoremove wake function, empty list linkage, flags cleared.
 */
#define init_wait(wait)								\
	do {									\
		(wait)->private = current;					\
		(wait)->func = autoremove_wake_function;			\
		INIT_LIST_HEAD(&(wait)->entry);					\
		(wait)->flags = 0;						\
	} while (0)
1034
1035 #endif /* _LINUX_WAIT_H */