#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02

struct __wait_queue {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	task_list;
};

struct wait_bit_key {
	void			*flags;
	int			bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
	unsigned long		timeout;
};

struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags	= 0;
	q->private	= p;
	q->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
	q->flags	= 0;
	q->private	= NULL;
	q->func		= func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq: wait queue head
 *
 * Returns true if wq has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(wait_queue_head_t *wq)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq);
}

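/*
 * Illustrative use on the waker side (a sketch, not part of the original
 * header; the names wq and data_ready are made up). wq_has_sleeper() lets
 * the waker skip the wakeup when nobody is waiting; the smp_mb() above is
 * what pairs with the barrier implied by prepare_to_wait()/set_current_state()
 * on the waiting side:
 *
 *	data_ready = true;
 *	if (wq_has_sleeper(&wq))
 *		wake_up_interruptible(&wq);
 */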
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}

typedef int wait_bit_action_f(struct wait_bit_key *);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

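/*
 * Illustrative sketch (not part of the original header; dev and read_wq are
 * made-up names): a driver's data-ready path would typically pass the poll
 * event mask as the wakeup key so that poll/epoll callbacks can tell which
 * events occurred:
 *
 *	wake_up_interruptible_poll(&dev->read_wq, POLLIN | POLLRDNORM);
 */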
#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})

#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

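/*
 * Illustrative usage (a sketch, not part of the original header; my_wq and
 * my_flag are made-up names):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_flag;
 *
 *	waiter:
 *		wait_event(my_wq, my_flag);
 *
 *	waker:
 *		my_flag = 1;
 *		wake_up(&my_wq);
 */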
#define __io_wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__io_wait_event(wq, condition);					\
} while (0)

#define __wait_event_freezable(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable(wq, condition);		\
	__ret;								\
})

#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})

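/*
 * Illustrative usage (a sketch, not part of the original header; my_wq and
 * done are made-up names) showing how the return value distinguishes a
 * timeout from the condition becoming true:
 *
 *	long left = wait_event_timeout(my_wq, done, msecs_to_jiffies(100));
 *	if (!left)
 *		...		(timed out, done was still false)
 *	else
 *		...		(done became true, 'left' jiffies remained)
 */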
#define __wait_event_freezable_timeout(wq, condition, timeout)		\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_freezable_timeout(wq, condition, timeout); \
	__ret;								\
})

#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleeping
 * @cmd2: the command to be executed after sleeping
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)

#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})

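/*
 * Illustrative usage (a sketch, not part of the original header; my_wq and
 * cond are made-up names): the -ERESTARTSYS return is normally propagated so
 * the syscall can be restarted or fail with -EINTR:
 *
 *	int ret = wait_event_interruptible(my_wq, cond);
 *	if (ret)
 *		return ret;	(interrupted by a signal)
 */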
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})

#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})

#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})


#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
			schedule(); try_to_freeze())

#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable_exclusive(wq, condition);\
	__ret;								\
})


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but the @condition is tested with the lock held, and the
 * lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

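/*
 * Illustrative sketch for the *_locked family (not part of the original
 * header; my_wq and cond are made-up names). The waiter holds wq.lock around
 * the wait, and the waker uses the same lock together with wake_up_locked():
 *
 *	waiter:
 *		spin_lock(&my_wq.lock);
 *		ret = wait_event_interruptible_locked(my_wq, cond);
 *		spin_unlock(&my_wq.lock);
 *
 *	waker:
 *		spin_lock(&my_wq.lock);
 *		cond = true;
 *		wake_up_locked(&my_wq);
 *		spin_unlock(&my_wq.lock);
 */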
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but the @condition is tested with the lock held, and the
 * lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but the @condition is tested with the lock held, and the
 * lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if it is woken up, further processes waiting on the list are
 * not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but the @condition is tested with the lock held, and the
 * lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if it is woken up, further processes waiting on the list are
 * not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))


#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})


#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)

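/*
 * Illustrative sketch (not part of the original header; my_wq, my_lock and
 * cond are made-up names). Here the condition is protected by a driver
 * spinlock rather than by wq.lock:
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, cond, my_lock);
 *	... cond is true and my_lock is held again here ...
 *	spin_unlock_irq(&my_lock);
 *
 * The waker sets cond under my_lock and then calls wake_up(&my_wq).
 */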

#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})

#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies if the
 * condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

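/*
 * Illustrative open-coded wait loop (a sketch, not part of the original
 * header; my_wq and cond are made-up names) using the prepare_to_wait()/
 * finish_wait() interface that the wait_event*() macros build upon:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */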
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)


extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
extern int bit_wait_timeout(struct wait_bit_key *);
extern int bit_wait_io_timeout(struct wait_bit_key *);

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait,
				       mode);
}

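/*
 * Illustrative sketch (not part of the original header; MY_FLAG_BUSY and
 * flags are made-up names): a waiter blocks until the bit clears, and the
 * owner clears it and kicks the hashed waitqueue:
 *
 *	waiter:
 *		wait_on_bit(&flags, MY_FLAG_BUSY, TASK_UNINTERRUPTIBLE);
 *
 *	owner:
 *		clear_bit(MY_FLAG_BUSY, &flags);
 *		smp_mb__after_atomic();
 *		wake_up_bit(&flags, MY_FLAG_BUSY);
 */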
/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), but calls
 * io_schedule() instead of schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait_io,
				       mode);
}

/**
 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 * @timeout: timeout, in jiffies
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), except also takes a
 * timeout parameter.
 *
 * Returned value will be zero if the bit was cleared before the
 * @timeout elapsed, or non-zero if the @timeout elapsed or process
 * received a signal and the mode permitted wakeup on that signal.
 */
static inline int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
		    unsigned long timeout)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_timeout(word, bit,
					       bit_wait_timeout,
					       mode, timeout);
}

/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared, and allow the waiting action to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
		   unsigned mode)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}

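/*
 * Illustrative lock-like usage (a sketch, not part of the original header;
 * MY_FLAG_LOCK and flags are made-up names): take the bit, do work, then
 * release it and wake the next waiter:
 *
 *	if (wait_on_bit_lock(&flags, MY_FLAG_LOCK, TASK_KILLABLE))
 *		return -EINTR;		(a fatal signal arrived first)
 *	... critical section, bit is held ...
 *	clear_bit_unlock(MY_FLAG_LOCK, &flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&flags, MY_FLAG_LOCK);
 */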
/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to atomically set it. This is similar
 * to wait_on_bit(), but calls io_schedule() instead of schedule()
 * for the actual waiting.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to set it, and allow the waiting action
 * to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
			unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
	might_sleep();
	if (atomic_read(val) == 0)
		return 0;
	return out_of_line_wait_on_atomic_t(val, action, mode);
}

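/*
 * Illustrative sketch (not part of the original header; my_refs and
 * my_wait_fn are made-up names): a typical pairing waits for an atomic count
 * to reach zero, with the side dropping the last reference issuing the wakeup:
 *
 *	waiter:
 *		wait_on_atomic_t(&my_refs, my_wait_fn, TASK_UNINTERRUPTIBLE);
 *
 *	releaser:
 *		if (atomic_dec_and_test(&my_refs))
 *			wake_up_atomic_t(&my_refs);
 *
 * where my_wait_fn is an action callback, for example one that simply calls
 * schedule() and returns 0.
 */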
#endif /* _LINUX_WAIT_H */