blob: 77fdea851d8b4013a9f060b19eacc3f2ad67aa5a [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef _LINUX_WAIT_H
2#define _LINUX_WAIT_H
Ingo Molnarfb869b62013-10-04 10:24:49 +02003/*
4 * Linux wait queue related types and methods
5 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006#include <linux/list.h>
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
Ingo Molnar5b825c32017-02-02 17:54:15 +01009
Linus Torvalds1da177e2005-04-16 15:20:36 -070010#include <asm/current.h>
David Howells607ca462012-10-13 10:46:48 +010011#include <uapi/linux/wait.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070012
Ingo Molnarac6424b2017-06-20 12:06:13 +020013typedef struct wait_queue_entry wait_queue_entry_t;
Ingo Molnar50816c42017-03-05 10:33:16 +010014
15typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
16int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
Linus Torvalds1da177e2005-04-16 15:20:36 -070017
Ingo Molnarac6424b2017-06-20 12:06:13 +020018/* wait_queue_entry::flags */
Peter Zijlstra61ada522014-09-24 10:18:47 +020019#define WQ_FLAG_EXCLUSIVE 0x01
20#define WQ_FLAG_WOKEN 0x02
21
Ingo Molnarac6424b2017-06-20 12:06:13 +020022/*
23 * A single wait-queue entry structure:
24 */
25struct wait_queue_entry {
Ingo Molnarfb869b62013-10-04 10:24:49 +020026 unsigned int flags;
Ingo Molnarfb869b62013-10-04 10:24:49 +020027 void *private;
28 wait_queue_func_t func;
29 struct list_head task_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -070030};
31
32struct wait_bit_key {
Ingo Molnarfb869b62013-10-04 10:24:49 +020033 void *flags;
34 int bit_nr;
35#define WAIT_ATOMIC_T_BIT_NR -1
NeilBrowncbbce822014-09-25 13:55:19 +100036 unsigned long timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -070037};
38
39struct wait_bit_queue {
Ingo Molnarfb869b62013-10-04 10:24:49 +020040 struct wait_bit_key key;
Ingo Molnar50816c42017-03-05 10:33:16 +010041 struct wait_queue_entry wait;
Linus Torvalds1da177e2005-04-16 15:20:36 -070042};
43
44struct __wait_queue_head {
Ingo Molnarfb869b62013-10-04 10:24:49 +020045 spinlock_t lock;
46 struct list_head task_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -070047};
48typedef struct __wait_queue_head wait_queue_head_t;
49
Tim Schmielau8c65b4a2005-11-07 00:59:43 -080050struct task_struct;
Linus Torvalds1da177e2005-04-16 15:20:36 -070051
52/*
53 * Macros for declaration and initialisaton of the datatypes
54 */
55
56#define __WAITQUEUE_INITIALIZER(name, tsk) { \
Benjamin LaHaisec43dc2f2005-06-23 00:10:27 -070057 .private = tsk, \
Linus Torvalds1da177e2005-04-16 15:20:36 -070058 .func = default_wake_function, \
59 .task_list = { NULL, NULL } }
60
61#define DECLARE_WAITQUEUE(name, tsk) \
Ingo Molnar50816c42017-03-05 10:33:16 +010062 struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
64#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
Ingo Molnare4d91912006-07-03 00:24:34 -070065 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
Linus Torvalds1da177e2005-04-16 15:20:36 -070066 .task_list = { &(name).task_list, &(name).task_list } }
67
68#define DECLARE_WAIT_QUEUE_HEAD(name) \
69 wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
70
71#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
72 { .flags = word, .bit_nr = bit, }
73
David Howellscb655372013-05-10 19:50:26 +010074#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
75 { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
76
Peter Zijlstraf07fdec2011-12-13 13:20:54 +010077extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
Peter Zijlstra2fc39112009-08-10 12:33:05 +010078
79#define init_waitqueue_head(q) \
80 do { \
81 static struct lock_class_key __key; \
82 \
Peter Zijlstraf07fdec2011-12-13 13:20:54 +010083 __init_waitqueue_head((q), #q, &__key); \
Peter Zijlstra2fc39112009-08-10 12:33:05 +010084 } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070085
Peter Zijlstra7259f0d2006-10-29 22:46:36 -080086#ifdef CONFIG_LOCKDEP
87# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
88 ({ init_waitqueue_head(&name); name; })
89# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
90 wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
91#else
92# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
93#endif
94
Ingo Molnar50816c42017-03-05 10:33:16 +010095static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -070096{
Ingo Molnar50816c42017-03-05 10:33:16 +010097 wq_entry->flags = 0;
98 wq_entry->private = p;
99 wq_entry->func = default_wake_function;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100}
101
Ingo Molnarfb869b62013-10-04 10:24:49 +0200102static inline void
Ingo Molnar50816c42017-03-05 10:33:16 +0100103init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104{
Ingo Molnar50816c42017-03-05 10:33:16 +0100105 wq_entry->flags = 0;
106 wq_entry->private = NULL;
107 wq_entry->func = func;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108}
109
Peter Zijlstra69e51e922015-10-23 14:32:34 +0200110/**
111 * waitqueue_active -- locklessly test for waiters on the queue
112 * @q: the waitqueue to test for waiters
113 *
114 * returns true if the wait list is not empty
115 *
116 * NOTE: this function is lockless and requires care, incorrect usage _will_
117 * lead to sporadic and non-obvious failure.
118 *
119 * Use either while holding wait_queue_head_t::lock or when used for wakeups
120 * with an extra smp_mb() like:
121 *
122 * CPU0 - waker CPU1 - waiter
123 *
124 * for (;;) {
125 * @cond = true; prepare_to_wait(&wq, &wait, state);
126 * smp_mb(); // smp_mb() from set_current_state()
127 * if (waitqueue_active(wq)) if (@cond)
128 * wake_up(wq); break;
129 * schedule();
130 * }
131 * finish_wait(&wq, &wait);
132 *
133 * Because without the explicit smp_mb() it's possible for the
134 * waitqueue_active() load to get hoisted over the @cond store such that we'll
135 * observe an empty wait list while the waiter might not observe @cond.
136 *
137 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
138 * which (when the lock is uncontended) are of roughly equal cost.
139 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140static inline int waitqueue_active(wait_queue_head_t *q)
141{
142 return !list_empty(&q->task_list);
143}
144
Herbert Xu1ce0bf52015-11-26 13:55:39 +0800145/**
146 * wq_has_sleeper - check if there are any waiting processes
147 * @wq: wait queue head
148 *
149 * Returns true if wq has waiting processes
150 *
151 * Please refer to the comment for waitqueue_active.
152 */
153static inline bool wq_has_sleeper(wait_queue_head_t *wq)
154{
155 /*
156 * We need to be sure we are in sync with the
157 * add_wait_queue modifications to the wait queue.
158 *
159 * This memory barrier should be paired with one on the
160 * waiting side.
161 */
162 smp_mb();
163 return waitqueue_active(wq);
164}
165
Ingo Molnar50816c42017-03-05 10:33:16 +0100166extern void add_wait_queue(wait_queue_head_t *q, struct wait_queue_entry *wq_entry);
167extern void add_wait_queue_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry);
168extern void remove_wait_queue(wait_queue_head_t *q, struct wait_queue_entry *wq_entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169
Ingo Molnar50816c42017-03-05 10:33:16 +0100170static inline void __add_wait_queue(wait_queue_head_t *head, struct wait_queue_entry *wq_entry)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171{
Ingo Molnar50816c42017-03-05 10:33:16 +0100172 list_add(&wq_entry->task_list, &head->task_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173}
174
175/*
176 * Used for wake-one threads:
177 */
Ingo Molnarfb869b62013-10-04 10:24:49 +0200178static inline void
Ingo Molnar50816c42017-03-05 10:33:16 +0100179__add_wait_queue_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
Changli Gaoa93d2f12010-05-07 14:33:26 +0800180{
Ingo Molnar50816c42017-03-05 10:33:16 +0100181 wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
182 __add_wait_queue(q, wq_entry);
Changli Gaoa93d2f12010-05-07 14:33:26 +0800183}
184
Ingo Molnar50816c42017-03-05 10:33:16 +0100185static inline void __add_wait_queue_entry_tail(wait_queue_head_t *head, struct wait_queue_entry *wq_entry)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186{
Ingo Molnar50816c42017-03-05 10:33:16 +0100187 list_add_tail(&wq_entry->task_list, &head->task_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188}
189
Ingo Molnarfb869b62013-10-04 10:24:49 +0200190static inline void
Ingo Molnar50816c42017-03-05 10:33:16 +0100191__add_wait_queue_entry_tail_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry)
Changli Gaoa93d2f12010-05-07 14:33:26 +0800192{
Ingo Molnar50816c42017-03-05 10:33:16 +0100193 wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
194 __add_wait_queue_entry_tail(q, wq_entry);
Changli Gaoa93d2f12010-05-07 14:33:26 +0800195}
196
Ingo Molnarfb869b62013-10-04 10:24:49 +0200197static inline void
Ingo Molnar50816c42017-03-05 10:33:16 +0100198__remove_wait_queue(wait_queue_head_t *head, struct wait_queue_entry *wq_entry)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199{
Ingo Molnar50816c42017-03-05 10:33:16 +0100200 list_del(&wq_entry->task_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201}
202
Peter Zijlstradfd01f02015-12-13 22:11:16 +0100203typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
Harvey Harrisonb3c97522008-02-13 15:03:15 -0800204void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
Andrea Arcangeliac5be6b2015-09-22 14:58:49 -0700205void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
Ingo Molnarfb869b62013-10-04 10:24:49 +0200206void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
Thomas Gleixner63b20012011-12-01 00:04:00 +0100207void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
Davide Libenzi4ede8162009-03-31 15:24:20 -0700208void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
Harvey Harrisonb3c97522008-02-13 15:03:15 -0800209void __wake_up_bit(wait_queue_head_t *, void *, int);
NeilBrownc1221322014-07-07 15:16:04 +1000210int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
211int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
Harvey Harrisonb3c97522008-02-13 15:03:15 -0800212void wake_up_bit(void *, int);
David Howellscb655372013-05-10 19:50:26 +0100213void wake_up_atomic_t(atomic_t *);
NeilBrownc1221322014-07-07 15:16:04 +1000214int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
NeilBrowncbbce822014-09-25 13:55:19 +1000215int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
NeilBrownc1221322014-07-07 15:16:04 +1000216int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
David Howellscb655372013-05-10 19:50:26 +0100217int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
Harvey Harrisonb3c97522008-02-13 15:03:15 -0800218wait_queue_head_t *bit_waitqueue(void *, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219
Matthew Wilcoxe64d66c2007-12-06 17:34:36 -0500220#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
221#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
222#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
Thomas Gleixner63b20012011-12-01 00:04:00 +0100223#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
224#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
Matthew Wilcoxe64d66c2007-12-06 17:34:36 -0500225
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
227#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
228#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
Matthew Wilcoxe64d66c2007-12-06 17:34:36 -0500229#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230
Peter Zijlstra0ccf8312008-02-04 22:27:20 -0800231/*
Davide Libenzic0da3772009-03-31 15:24:20 -0700232 * Wakeup macros to be used to report events to the targets.
Peter Zijlstra0ccf8312008-02-04 22:27:20 -0800233 */
Ingo Molnarfb869b62013-10-04 10:24:49 +0200234#define wake_up_poll(x, m) \
Davide Libenzic0da3772009-03-31 15:24:20 -0700235 __wake_up(x, TASK_NORMAL, 1, (void *) (m))
Ingo Molnarfb869b62013-10-04 10:24:49 +0200236#define wake_up_locked_poll(x, m) \
Andrea Arcangeliac5be6b2015-09-22 14:58:49 -0700237 __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
Ingo Molnarfb869b62013-10-04 10:24:49 +0200238#define wake_up_interruptible_poll(x, m) \
Davide Libenzic0da3772009-03-31 15:24:20 -0700239 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
240#define wake_up_interruptible_sync_poll(x, m) \
241 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
Peter Zijlstra0ccf8312008-02-04 22:27:20 -0800242
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200243#define ___wait_cond_timeout(condition) \
Peter Zijlstra2953ef22013-10-02 11:22:19 +0200244({ \
Ingo Molnarfb869b62013-10-04 10:24:49 +0200245 bool __cond = (condition); \
246 if (__cond && !__ret) \
247 __ret = 1; \
248 __cond || !__ret; \
Peter Zijlstra2953ef22013-10-02 11:22:19 +0200249})
250
Oleg Nesterovc2d81642013-10-07 18:18:24 +0200251#define ___wait_is_interruptible(state) \
252 (!__builtin_constant_p(state) || \
253 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
Peter Zijlstra41a14312013-10-02 11:22:21 +0200254
Ingo Molnar50816c42017-03-05 10:33:16 +0100255extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
Oleg Nesterov0176bea2016-09-06 16:00:55 +0200256
Peter Zijlstra8b322012014-04-18 15:07:17 -0700257/*
258 * The below macro ___wait_event() has an explicit shadow of the __ret
259 * variable when used from the wait_event_*() macros.
260 *
261 * This is so that both can use the ___wait_cond_timeout() construct
262 * to wrap the condition.
263 *
264 * The type inconsistency of the wait_event_*() __ret variable is also
265 * on purpose; we use long where we can return timeout values and int
266 * otherwise.
267 */
268
Peter Zijlstra41a14312013-10-02 11:22:21 +0200269#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200270({ \
Peter Zijlstra41a14312013-10-02 11:22:21 +0200271 __label__ __out; \
Ingo Molnar50816c42017-03-05 10:33:16 +0100272 struct wait_queue_entry __wq_entry; \
Peter Zijlstra8b322012014-04-18 15:07:17 -0700273 long __ret = ret; /* explicit shadow */ \
Peter Zijlstra41a14312013-10-02 11:22:21 +0200274 \
Ingo Molnar50816c42017-03-05 10:33:16 +0100275 init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);\
Peter Zijlstra41a14312013-10-02 11:22:21 +0200276 for (;;) { \
Ingo Molnar50816c42017-03-05 10:33:16 +0100277 long __int = prepare_to_wait_event(&wq, &__wq_entry, state);\
Peter Zijlstra41a14312013-10-02 11:22:21 +0200278 \
279 if (condition) \
280 break; \
281 \
Oleg Nesterovc2d81642013-10-07 18:18:24 +0200282 if (___wait_is_interruptible(state) && __int) { \
283 __ret = __int; \
Oleg Nesterovb1ea06a2016-09-08 18:48:15 +0200284 goto __out; \
Peter Zijlstra41a14312013-10-02 11:22:21 +0200285 } \
286 \
287 cmd; \
288 } \
Ingo Molnar50816c42017-03-05 10:33:16 +0100289 finish_wait(&wq, &__wq_entry); \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200290__out: __ret; \
291})
Peter Zijlstra41a14312013-10-02 11:22:21 +0200292
Ingo Molnarfb869b62013-10-04 10:24:49 +0200293#define __wait_event(wq, condition) \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200294 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
295 schedule())
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296
297/**
298 * wait_event - sleep until a condition gets true
299 * @wq: the waitqueue to wait on
300 * @condition: a C expression for the event to wait for
301 *
302 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
303 * @condition evaluates to true. The @condition is checked each time
304 * the waitqueue @wq is woken up.
305 *
306 * wake_up() has to be called after changing any variable that could
307 * change the result of the wait condition.
308 */
Ingo Molnarfb869b62013-10-04 10:24:49 +0200309#define wait_event(wq, condition) \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310do { \
Peter Zijlstrae22b8862014-09-24 10:18:48 +0200311 might_sleep(); \
Ingo Molnarfb869b62013-10-04 10:24:49 +0200312 if (condition) \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313 break; \
314 __wait_event(wq, condition); \
315} while (0)
316
Peter Zijlstra2c561242015-02-03 12:55:31 +0100317#define __io_wait_event(wq, condition) \
318 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
319 io_schedule())
320
321/*
322 * io_wait_event() -- like wait_event() but with io_schedule()
323 */
324#define io_wait_event(wq, condition) \
325do { \
326 might_sleep(); \
327 if (condition) \
328 break; \
329 __io_wait_event(wq, condition); \
330} while (0)
331
Peter Zijlstra36df04b2014-10-29 12:21:57 +0100332#define __wait_event_freezable(wq, condition) \
333 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
334 schedule(); try_to_freeze())
335
336/**
Stafford Hornef4bcfa12016-02-23 22:39:28 +0900337 * wait_event_freezable - sleep (or freeze) until a condition gets true
Peter Zijlstra36df04b2014-10-29 12:21:57 +0100338 * @wq: the waitqueue to wait on
339 * @condition: a C expression for the event to wait for
340 *
341 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
342 * to system load) until the @condition evaluates to true. The
343 * @condition is checked each time the waitqueue @wq is woken up.
344 *
345 * wake_up() has to be called after changing any variable that could
346 * change the result of the wait condition.
347 */
348#define wait_event_freezable(wq, condition) \
349({ \
350 int __ret = 0; \
351 might_sleep(); \
352 if (!(condition)) \
353 __ret = __wait_event_freezable(wq, condition); \
354 __ret; \
355})
356
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200357#define __wait_event_timeout(wq, condition, timeout) \
358 ___wait_event(wq, ___wait_cond_timeout(condition), \
359 TASK_UNINTERRUPTIBLE, 0, timeout, \
360 __ret = schedule_timeout(__ret))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700361
362/**
363 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
364 * @wq: the waitqueue to wait on
365 * @condition: a C expression for the event to wait for
366 * @timeout: timeout, in jiffies
367 *
368 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
369 * @condition evaluates to true. The @condition is checked each time
370 * the waitqueue @wq is woken up.
371 *
372 * wake_up() has to be called after changing any variable that could
373 * change the result of the wait condition.
374 *
Scot Doyle6b44f512014-08-24 17:12:27 +0000375 * Returns:
376 * 0 if the @condition evaluated to %false after the @timeout elapsed,
377 * 1 if the @condition evaluated to %true after the @timeout elapsed,
378 * or the remaining jiffies (at least 1) if the @condition evaluated
379 * to %true before the @timeout elapsed.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380 */
381#define wait_event_timeout(wq, condition, timeout) \
382({ \
383 long __ret = timeout; \
Peter Zijlstrae22b8862014-09-24 10:18:48 +0200384 might_sleep(); \
Oleg Nesterov89229152013-10-07 20:31:06 +0200385 if (!___wait_cond_timeout(condition)) \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200386 __ret = __wait_event_timeout(wq, condition, timeout); \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387 __ret; \
388})
389
Peter Zijlstra36df04b2014-10-29 12:21:57 +0100390#define __wait_event_freezable_timeout(wq, condition, timeout) \
391 ___wait_event(wq, ___wait_cond_timeout(condition), \
392 TASK_INTERRUPTIBLE, 0, timeout, \
393 __ret = schedule_timeout(__ret); try_to_freeze())
394
395/*
396 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
397 * increasing load and is freezable.
398 */
399#define wait_event_freezable_timeout(wq, condition, timeout) \
400({ \
401 long __ret = timeout; \
402 might_sleep(); \
403 if (!___wait_cond_timeout(condition)) \
404 __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
405 __ret; \
406})
407
Yuanhan Liu9f3520c2015-05-08 18:19:05 +1000408#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
409 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
410 cmd1; schedule(); cmd2)
411/*
412 * Just like wait_event_cmd(), except it sets exclusive flag
413 */
414#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
415do { \
416 if (condition) \
417 break; \
418 __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2); \
419} while (0)
420
Shaohua Li82e06c82013-11-14 15:16:16 +1100421#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
422 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
423 cmd1; schedule(); cmd2)
424
425/**
426 * wait_event_cmd - sleep until a condition gets true
427 * @wq: the waitqueue to wait on
428 * @condition: a C expression for the event to wait for
Masanari Iidaf434f7a2014-01-22 01:22:06 +0900429 * @cmd1: the command will be executed before sleep
430 * @cmd2: the command will be executed after sleep
Shaohua Li82e06c82013-11-14 15:16:16 +1100431 *
432 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
433 * @condition evaluates to true. The @condition is checked each time
434 * the waitqueue @wq is woken up.
435 *
436 * wake_up() has to be called after changing any variable that could
437 * change the result of the wait condition.
438 */
439#define wait_event_cmd(wq, condition, cmd1, cmd2) \
440do { \
441 if (condition) \
442 break; \
443 __wait_event_cmd(wq, condition, cmd1, cmd2); \
444} while (0)
445
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200446#define __wait_event_interruptible(wq, condition) \
447 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
Peter Zijlstraf13f4c42013-10-02 11:22:24 +0200448 schedule())
Linus Torvalds1da177e2005-04-16 15:20:36 -0700449
450/**
451 * wait_event_interruptible - sleep until a condition gets true
452 * @wq: the waitqueue to wait on
453 * @condition: a C expression for the event to wait for
454 *
455 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
456 * @condition evaluates to true or a signal is received.
457 * The @condition is checked each time the waitqueue @wq is woken up.
458 *
459 * wake_up() has to be called after changing any variable that could
460 * change the result of the wait condition.
461 *
462 * The function will return -ERESTARTSYS if it was interrupted by a
463 * signal and 0 if @condition evaluated to true.
464 */
465#define wait_event_interruptible(wq, condition) \
466({ \
467 int __ret = 0; \
Peter Zijlstrae22b8862014-09-24 10:18:48 +0200468 might_sleep(); \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469 if (!(condition)) \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200470 __ret = __wait_event_interruptible(wq, condition); \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471 __ret; \
472})
473
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200474#define __wait_event_interruptible_timeout(wq, condition, timeout) \
475 ___wait_event(wq, ___wait_cond_timeout(condition), \
476 TASK_INTERRUPTIBLE, 0, timeout, \
477 __ret = schedule_timeout(__ret))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700478
479/**
480 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
481 * @wq: the waitqueue to wait on
482 * @condition: a C expression for the event to wait for
483 * @timeout: timeout, in jiffies
484 *
485 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
486 * @condition evaluates to true or a signal is received.
487 * The @condition is checked each time the waitqueue @wq is woken up.
488 *
489 * wake_up() has to be called after changing any variable that could
490 * change the result of the wait condition.
491 *
Imre Deak4c663cf2013-05-24 15:55:09 -0700492 * Returns:
Scot Doyle6b44f512014-08-24 17:12:27 +0000493 * 0 if the @condition evaluated to %false after the @timeout elapsed,
494 * 1 if the @condition evaluated to %true after the @timeout elapsed,
495 * the remaining jiffies (at least 1) if the @condition evaluated
496 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
497 * interrupted by a signal.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700498 */
499#define wait_event_interruptible_timeout(wq, condition, timeout) \
500({ \
501 long __ret = timeout; \
Peter Zijlstrae22b8862014-09-24 10:18:48 +0200502 might_sleep(); \
Oleg Nesterov89229152013-10-07 20:31:06 +0200503 if (!___wait_cond_timeout(condition)) \
Ingo Molnarfb869b62013-10-04 10:24:49 +0200504 __ret = __wait_event_interruptible_timeout(wq, \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200505 condition, timeout); \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506 __ret; \
507})
508
Kent Overstreet774a08b2013-05-07 16:18:43 -0700509#define __wait_event_hrtimeout(wq, condition, timeout, state) \
510({ \
511 int __ret = 0; \
Kent Overstreet774a08b2013-05-07 16:18:43 -0700512 struct hrtimer_sleeper __t; \
513 \
514 hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
515 HRTIMER_MODE_REL); \
516 hrtimer_init_sleeper(&__t, current); \
Thomas Gleixner2456e852016-12-25 11:38:40 +0100517 if ((timeout) != KTIME_MAX) \
Kent Overstreet774a08b2013-05-07 16:18:43 -0700518 hrtimer_start_range_ns(&__t.timer, timeout, \
519 current->timer_slack_ns, \
520 HRTIMER_MODE_REL); \
521 \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200522 __ret = ___wait_event(wq, condition, state, 0, 0, \
Kent Overstreet774a08b2013-05-07 16:18:43 -0700523 if (!__t.task) { \
524 __ret = -ETIME; \
525 break; \
526 } \
Peter Zijlstraebdc1952013-10-02 11:22:32 +0200527 schedule()); \
Kent Overstreet774a08b2013-05-07 16:18:43 -0700528 \
529 hrtimer_cancel(&__t.timer); \
530 destroy_hrtimer_on_stack(&__t.timer); \
Kent Overstreet774a08b2013-05-07 16:18:43 -0700531 __ret; \
532})
533
534/**
535 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
536 * @wq: the waitqueue to wait on
537 * @condition: a C expression for the event to wait for
538 * @timeout: timeout, as a ktime_t
539 *
540 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
541 * @condition evaluates to true or a signal is received.
542 * The @condition is checked each time the waitqueue @wq is woken up.
543 *
544 * wake_up() has to be called after changing any variable that could
545 * change the result of the wait condition.
546 *
547 * The function returns 0 if @condition became true, or -ETIME if the timeout
548 * elapsed.
549 */
550#define wait_event_hrtimeout(wq, condition, timeout) \
551({ \
552 int __ret = 0; \
Peter Zijlstrae22b8862014-09-24 10:18:48 +0200553 might_sleep(); \
Kent Overstreet774a08b2013-05-07 16:18:43 -0700554 if (!(condition)) \
555 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
556 TASK_UNINTERRUPTIBLE); \
557 __ret; \
558})
559
560/**
561 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
562 * @wq: the waitqueue to wait on
563 * @condition: a C expression for the event to wait for
564 * @timeout: timeout, as a ktime_t
565 *
566 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
567 * @condition evaluates to true or a signal is received.
568 * The @condition is checked each time the waitqueue @wq is woken up.
569 *
570 * wake_up() has to be called after changing any variable that could
571 * change the result of the wait condition.
572 *
573 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
574 * interrupted by a signal, or -ETIME if the timeout elapsed.
575 */
576#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
577({ \
578 long __ret = 0; \
Peter Zijlstrae22b8862014-09-24 10:18:48 +0200579 might_sleep(); \
Kent Overstreet774a08b2013-05-07 16:18:43 -0700580 if (!(condition)) \
581 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
582 TASK_INTERRUPTIBLE); \
583 __ret; \
584})
585
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200586#define __wait_event_interruptible_exclusive(wq, condition) \
587 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
Peter Zijlstra48c25212013-10-02 11:22:26 +0200588 schedule())
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589
590#define wait_event_interruptible_exclusive(wq, condition) \
591({ \
592 int __ret = 0; \
Peter Zijlstrae22b8862014-09-24 10:18:48 +0200593 might_sleep(); \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594 if (!(condition)) \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200595 __ret = __wait_event_interruptible_exclusive(wq, condition);\
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596 __ret; \
597})
598
Al Viro6a0fb302016-07-19 03:04:34 -0400599#define __wait_event_killable_exclusive(wq, condition) \
600 ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
601 schedule())
602
603#define wait_event_killable_exclusive(wq, condition) \
604({ \
605 int __ret = 0; \
606 might_sleep(); \
607 if (!(condition)) \
608 __ret = __wait_event_killable_exclusive(wq, condition); \
609 __ret; \
610})
611
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200612
Peter Zijlstra36df04b2014-10-29 12:21:57 +0100613#define __wait_event_freezable_exclusive(wq, condition) \
614 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
615 schedule(); try_to_freeze())
616
617#define wait_event_freezable_exclusive(wq, condition) \
618({ \
619 int __ret = 0; \
620 might_sleep(); \
621 if (!(condition)) \
622 __ret = __wait_event_freezable_exclusive(wq, condition);\
623 __ret; \
624})
625
Ingo Molnarac6424b2017-06-20 12:06:13 +0200626extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
627extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
Peter Zijlstra36df04b2014-10-29 12:21:57 +0100628
Linus Torvaldsbd0f9b32017-03-07 15:33:14 -0800629#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200630({ \
Linus Torvaldsbd0f9b32017-03-07 15:33:14 -0800631 int __ret; \
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200632 DEFINE_WAIT(__wait); \
633 if (exclusive) \
634 __wait.flags |= WQ_FLAG_EXCLUSIVE; \
635 do { \
Linus Torvaldsbd0f9b32017-03-07 15:33:14 -0800636 __ret = fn(&(wq), &__wait); \
637 if (__ret) \
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200638 break; \
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200639 } while (!(condition)); \
640 __remove_wait_queue(&(wq), &__wait); \
641 __set_current_state(TASK_RUNNING); \
642 __ret; \
643})
644
645
646/**
647 * wait_event_interruptible_locked - sleep until a condition gets true
648 * @wq: the waitqueue to wait on
649 * @condition: a C expression for the event to wait for
650 *
651 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
652 * @condition evaluates to true or a signal is received.
653 * The @condition is checked each time the waitqueue @wq is woken up.
654 *
655 * It must be called with wq.lock being held. This spinlock is
656 * unlocked while sleeping but @condition testing is done while lock
657 * is held and when this macro exits the lock is held.
658 *
659 * The lock is locked/unlocked using spin_lock()/spin_unlock()
660 * functions which must match the way they are locked/unlocked outside
661 * of this macro.
662 *
663 * wake_up_locked() has to be called after changing any variable that could
664 * change the result of the wait condition.
665 *
666 * The function will return -ERESTARTSYS if it was interrupted by a
667 * signal and 0 if @condition evaluated to true.
668 */
669#define wait_event_interruptible_locked(wq, condition) \
670 ((condition) \
Linus Torvaldsbd0f9b32017-03-07 15:33:14 -0800671 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200672
673/**
674 * wait_event_interruptible_locked_irq - sleep until a condition gets true
675 * @wq: the waitqueue to wait on
676 * @condition: a C expression for the event to wait for
677 *
678 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
679 * @condition evaluates to true or a signal is received.
680 * The @condition is checked each time the waitqueue @wq is woken up.
681 *
682 * It must be called with wq.lock being held. This spinlock is
683 * unlocked while sleeping but @condition testing is done while lock
684 * is held and when this macro exits the lock is held.
685 *
686 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
687 * functions which must match the way they are locked/unlocked outside
688 * of this macro.
689 *
690 * wake_up_locked() has to be called after changing any variable that could
691 * change the result of the wait condition.
692 *
693 * The function will return -ERESTARTSYS if it was interrupted by a
694 * signal and 0 if @condition evaluated to true.
695 */
696#define wait_event_interruptible_locked_irq(wq, condition) \
697 ((condition) \
Linus Torvaldsbd0f9b32017-03-07 15:33:14 -0800698 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200699
700/**
701 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
702 * @wq: the waitqueue to wait on
703 * @condition: a C expression for the event to wait for
704 *
705 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
706 * @condition evaluates to true or a signal is received.
707 * The @condition is checked each time the waitqueue @wq is woken up.
708 *
709 * It must be called with wq.lock being held. This spinlock is
710 * unlocked while sleeping but @condition testing is done while lock
711 * is held and when this macro exits the lock is held.
712 *
713 * The lock is locked/unlocked using spin_lock()/spin_unlock()
714 * functions which must match the way they are locked/unlocked outside
715 * of this macro.
716 *
717 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
718 * set thus when other process waits process on the list if this
719 * process is awaken further processes are not considered.
720 *
721 * wake_up_locked() has to be called after changing any variable that could
722 * change the result of the wait condition.
723 *
724 * The function will return -ERESTARTSYS if it was interrupted by a
725 * signal and 0 if @condition evaluated to true.
726 */
727#define wait_event_interruptible_exclusive_locked(wq, condition) \
728 ((condition) \
Linus Torvaldsbd0f9b32017-03-07 15:33:14 -0800729 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200730
731/**
732 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
733 * @wq: the waitqueue to wait on
734 * @condition: a C expression for the event to wait for
735 *
736 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
737 * @condition evaluates to true or a signal is received.
738 * The @condition is checked each time the waitqueue @wq is woken up.
739 *
740 * It must be called with wq.lock being held. This spinlock is
741 * unlocked while sleeping but @condition testing is done while lock
742 * is held and when this macro exits the lock is held.
743 *
744 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
745 * functions which must match the way they are locked/unlocked outside
746 * of this macro.
747 *
748 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
749 * set thus when other process waits process on the list if this
750 * process is awaken further processes are not considered.
751 *
752 * wake_up_locked() has to be called after changing any variable that could
753 * change the result of the wait condition.
754 *
755 * The function will return -ERESTARTSYS if it was interrupted by a
756 * signal and 0 if @condition evaluated to true.
757 */
758#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
759 ((condition) \
Linus Torvaldsbd0f9b32017-03-07 15:33:14 -0800760 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200761
762
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200763#define __wait_event_killable(wq, condition) \
764 ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
Matthew Wilcox1411d5a2007-12-06 12:00:00 -0500765
766/**
767 * wait_event_killable - sleep until a condition gets true
768 * @wq: the waitqueue to wait on
769 * @condition: a C expression for the event to wait for
770 *
771 * The process is put to sleep (TASK_KILLABLE) until the
772 * @condition evaluates to true or a signal is received.
773 * The @condition is checked each time the waitqueue @wq is woken up.
774 *
775 * wake_up() has to be called after changing any variable that could
776 * change the result of the wait condition.
777 *
778 * The function will return -ERESTARTSYS if it was interrupted by a
779 * signal and 0 if @condition evaluated to true.
780 */
781#define wait_event_killable(wq, condition) \
782({ \
783 int __ret = 0; \
Peter Zijlstrae22b8862014-09-24 10:18:48 +0200784 might_sleep(); \
Matthew Wilcox1411d5a2007-12-06 12:00:00 -0500785 if (!(condition)) \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200786 __ret = __wait_event_killable(wq, condition); \
Matthew Wilcox1411d5a2007-12-06 12:00:00 -0500787 __ret; \
788})
789
Lukas Czernereed8c022012-11-30 11:42:40 +0100790
791#define __wait_event_lock_irq(wq, condition, lock, cmd) \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200792 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
793 spin_unlock_irq(&lock); \
794 cmd; \
795 schedule(); \
796 spin_lock_irq(&lock))
Lukas Czernereed8c022012-11-30 11:42:40 +0100797
798/**
799 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
800 * condition is checked under the lock. This
801 * is expected to be called with the lock
802 * taken.
803 * @wq: the waitqueue to wait on
804 * @condition: a C expression for the event to wait for
805 * @lock: a locked spinlock_t, which will be released before cmd
806 * and schedule() and reacquired afterwards.
807 * @cmd: a command which is invoked outside the critical section before
808 * sleep
809 *
810 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
811 * @condition evaluates to true. The @condition is checked each time
812 * the waitqueue @wq is woken up.
813 *
814 * wake_up() has to be called after changing any variable that could
815 * change the result of the wait condition.
816 *
817 * This is supposed to be called while holding the lock. The lock is
818 * dropped before invoking the cmd and going to sleep and is reacquired
819 * afterwards.
820 */
821#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
822do { \
823 if (condition) \
824 break; \
825 __wait_event_lock_irq(wq, condition, lock, cmd); \
826} while (0)
827
828/**
829 * wait_event_lock_irq - sleep until a condition gets true. The
830 * condition is checked under the lock. This
831 * is expected to be called with the lock
832 * taken.
833 * @wq: the waitqueue to wait on
834 * @condition: a C expression for the event to wait for
835 * @lock: a locked spinlock_t, which will be released before schedule()
836 * and reacquired afterwards.
837 *
838 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
839 * @condition evaluates to true. The @condition is checked each time
840 * the waitqueue @wq is woken up.
841 *
842 * wake_up() has to be called after changing any variable that could
843 * change the result of the wait condition.
844 *
845 * This is supposed to be called while holding the lock. The lock is
846 * dropped before going to sleep and is reacquired afterwards.
847 */
848#define wait_event_lock_irq(wq, condition, lock) \
849do { \
850 if (condition) \
851 break; \
852 __wait_event_lock_irq(wq, condition, lock, ); \
853} while (0)
854
855
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200856#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
Ingo Molnarfb869b62013-10-04 10:24:49 +0200857 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200858 spin_unlock_irq(&lock); \
859 cmd; \
860 schedule(); \
Peter Zijlstra8fbd88f2013-10-02 11:22:28 +0200861 spin_lock_irq(&lock))
Lukas Czernereed8c022012-11-30 11:42:40 +0100862
863/**
864 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
865 * The condition is checked under the lock. This is expected to
866 * be called with the lock taken.
867 * @wq: the waitqueue to wait on
868 * @condition: a C expression for the event to wait for
869 * @lock: a locked spinlock_t, which will be released before cmd and
870 * schedule() and reacquired afterwards.
871 * @cmd: a command which is invoked outside the critical section before
872 * sleep
873 *
874 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
875 * @condition evaluates to true or a signal is received. The @condition is
876 * checked each time the waitqueue @wq is woken up.
877 *
878 * wake_up() has to be called after changing any variable that could
879 * change the result of the wait condition.
880 *
881 * This is supposed to be called while holding the lock. The lock is
882 * dropped before invoking the cmd and going to sleep and is reacquired
883 * afterwards.
884 *
885 * The macro will return -ERESTARTSYS if it was interrupted by a signal
886 * and 0 if @condition evaluated to true.
887 */
888#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
889({ \
890 int __ret = 0; \
Lukas Czernereed8c022012-11-30 11:42:40 +0100891 if (!(condition)) \
Ingo Molnarfb869b62013-10-04 10:24:49 +0200892 __ret = __wait_event_interruptible_lock_irq(wq, \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200893 condition, lock, cmd); \
Lukas Czernereed8c022012-11-30 11:42:40 +0100894 __ret; \
895})
896
897/**
898 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
899 * The condition is checked under the lock. This is expected
900 * to be called with the lock taken.
901 * @wq: the waitqueue to wait on
902 * @condition: a C expression for the event to wait for
903 * @lock: a locked spinlock_t, which will be released before schedule()
904 * and reacquired afterwards.
905 *
906 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
907 * @condition evaluates to true or signal is received. The @condition is
908 * checked each time the waitqueue @wq is woken up.
909 *
910 * wake_up() has to be called after changing any variable that could
911 * change the result of the wait condition.
912 *
913 * This is supposed to be called while holding the lock. The lock is
914 * dropped before going to sleep and is reacquired afterwards.
915 *
916 * The macro will return -ERESTARTSYS if it was interrupted by a signal
917 * and 0 if @condition evaluated to true.
918 */
919#define wait_event_interruptible_lock_irq(wq, condition, lock) \
920({ \
921 int __ret = 0; \
Lukas Czernereed8c022012-11-30 11:42:40 +0100922 if (!(condition)) \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200923 __ret = __wait_event_interruptible_lock_irq(wq, \
Thierry Reding92ec1182013-10-23 13:40:55 +0200924 condition, lock,); \
Lukas Czernereed8c022012-11-30 11:42:40 +0100925 __ret; \
926})
927
Ingo Molnarfb869b62013-10-04 10:24:49 +0200928#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
929 lock, timeout) \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200930 ___wait_event(wq, ___wait_cond_timeout(condition), \
Heiko Carstens7d716452013-10-31 12:48:14 +0100931 TASK_INTERRUPTIBLE, 0, timeout, \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200932 spin_unlock_irq(&lock); \
933 __ret = schedule_timeout(__ret); \
Peter Zijlstraa1dc6852013-10-02 11:22:29 +0200934 spin_lock_irq(&lock));
Martin Peschked79ff142013-08-22 17:45:36 +0200935
936/**
Ingo Molnarfb869b62013-10-04 10:24:49 +0200937 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
938 * true or a timeout elapses. The condition is checked under
939 * the lock. This is expected to be called with the lock taken.
Martin Peschked79ff142013-08-22 17:45:36 +0200940 * @wq: the waitqueue to wait on
941 * @condition: a C expression for the event to wait for
942 * @lock: a locked spinlock_t, which will be released before schedule()
943 * and reacquired afterwards.
944 * @timeout: timeout, in jiffies
945 *
946 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
947 * @condition evaluates to true or signal is received. The @condition is
948 * checked each time the waitqueue @wq is woken up.
949 *
950 * wake_up() has to be called after changing any variable that could
951 * change the result of the wait condition.
952 *
953 * This is supposed to be called while holding the lock. The lock is
954 * dropped before going to sleep and is reacquired afterwards.
955 *
956 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
957 * was interrupted by a signal, and the remaining jiffies otherwise
958 * if the condition evaluated to true before the timeout elapsed.
959 */
960#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
961 timeout) \
962({ \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200963 long __ret = timeout; \
Oleg Nesterov89229152013-10-07 20:31:06 +0200964 if (!___wait_cond_timeout(condition)) \
Peter Zijlstra35a2af92013-10-02 11:22:33 +0200965 __ret = __wait_event_interruptible_lock_irq_timeout( \
966 wq, condition, lock, timeout); \
Martin Peschked79ff142013-08-22 17:45:36 +0200967 __ret; \
968})
969
Linus Torvalds1da177e2005-04-16 15:20:36 -0700970/*
971 * Waitqueues which are removed from the waitqueue_head at wakeup time
972 */
Ingo Molnar50816c42017-03-05 10:33:16 +0100973void prepare_to_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state);
974void prepare_to_wait_exclusive(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state);
975long prepare_to_wait_event(wait_queue_head_t *q, struct wait_queue_entry *wq_entry, int state);
976void finish_wait(wait_queue_head_t *q, struct wait_queue_entry *wq_entry);
977long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
978int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
979int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
980int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981
Eric Dumazetbf368e42009-04-28 02:24:21 -0700982#define DEFINE_WAIT_FUNC(name, function) \
Ingo Molnar50816c42017-03-05 10:33:16 +0100983 struct wait_queue_entry name = { \
Benjamin LaHaisec43dc2f2005-06-23 00:10:27 -0700984 .private = current, \
Eric Dumazetbf368e42009-04-28 02:24:21 -0700985 .func = function, \
blaisorblade@yahoo.it7e43c842005-05-25 01:31:42 +0200986 .task_list = LIST_HEAD_INIT((name).task_list), \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700987 }
988
Eric Dumazetbf368e42009-04-28 02:24:21 -0700989#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
990
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991#define DEFINE_WAIT_BIT(name, word, bit) \
992 struct wait_bit_queue name = { \
993 .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
994 .wait = { \
Benjamin LaHaisec43dc2f2005-06-23 00:10:27 -0700995 .private = current, \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996 .func = wake_bit_function, \
997 .task_list = \
998 LIST_HEAD_INIT((name).wait.task_list), \
999 }, \
1000 }
1001
1002#define init_wait(wait) \
1003 do { \
Benjamin LaHaisec43dc2f2005-06-23 00:10:27 -07001004 (wait)->private = current; \
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005 (wait)->func = autoremove_wake_function; \
1006 INIT_LIST_HEAD(&(wait)->task_list); \
Evgeny Kuznetsov231d0ae2010-10-05 12:47:57 +04001007 (wait)->flags = 0; \
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008 } while (0)
1009
NeilBrown74316202014-07-07 15:16:04 +10001010
Peter Zijlstradfd01f02015-12-13 22:11:16 +01001011extern int bit_wait(struct wait_bit_key *, int);
1012extern int bit_wait_io(struct wait_bit_key *, int);
1013extern int bit_wait_timeout(struct wait_bit_key *, int);
1014extern int bit_wait_io_timeout(struct wait_bit_key *, int);
NeilBrown74316202014-07-07 15:16:04 +10001015
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016/**
1017 * wait_on_bit - wait for a bit to be cleared
1018 * @word: the word being waited on, a kernel virtual address
1019 * @bit: the bit of the word being waited on
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020 * @mode: the task state to sleep in
1021 *
1022 * There is a standard hashed waitqueue table for generic use. This
1023 * is the part of the hashtable's accessor API that waits on a bit.
1024 * For instance, if one were to have waiters on a bitflag, one would
1025 * call wait_on_bit() in threads waiting for the bit to clear.
1026 * One uses wait_on_bit() where one is waiting for the bit to clear,
1027 * but has no intention of setting it.
NeilBrown74316202014-07-07 15:16:04 +10001028 * Returned value will be zero if the bit was cleared, or non-zero
1029 * if the process received a signal and the mode permitted wakeup
1030 * on that signal.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001031 */
Ingo Molnarfb869b62013-10-04 10:24:49 +02001032static inline int
Palmer Dabbelt7e605982015-04-30 21:19:56 -07001033wait_on_bit(unsigned long *word, int bit, unsigned mode)
NeilBrown74316202014-07-07 15:16:04 +10001034{
Peter Zijlstrae22b8862014-09-24 10:18:48 +02001035 might_sleep();
NeilBrown74316202014-07-07 15:16:04 +10001036 if (!test_bit(bit, word))
1037 return 0;
1038 return out_of_line_wait_on_bit(word, bit,
1039 bit_wait,
1040 mode);
1041}
1042
1043/**
1044 * wait_on_bit_io - wait for a bit to be cleared
1045 * @word: the word being waited on, a kernel virtual address
1046 * @bit: the bit of the word being waited on
1047 * @mode: the task state to sleep in
1048 *
1049 * Use the standard hashed waitqueue table to wait for a bit
1050 * to be cleared. This is similar to wait_on_bit(), but calls
1051 * io_schedule() instead of schedule() for the actual waiting.
1052 *
1053 * Returned value will be zero if the bit was cleared, or non-zero
1054 * if the process received a signal and the mode permitted wakeup
1055 * on that signal.
1056 */
1057static inline int
Palmer Dabbelt7e605982015-04-30 21:19:56 -07001058wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
NeilBrown74316202014-07-07 15:16:04 +10001059{
Peter Zijlstrae22b8862014-09-24 10:18:48 +02001060 might_sleep();
NeilBrown74316202014-07-07 15:16:04 +10001061 if (!test_bit(bit, word))
1062 return 0;
1063 return out_of_line_wait_on_bit(word, bit,
1064 bit_wait_io,
1065 mode);
1066}
1067
1068/**
Johan Hedberg44fc0e52015-01-30 13:14:36 +02001069 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1070 * @word: the word being waited on, a kernel virtual address
1071 * @bit: the bit of the word being waited on
1072 * @mode: the task state to sleep in
1073 * @timeout: timeout, in jiffies
1074 *
1075 * Use the standard hashed waitqueue table to wait for a bit
1076 * to be cleared. This is similar to wait_on_bit(), except also takes a
1077 * timeout parameter.
1078 *
1079 * Returned value will be zero if the bit was cleared before the
1080 * @timeout elapsed, or non-zero if the @timeout elapsed or process
1081 * received a signal and the mode permitted wakeup on that signal.
1082 */
1083static inline int
Palmer Dabbelt7e605982015-04-30 21:19:56 -07001084wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1085 unsigned long timeout)
Johan Hedberg44fc0e52015-01-30 13:14:36 +02001086{
1087 might_sleep();
1088 if (!test_bit(bit, word))
1089 return 0;
1090 return out_of_line_wait_on_bit_timeout(word, bit,
1091 bit_wait_timeout,
1092 mode, timeout);
1093}
1094
1095/**
NeilBrown74316202014-07-07 15:16:04 +10001096 * wait_on_bit_action - wait for a bit to be cleared
1097 * @word: the word being waited on, a kernel virtual address
1098 * @bit: the bit of the word being waited on
1099 * @action: the function used to sleep, which may take special actions
1100 * @mode: the task state to sleep in
1101 *
1102 * Use the standard hashed waitqueue table to wait for a bit
1103 * to be cleared, and allow the waiting action to be specified.
1104 * This is like wait_on_bit() but allows fine control of how the waiting
1105 * is done.
1106 *
1107 * Returned value will be zero if the bit was cleared, or non-zero
1108 * if the process received a signal and the mode permitted wakeup
1109 * on that signal.
1110 */
1111static inline int
Palmer Dabbelt7e605982015-04-30 21:19:56 -07001112wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1113 unsigned mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114{
Peter Zijlstrae22b8862014-09-24 10:18:48 +02001115 might_sleep();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116 if (!test_bit(bit, word))
1117 return 0;
1118 return out_of_line_wait_on_bit(word, bit, action, mode);
1119}
1120
1121/**
1122 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1123 * @word: the word being waited on, a kernel virtual address
1124 * @bit: the bit of the word being waited on
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125 * @mode: the task state to sleep in
1126 *
1127 * There is a standard hashed waitqueue table for generic use. This
1128 * is the part of the hashtable's accessor API that waits on a bit
1129 * when one intends to set it, for instance, trying to lock bitflags.
1130 * For instance, if one were to have waiters trying to set bitflag
1131 * and waiting for it to clear before setting it, one would call
1132 * wait_on_bit() in threads waiting to be able to set the bit.
1133 * One uses wait_on_bit_lock() where one is waiting for the bit to
1134 * clear with the intention of setting it, and when done, clearing it.
NeilBrown74316202014-07-07 15:16:04 +10001135 *
1136 * Returns zero if the bit was (eventually) found to be clear and was
1137 * set. Returns non-zero if a signal was delivered to the process and
1138 * the @mode allows that signal to wake the process.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139 */
Ingo Molnarfb869b62013-10-04 10:24:49 +02001140static inline int
Palmer Dabbelt7e605982015-04-30 21:19:56 -07001141wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
NeilBrown74316202014-07-07 15:16:04 +10001142{
Peter Zijlstrae22b8862014-09-24 10:18:48 +02001143 might_sleep();
NeilBrown74316202014-07-07 15:16:04 +10001144 if (!test_and_set_bit(bit, word))
1145 return 0;
1146 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1147}
1148
1149/**
1150 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1151 * @word: the word being waited on, a kernel virtual address
1152 * @bit: the bit of the word being waited on
1153 * @mode: the task state to sleep in
1154 *
1155 * Use the standard hashed waitqueue table to wait for a bit
1156 * to be cleared and then to atomically set it. This is similar
1157 * to wait_on_bit(), but calls io_schedule() instead of schedule()
1158 * for the actual waiting.
1159 *
1160 * Returns zero if the bit was (eventually) found to be clear and was
1161 * set. Returns non-zero if a signal was delivered to the process and
1162 * the @mode allows that signal to wake the process.
1163 */
1164static inline int
Palmer Dabbelt7e605982015-04-30 21:19:56 -07001165wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
NeilBrown74316202014-07-07 15:16:04 +10001166{
Peter Zijlstrae22b8862014-09-24 10:18:48 +02001167 might_sleep();
NeilBrown74316202014-07-07 15:16:04 +10001168 if (!test_and_set_bit(bit, word))
1169 return 0;
1170 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1171}
1172
1173/**
1174 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1175 * @word: the word being waited on, a kernel virtual address
1176 * @bit: the bit of the word being waited on
1177 * @action: the function used to sleep, which may take special actions
1178 * @mode: the task state to sleep in
1179 *
1180 * Use the standard hashed waitqueue table to wait for a bit
1181 * to be cleared and then to set it, and allow the waiting action
1182 * to be specified.
1183 * This is like wait_on_bit() but allows fine control of how the waiting
1184 * is done.
1185 *
1186 * Returns zero if the bit was (eventually) found to be clear and was
1187 * set. Returns non-zero if a signal was delivered to the process and
1188 * the @mode allows that signal to wake the process.
1189 */
1190static inline int
Palmer Dabbelt7e605982015-04-30 21:19:56 -07001191wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1192 unsigned mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193{
Peter Zijlstrae22b8862014-09-24 10:18:48 +02001194 might_sleep();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 if (!test_and_set_bit(bit, word))
1196 return 0;
1197 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1198}
David Howellscb655372013-05-10 19:50:26 +01001199
1200/**
1201 * wait_on_atomic_t - Wait for an atomic_t to become 0
1202 * @val: The atomic value being waited on, a kernel virtual address
1203 * @action: the function used to sleep, which may take special actions
1204 * @mode: the task state to sleep in
1205 *
1206 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
1207 * the purpose of getting a waitqueue, but we set the key to a bit number
1208 * outside of the target 'word'.
1209 */
1210static inline
1211int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1212{
Peter Zijlstrae22b8862014-09-24 10:18:48 +02001213 might_sleep();
David Howellscb655372013-05-10 19:50:26 +01001214 if (atomic_read(val) == 0)
1215 return 0;
1216 return out_of_line_wait_on_atomic_t(val, action, mode);
1217}
Ingo Molnarfb869b62013-10-04 10:24:49 +02001218
1219#endif /* _LINUX_WAIT_H */