blob: 78401ef02d29945dc685f6d259cbc393a658f273 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef _LINUX_WAIT_H
2#define _LINUX_WAIT_H
Ingo Molnarfb869b62013-10-04 10:24:49 +02003/*
4 * Linux wait queue related types and methods
5 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006#include <linux/list.h>
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
Ingo Molnar5b825c32017-02-02 17:54:15 +01009
Linus Torvalds1da177e2005-04-16 15:20:36 -070010#include <asm/current.h>
David Howells607ca462012-10-13 10:46:48 +010011#include <uapi/linux/wait.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070012
typedef struct wait_queue_entry wait_queue_entry_t;

/* Per-entry wake callback; returns non-zero if the entry was woken. */
typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* wake at most one such waiter */
#define WQ_FLAG_WOKEN		0x02	/* used by wait_woken()/woken_wake_function() */
#define WQ_FLAG_BOOKMARK	0x04	/* marker entry to resume a long wake-up scan */
Ingo Molnarac6424b2017-06-20 12:06:13 +020023/*
24 * A single wait-queue entry structure:
25 */
26struct wait_queue_entry {
Ingo Molnarfb869b62013-10-04 10:24:49 +020027 unsigned int flags;
Ingo Molnarfb869b62013-10-04 10:24:49 +020028 void *private;
29 wait_queue_func_t func;
Ingo Molnar2055da92017-06-20 12:06:46 +020030 struct list_head entry;
Linus Torvalds1da177e2005-04-16 15:20:36 -070031};
32
Ingo Molnar9d9d6762017-03-05 11:10:18 +010033struct wait_queue_head {
Ingo Molnarfb869b62013-10-04 10:24:49 +020034 spinlock_t lock;
Ingo Molnar2055da92017-06-20 12:06:46 +020035 struct list_head head;
Linus Torvalds1da177e2005-04-16 15:20:36 -070036};
Ingo Molnar9d9d6762017-03-05 11:10:18 +010037typedef struct wait_queue_head wait_queue_head_t;
Linus Torvalds1da177e2005-04-16 15:20:36 -070038
struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= { &(name).head, &(name).head } }

#define DECLARE_WAIT_QUEUE_HEAD(name)						\
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Runtime initialisation; the static lock_class_key gives each call site its
 * own lockdep class.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
77
Ingo Molnar50816c42017-03-05 10:33:16 +010078static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -070079{
Ingo Molnar50816c42017-03-05 10:33:16 +010080 wq_entry->flags = 0;
81 wq_entry->private = p;
82 wq_entry->func = default_wake_function;
Linus Torvalds1da177e2005-04-16 15:20:36 -070083}
84
Ingo Molnarfb869b62013-10-04 10:24:49 +020085static inline void
Ingo Molnar50816c42017-03-05 10:33:16 +010086init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
Linus Torvalds1da177e2005-04-16 15:20:36 -070087{
Ingo Molnar50816c42017-03-05 10:33:16 +010088 wq_entry->flags = 0;
89 wq_entry->private = NULL;
90 wq_entry->func = func;
Linus Torvalds1da177e2005-04-16 15:20:36 -070091}
92
Peter Zijlstra69e51e922015-10-23 14:32:34 +020093/**
94 * waitqueue_active -- locklessly test for waiters on the queue
Ingo Molnar9d9d6762017-03-05 11:10:18 +010095 * @wq_head: the waitqueue to test for waiters
Peter Zijlstra69e51e922015-10-23 14:32:34 +020096 *
97 * returns true if the wait list is not empty
98 *
99 * NOTE: this function is lockless and requires care, incorrect usage _will_
100 * lead to sporadic and non-obvious failure.
101 *
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100102 * Use either while holding wait_queue_head::lock or when used for wakeups
Peter Zijlstra69e51e922015-10-23 14:32:34 +0200103 * with an extra smp_mb() like:
104 *
105 * CPU0 - waker CPU1 - waiter
106 *
107 * for (;;) {
Ingo Molnar4b1c4802017-03-05 12:07:33 +0100108 * @cond = true; prepare_to_wait(&wq_head, &wait, state);
Peter Zijlstra69e51e922015-10-23 14:32:34 +0200109 * smp_mb(); // smp_mb() from set_current_state()
Ingo Molnar4b1c4802017-03-05 12:07:33 +0100110 * if (waitqueue_active(wq_head)) if (@cond)
111 * wake_up(wq_head); break;
Peter Zijlstra69e51e922015-10-23 14:32:34 +0200112 * schedule();
113 * }
Ingo Molnar4b1c4802017-03-05 12:07:33 +0100114 * finish_wait(&wq_head, &wait);
Peter Zijlstra69e51e922015-10-23 14:32:34 +0200115 *
116 * Because without the explicit smp_mb() it's possible for the
117 * waitqueue_active() load to get hoisted over the @cond store such that we'll
118 * observe an empty wait list while the waiter might not observe @cond.
119 *
120 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
121 * which (when the lock is uncontended) are of roughly equal cost.
122 */
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100123static inline int waitqueue_active(struct wait_queue_head *wq_head)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700124{
Ingo Molnar2055da92017-06-20 12:06:46 +0200125 return !list_empty(&wq_head->head);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126}
127
Herbert Xu1ce0bf52015-11-26 13:55:39 +0800128/**
129 * wq_has_sleeper - check if there are any waiting processes
Ingo Molnar4b1c4802017-03-05 12:07:33 +0100130 * @wq_head: wait queue head
Herbert Xu1ce0bf52015-11-26 13:55:39 +0800131 *
Ingo Molnar4b1c4802017-03-05 12:07:33 +0100132 * Returns true if wq_head has waiting processes
Herbert Xu1ce0bf52015-11-26 13:55:39 +0800133 *
134 * Please refer to the comment for waitqueue_active.
135 */
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100136static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
Herbert Xu1ce0bf52015-11-26 13:55:39 +0800137{
138 /*
139 * We need to be sure we are in sync with the
140 * add_wait_queue modifications to the wait queue.
141 *
142 * This memory barrier should be paired with one on the
143 * waiting side.
144 */
145 smp_mb();
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100146 return waitqueue_active(wq_head);
Herbert Xu1ce0bf52015-11-26 13:55:39 +0800147}
148
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100149extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
150extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
151extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700152
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100153static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154{
Ingo Molnar2055da92017-06-20 12:06:46 +0200155 list_add(&wq_entry->entry, &wq_head->head);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156}
157
158/*
159 * Used for wake-one threads:
160 */
Ingo Molnarfb869b62013-10-04 10:24:49 +0200161static inline void
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100162__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
Changli Gaoa93d2f12010-05-07 14:33:26 +0800163{
Ingo Molnar50816c42017-03-05 10:33:16 +0100164 wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100165 __add_wait_queue(wq_head, wq_entry);
Changli Gaoa93d2f12010-05-07 14:33:26 +0800166}
167
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100168static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169{
Ingo Molnar2055da92017-06-20 12:06:46 +0200170 list_add_tail(&wq_entry->entry, &wq_head->head);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171}
172
Ingo Molnarfb869b62013-10-04 10:24:49 +0200173static inline void
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100174__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
Changli Gaoa93d2f12010-05-07 14:33:26 +0800175{
Ingo Molnar50816c42017-03-05 10:33:16 +0100176 wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100177 __add_wait_queue_entry_tail(wq_head, wq_entry);
Changli Gaoa93d2f12010-05-07 14:33:26 +0800178}
179
Ingo Molnarfb869b62013-10-04 10:24:49 +0200180static inline void
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100181__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182{
Ingo Molnar2055da92017-06-20 12:06:46 +0200183 list_del(&wq_entry->entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184}
185
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);

/* nr == 0 means "wake all"; nr == 1 wakes a single exclusive waiter. */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * The poll mask @m is passed to the wake functions via @key.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
Peter Zijlstra0ccf8312008-02-04 22:27:20 -0800214
/*
 * Evaluate @condition for the timeout variants; forces a final __ret of 1
 * when the condition turned true exactly as the timeout expired.
 * Relies on a __ret variable being in scope (see ___wait_event()).
 */
#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

/* True unless @state is provably a non-signal-receiving sleep state. */
#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)		\

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
Peter Zijlstra41a14312013-10-02 11:22:21 +0200264
#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)
288
#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule(), so the sleep
 * is accounted as I/O wait.
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)
303
#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})
328
#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})
361
#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})
379
#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)
417
#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})
445
#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})
480
#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);	\
	hrtimer_init_sleeper(&__t, current);					\
	if ((timeout) != KTIME_MAX)						\
		hrtimer_start_range_ns(&__t.timer, timeout,			\
				       current->timer_slack_ns,			\
				       HRTIMER_MODE_REL);			\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})
530
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})
556
#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

/* Interruptible wait as a wake-one (exclusive) waiter; 0 or -ERESTARTSYS. */
#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})
569
#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

/* Like wait_event_interruptible_exclusive() but only fatal signals wake us. */
#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);		\
	__ret;									\
})
582
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200583
Ingo Molnar4b1c4802017-03-05 12:07:33 +0100584#define __wait_event_freezable_exclusive(wq, condition) \
585 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
Peter Zijlstra36df04b2014-10-29 12:21:57 +0100586 schedule(); try_to_freeze())
587
Ingo Molnar4b1c4802017-03-05 12:07:33 +0100588#define wait_event_freezable_exclusive(wq, condition) \
589({ \
590 int __ret = 0; \
591 might_sleep(); \
592 if (!(condition)) \
593 __ret = __wait_event_freezable_exclusive(wq, condition); \
594 __ret; \
Peter Zijlstra36df04b2014-10-29 12:21:57 +0100595})
596
Ingo Molnarac6424b2017-06-20 12:06:13 +0200597extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
598extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
Peter Zijlstra36df04b2014-10-29 12:21:57 +0100599
/* Common helper: wait with wq.lock held, using @fn to sleep/relock. */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
({										\
	int __ret;								\
	DEFINE_WAIT(__wait);							\
	if (exclusive)								\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
	do {									\
		__ret = fn(&(wq), &__wait);					\
		if (__ret)							\
			break;							\
	} while (!(condition));							\
	__remove_wait_queue(&(wq), &__wait);					\
	__set_current_state(TASK_RUNNING);					\
	__ret;									\
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)				\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200670
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so that when several processes are waiting on the list and this
 * one is woken up, further exclusive waiters are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200701
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so that when several processes are waiting on the list and this
 * one is woken up, further exclusive waiters are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
Michal Nazarewicz22c43c82010-05-05 12:53:11 +0200732
733
/* Sleep in TASK_KILLABLE: only fatal (kill) signals interrupt the wait. */
#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
Matthew Wilcox1411d5a2007-12-06 12:00:00 -0500736
/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a kill
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq_head, condition);	\
	__ret;								\
})
760
/*
 * TASK_KILLABLE sleep with a jiffies timeout; __ret carries the
 * remaining time across iterations (see ___wait_cond_timeout()).
 */
#define __wait_event_killable_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_KILLABLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))
765
/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_killable_timeout(wq_head,		\
						condition, timeout);	\
	__ret;								\
})
797
Lukas Czernereed8c022012-11-30 11:42:40 +0100798
/*
 * Sleep in TASK_UNINTERRUPTIBLE with @lock held by the caller: @lock is
 * released (and @cmd run) before every schedule() and reacquired after.
 * The (void) cast discards ___wait_event()'s statement-expression value.
 */
#define __wait_event_lock_irq(wq_head, condition, lock, cmd)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))
Lukas Czernereed8c022012-11-30 11:42:40 +0100805
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);		\
} while (0)
835
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq_head, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq_head, condition, lock, );		\
} while (0)
862
863
/*
 * TASK_INTERRUPTIBLE variant of __wait_event_lock_irq(): @lock is
 * released (and @cmd run) before every schedule() and reacquired after;
 * a pending signal aborts the wait with -ERESTARTSYS.
 */
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,	\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))
Lukas Czernereed8c022012-11-30 11:42:40 +0100870
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
						condition, lock, cmd);	\
	__ret;								\
})
904
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
						condition, lock,);	\
	__ret;								\
})
935
/*
 * TASK_INTERRUPTIBLE sleep with @lock held by the caller and a jiffies
 * timeout: @lock is dropped around each schedule_timeout() and reacquired
 * afterwards; __ret carries the remaining jiffies between iterations.
 *
 * Note: the stray ';' that used to terminate this definition has been
 * removed.  It expanded to an extra empty statement at every use site
 * and would have broken expansion inside an unbraced if/else.
 */
#define __wait_event_interruptible_lock_irq_timeout(wq_head, condition,	\
						    lock, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock))
Martin Peschked79ff142013-08-22 17:45:36 +0200943
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq_head, condition, lock, timeout); \
	__ret;								\
})
977
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978/*
979 * Waitqueues which are removed from the waitqueue_head at wakeup time
980 */
Ingo Molnar9d9d6762017-03-05 11:10:18 +0100981void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
982void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
983long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
984void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
Ingo Molnar50816c42017-03-05 10:33:16 +0100985long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
986int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
987int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988
/*
 * Declare an on-stack wait queue entry for the current task with a
 * caller-supplied wake function; the list node starts self-linked.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	struct wait_queue_entry name = {				\
		.private	= current,				\
		.func		= function,				\
		.entry		= LIST_HEAD_INIT((name).entry),		\
	}
995
Eric Dumazetbf368e42009-04-28 02:24:21 -0700996#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
997
/*
 * Initialize an already-allocated wait queue entry in place for the
 * current task, with autoremove wakeup semantics -- the runtime
 * counterpart of DEFINE_WAIT().
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->entry);				\
		(wait)->flags = 0;					\
	} while (0)
1005
Ingo Molnarfb869b62013-10-04 10:24:49 +02001006#endif /* _LINUX_WAIT_H */