#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int		flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void			*private;
	wait_queue_func_t	func;
	struct list_head	task_list;
};

struct wait_bit_key {
	void			*flags;
	int			bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
};

struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private	= tsk, \
	.func		= default_wake_function, \
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock), \
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_waitqueue_head((q), #q, &__key); \
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags	= 0;
	q->private	= p;
	q->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
	q->flags	= 0;
	q->private	= NULL;
	q->func		= func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m) \
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define ___wait_cond_timeout(condition) \
({ \
	bool __cond = (condition); \
	if (__cond && !__ret) \
		__ret = 1; \
	__cond || !__ret; \
})

#define ___wait_is_interruptible(state) \
	(!__builtin_constant_p(state) || \
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
({ \
	__label__ __out; \
	wait_queue_t __wait; \
	long __ret = ret; /* explicit shadow */ \
	\
	INIT_LIST_HEAD(&__wait.task_list); \
	if (exclusive) \
		__wait.flags = WQ_FLAG_EXCLUSIVE; \
	else \
		__wait.flags = 0; \
	\
	for (;;) { \
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
		\
		if (condition) \
			break; \
		\
		if (___wait_is_interruptible(state) && __int) { \
			__ret = __int; \
			if (exclusive) { \
				abort_exclusive_wait(&wq, &__wait, \
						     state, NULL); \
				goto __out; \
			} \
			break; \
		} \
		\
		cmd; \
	} \
	finish_wait(&wq, &__wait); \
__out:	__ret; \
})

#define __wait_event(wq, condition) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
	if (condition) \
		break; \
	__wait_event(wq, condition); \
} while (0)

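/*
 * Usage sketch: a minimal wait_event()/wake_up() pairing.  'my_wq' and
 * 'my_done' are hypothetical names used only for illustration.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_done;
 *
 *	waiter:
 *		wait_event(my_wq, my_done);
 *
 *	waker (update the condition before waking):
 *		my_done = 1;
 *		wake_up(&my_wq);
 */
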
#define __wait_event_timeout(wq, condition, timeout) \
	___wait_event(wq, ___wait_cond_timeout(condition), \
		      TASK_UNINTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_timeout(wq, condition, timeout); \
	__ret; \
})

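/*
 * Usage sketch: checking the wait_event_timeout() return value.  'my_wq',
 * 'my_done' and the 100ms bound are hypothetical.
 *
 *	long left = wait_event_timeout(my_wq, my_done, msecs_to_jiffies(100));
 *	if (!left)
 *		return -ETIMEDOUT;	// timed out, my_done still false
 *	// my_done became true with 'left' jiffies (at least 1) to spare
 */
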
#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleeping
 * @cmd2: the command to be executed after sleeping
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2) \
do { \
	if (condition) \
		break; \
	__wait_event_cmd(wq, condition, cmd1, cmd2); \
} while (0)

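/*
 * Usage sketch: dropping a caller-held lock around the sleep, the pattern
 * wait_event_cmd() exists for.  'my_wq', 'my_cond' and 'my_mutex' are
 * hypothetical names used only for illustration.
 *
 *	mutex_lock(&my_mutex);
 *	wait_event_cmd(my_wq, my_cond,
 *		       mutex_unlock(&my_mutex),	// cmd1: run before sleeping
 *		       mutex_lock(&my_mutex));	// cmd2: run after waking
 *	// my_cond is true and my_mutex is held again here
 *	mutex_unlock(&my_mutex);
 */
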
#define __wait_event_interruptible(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible(wq, condition); \
	__ret; \
})

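/*
 * Usage sketch: an interruptible wait whose caller propagates
 * -ERESTARTSYS.  'my_wq' and 'my_data_ready' are hypothetical.
 *
 *	int err = wait_event_interruptible(my_wq, my_data_ready);
 *	if (err)
 *		return err;	// -ERESTARTSYS: a signal arrived first
 *	// my_data_ready was observed true
 */
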
#define __wait_event_interruptible_timeout(wq, condition, timeout) \
	___wait_event(wq, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_interruptible_timeout(wq, \
						condition, timeout); \
	__ret; \
})

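/*
 * Usage sketch: handling the three possible outcomes of
 * wait_event_interruptible_timeout().  'my_wq', 'my_done' and the one
 * second (HZ) bound are hypothetical.
 *
 *	long ret = wait_event_interruptible_timeout(my_wq, my_done, HZ);
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// timed out, my_done still false
 *	if (ret == -ERESTARTSYS)
 *		return ret;		// interrupted by a signal
 *	// my_done became true; ret is the number of jiffies left (>= 1)
 */
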
#define __wait_event_hrtimeout(wq, condition, timeout, state) \
({ \
	int __ret = 0; \
	struct hrtimer_sleeper __t; \
	\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
			      HRTIMER_MODE_REL); \
	hrtimer_init_sleeper(&__t, current); \
	if ((timeout).tv64 != KTIME_MAX) \
		hrtimer_start_range_ns(&__t.timer, timeout, \
				       current->timer_slack_ns, \
				       HRTIMER_MODE_REL); \
	\
	__ret = ___wait_event(wq, condition, state, 0, 0, \
		if (!__t.task) { \
			__ret = -ETIME; \
			break; \
		} \
		schedule()); \
	\
	hrtimer_cancel(&__t.timer); \
	destroy_hrtimer_on_stack(&__t.timer); \
	__ret; \
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the @timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_UNINTERRUPTIBLE); \
	__ret; \
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
	long __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_INTERRUPTIBLE); \
	__ret; \
})

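/*
 * Usage sketch: a high-resolution bounded wait.  'my_wq', 'my_done' and
 * the 2ms budget are hypothetical; ms_to_ktime() is assumed to be
 * available via <linux/ktime.h>.
 *
 *	long ret = wait_event_interruptible_hrtimeout(my_wq, my_done,
 *						      ms_to_ktime(2));
 *	if (ret == -ETIME)
 *		return ret;		// the 2ms budget expired first
 *	if (ret == -ERESTARTSYS)
 *		return ret;		// interrupted by a signal
 *	// ret == 0: my_done became true in time
 */
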
#define __wait_event_interruptible_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret; \
})


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
	int __ret = 0; \
	DEFINE_WAIT(__wait); \
	if (exclusive) \
		__wait.flags |= WQ_FLAG_EXCLUSIVE; \
	do { \
		if (likely(list_empty(&__wait.task_list))) \
			__add_wait_queue_tail(&(wq), &__wait); \
		set_current_state(TASK_INTERRUPTIBLE); \
		if (signal_pending(current)) { \
			__ret = -ERESTARTSYS; \
			break; \
		} \
		if (irq) \
			spin_unlock_irq(&(wq).lock); \
		else \
			spin_unlock(&(wq).lock); \
		schedule(); \
		if (irq) \
			spin_lock_irq(&(wq).lock); \
		else \
			spin_lock(&(wq).lock); \
	} while (!(condition)); \
	__remove_wait_queue(&(wq), &__wait); \
	__set_current_state(TASK_RUNNING); \
	__ret; \
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so that if this process is woken up, further processes waiting
 * on the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so that if this process is woken up, further processes waiting
 * on the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))

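/*
 * Usage sketch for the *_locked() family: both sides run under wq.lock
 * and the waker uses wake_up_locked().  'my_wq', 'my_cond' and 'err' are
 * hypothetical.
 *
 *	waiter:
 *		spin_lock(&my_wq.lock);
 *		err = wait_event_interruptible_locked(my_wq, my_cond);
 *		spin_unlock(&my_wq.lock);
 *
 *	waker:
 *		spin_lock(&my_wq.lock);
 *		my_cond = true;
 *		wake_up_locked(&my_wq);
 *		spin_unlock(&my_wq.lock);
 */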

#define __wait_event_killable(wq, condition) \
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_killable(wq, condition); \
	__ret; \
})

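/*
 * Usage sketch: like wait_event_interruptible(), but only fatal signals
 * (e.g. SIGKILL) interrupt the sleep.  'my_wq' and 'my_done' are
 * hypothetical.
 *
 *	int err = wait_event_killable(my_wq, my_done);
 *	if (err)
 *		return err;	// -ERESTARTSYS: a fatal signal arrived
 */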

#define __wait_event_lock_irq(wq, condition, lock, cmd) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    spin_unlock_irq(&lock); \
			    cmd; \
			    schedule(); \
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, ); \
} while (0)

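/*
 * Usage sketch: a consumer that waits for work under a spinlock.  Note
 * that the macro takes the lock by name (it applies '&' itself).
 * 'my_lock', 'my_wq', 'my_list' and 'struct my_item' are hypothetical.
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, !list_empty(&my_list), my_lock);
 *	// my_lock is held again here and my_list is non-empty
 *	item = list_first_entry(&my_list, struct my_item, node);
 *	list_del(&item->node);
 *	spin_unlock_irq(&my_lock);
 */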

#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      spin_unlock_irq(&lock); \
		      cmd; \
		      schedule(); \
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_lock_irq(wq, \
						condition, lock, cmd); \
	__ret; \
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_lock_irq(wq, \
						condition, lock,); \
	__ret; \
})

#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
						     lock, timeout) \
	___wait_event(wq, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      spin_unlock_irq(&lock); \
		      __ret = schedule_timeout(__ret); \
		      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies if the
 * @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
						  timeout) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_interruptible_lock_irq_timeout( \
					wq, condition, lock, timeout); \
	__ret; \
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
	wait_queue_t name = { \
		.private	= current, \
		.func		= function, \
		.task_list	= LIST_HEAD_INIT((name).task_list), \
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

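/*
 * Usage sketch: the open-coded wait loop that DEFINE_WAIT(),
 * prepare_to_wait() and finish_wait() are designed for.  'my_wq',
 * 'my_cond' and 'err' are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_cond)
 *			break;
 *		if (signal_pending(current)) {
 *			err = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */
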
#define DEFINE_WAIT_BIT(name, word, bit) \
	struct wait_bit_queue name = { \
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
		.wait	= { \
			.private	= current, \
			.func		= wake_bit_function, \
			.task_list	= \
				LIST_HEAD_INIT((name).wait.task_list), \
		}, \
	}

#define init_wait(wait) \
	do { \
		(wait)->private = current; \
		(wait)->func = autoremove_wake_function; \
		INIT_LIST_HEAD(&(wait)->task_list); \
		(wait)->flags = 0; \
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int
wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

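/*
 * Usage sketch: waiting for a flag bit with a caller-supplied sleep
 * action.  'my_flags', 'MY_PENDING_BIT' and 'my_bit_wait' are
 * hypothetical; the waker clears the bit, issues the memory barrier
 * appropriate for its kernel version, and then calls wake_up_bit().
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	waiter:
 *		wait_on_bit(&my_flags, MY_PENDING_BIT, my_bit_wait,
 *			    TASK_UNINTERRUPTIBLE);
 *
 *	waker:
 *		clear_bit(MY_PENDING_BIT, &my_flags);
 *		smp_mb__after_clear_bit();	// barrier before the wakeup
 *		wake_up_bit(&my_flags, MY_PENDING_BIT);
 */
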
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int
wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
	if (atomic_read(val) == 0)
		return 0;
	return out_of_line_wait_on_atomic_t(val, action, mode);
}

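/*
 * Usage sketch: blocking until a reference count managed as an atomic_t
 * drops to zero.  'my_obj', its 'refs' field and 'my_atomic_wait' are
 * hypothetical.
 *
 *	static int my_atomic_wait(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	waiter:
 *		wait_on_atomic_t(&my_obj->refs, my_atomic_wait,
 *				 TASK_UNINTERRUPTIBLE);
 *
 *	releaser:
 *		if (atomic_dec_and_test(&my_obj->refs))
 *			wake_up_atomic_t(&my_obj->refs);
 */
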
#endif /* _LINUX_WAIT_H */