#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int		flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void			*private;
	wait_queue_func_t	func;
	struct list_head	task_list;
};

struct wait_bit_key {
	void			*flags;
	int			bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
};

struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
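
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * interface): foo_device and foo_wq below are hypothetical names.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);		// file-scope head
 *
 *	struct foo_device {
 *		wait_queue_head_t wq;
 *	};
 *
 *	static int foo_probe(struct foo_device *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);		// run-time, lockdep-aware init
 *		return 0;
 *	}
 *
 * DECLARE_WAIT_QUEUE_HEAD_ONSTACK() is the variant for a head that lives on
 * the stack, so lockdep gets a distinct lock class per call site.
 */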

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags	= 0;
	q->private	= p;
	q->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
	q->flags	= 0;
	q->private	= NULL;
	q->func		= func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
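
/*
 * Illustrative sketch (editor's addition): how the *_poll wakeups pair with a
 * driver's poll() method. foo_device, foo_poll and foo_push_data are
 * hypothetical names.
 *
 *	static unsigned int foo_poll(struct file *file, poll_table *pt)
 *	{
 *		struct foo_device *dev = file->private_data;
 *
 *		poll_wait(file, &dev->wq, pt);
 *		return dev->data_ready ? (POLLIN | POLLRDNORM) : 0;
 *	}
 *
 *	static void foo_push_data(struct foo_device *dev)
 *	{
 *		dev->data_ready = true;
 *		// the key tells pollers which events became ready
 *		wake_up_interruptible_poll(&dev->wq, POLLIN | POLLRDNORM);
 *	}
 */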

#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;						\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})

#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
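
/*
 * Illustrative sketch (editor's addition): a waiter/waker pair built on
 * wait_event()/wake_up(). foo_wq and foo_done are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
 *	static bool foo_done;
 *
 *	// waiter: sleeps uninterruptibly until foo_done is set
 *	static void foo_wait_for_completion(void)
 *	{
 *		wait_event(foo_wq, foo_done);
 *	}
 *
 *	// waker: update the condition first, then wake the queue
 *	static void foo_finish(void)
 *	{
 *		foo_done = true;
 *		wake_up(&foo_wq);
 *	}
 */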

#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})
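
/*
 * Illustrative sketch (editor's addition): a bounded wait on hardware
 * becoming ready. foo_wq and foo_hw_ready are hypothetical; HZ is one
 * second worth of jiffies.
 *
 *	long left = wait_event_timeout(foo_wq, foo_hw_ready, HZ);
 *	if (!left)
 *		return -ETIMEDOUT;	// condition still false after 1s
 *	// otherwise 'left' is the number of jiffies that remained (>= 1)
 */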

#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)

#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})
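
/*
 * Illustrative sketch (editor's addition): an interruptible wait in a read()
 * path, propagating -ERESTARTSYS on signals. foo_wq, foo_data_available()
 * and foo_copy_to_user() are hypothetical.
 *
 *	static ssize_t foo_read(struct file *file, char __user *buf,
 *				size_t len, loff_t *ppos)
 *	{
 *		int err;
 *
 *		err = wait_event_interruptible(foo_wq, foo_data_available());
 *		if (err)
 *			return err;	// -ERESTARTSYS: let the signal be handled
 *		return foo_copy_to_user(buf, len);
 *	}
 */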

#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})
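
/*
 * Illustrative sketch (editor's addition): distinguishing the three outcomes
 * of wait_event_interruptible_timeout(). foo_wq and foo_ready are
 * hypothetical.
 *
 *	long ret = wait_event_interruptible_timeout(foo_wq, foo_ready,
 *						    msecs_to_jiffies(500));
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// 500ms passed, condition still false
 *	if (ret < 0)
 *		return ret;		// -ERESTARTSYS, interrupted by a signal
 *	// ret > 0: condition became true with 'ret' jiffies to spare
 */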

#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
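
/*
 * Illustrative sketch (editor's addition): a high-resolution, sub-jiffy
 * bounded wait. foo_wq and foo_ready are hypothetical.
 *
 *	int err = wait_event_hrtimeout(foo_wq, foo_ready,
 *				       ktime_set(0, 2 * NSEC_PER_MSEC));
 *	if (err == -ETIME)
 *		return -ETIMEDOUT;	// the 2ms budget was exhausted
 *	// err == 0: condition became true within the budget
 */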

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})

#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
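
/*
 * Illustrative sketch (editor's addition): the _locked variants are entered
 * with wq.lock already held, so the condition can be tested under that lock.
 * foo_dev, foo_wq, foo_item and the items list are hypothetical.
 *
 *	spin_lock(&foo_wq.lock);
 *	err = wait_event_interruptible_locked(foo_wq,
 *					      !list_empty(&foo_dev->items));
 *	if (!err)
 *		item = list_first_entry(&foo_dev->items, struct foo_item, node);
 *	spin_unlock(&foo_wq.lock);
 *
 * The producer side must use the matching locked wakeup:
 *
 *	spin_lock(&foo_wq.lock);
 *	list_add_tail(&item->node, &foo_dev->items);
 *	wake_up_locked(&foo_wq);
 *	spin_unlock(&foo_wq.lock);
 */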

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set. Thus, when other processes are waiting on the list and this
 * process is woken up, the remaining processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set. Thus, when other processes are waiting on the list and this
 * process is woken up, the remaining processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))


#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})
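
/*
 * Illustrative sketch (editor's addition): wait_event_killable() sleeps like
 * wait_event() but can still be terminated by a fatal signal, which is what
 * long filesystem/network waits typically want. foo_wq, foo_reply and
 * foo_process_reply() are hypothetical.
 *
 *	int err = wait_event_killable(foo_wq, foo_reply != NULL);
 *	if (err)
 *		return err;	// -ERESTARTSYS: the task was fatally signalled
 *	return foo_process_reply(foo_reply);
 */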


#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)
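
/*
 * Illustrative sketch (editor's addition): waiting for a queue protected by a
 * driver spinlock to drain, with the lock dropped only across the actual
 * sleep. foo_dev, its lock, wq, in_flight counter and FOO_IDLE state are
 * hypothetical.
 *
 *	spin_lock_irq(&foo_dev->lock);
 *	wait_event_lock_irq(foo_dev->wq, foo_dev->in_flight == 0,
 *			    foo_dev->lock);
 *	// still holding foo_dev->lock here, and in_flight is 0
 *	foo_dev->state = FOO_IDLE;
 *	spin_unlock_irq(&foo_dev->lock);
 */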


#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})

#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1)
 * if the @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
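
/*
 * Illustrative sketch (editor's addition): a bounded, signal-aware wait under
 * an irq-safe spinlock. foo_dev and its fields are hypothetical.
 *
 *	spin_lock_irq(&foo_dev->lock);
 *	ret = wait_event_interruptible_lock_irq_timeout(foo_dev->wq,
 *							foo_dev->resp_ready,
 *							foo_dev->lock,
 *							10 * HZ);
 *	spin_unlock_irq(&foo_dev->lock);
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// 10s elapsed
 *	if (ret < 0)
 *		return ret;		// -ERESTARTSYS
 */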


/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
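
/*
 * Illustrative sketch (editor's addition): the open-coded wait loop that the
 * wait_event*() macros wrap, using DEFINE_WAIT()/prepare_to_wait()/
 * finish_wait(). foo_wq and foo_ready are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&foo_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (foo_ready)
 *			break;
 *		if (signal_pending(current)) {
 *			err = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&foo_wq, &wait);
 */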

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int
wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
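
/*
 * Illustrative sketch (editor's addition): waiting for a "busy" bit owned by
 * another thread to clear. FOO_BUSY, foo_flags and foo_wait_fn are
 * hypothetical; foo_wait_fn is the @action callback that performs the sleep,
 * here simply calling schedule().
 *
 *	static int foo_wait_fn(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	// Block until some other path does clear_bit(FOO_BUSY, &foo_flags)
 *	// followed by wake_up_bit(&foo_flags, FOO_BUSY).
 *	wait_on_bit(&foo_flags, FOO_BUSY, foo_wait_fn, TASK_UNINTERRUPTIBLE);
 */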

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int
wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
	if (atomic_read(val) == 0)
		return 0;
	return out_of_line_wait_on_atomic_t(val, action, mode);
}
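
/*
 * Illustrative sketch (editor's addition): using wait_on_atomic_t() as a
 * lightweight "wait for a counter to drain" primitive. foo_users and
 * foo_wait_fn are hypothetical; the releasing side must call
 * wake_up_atomic_t() once the counter reaches 0.
 *
 *	static int foo_wait_fn(atomic_t *counter)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	// releasing side:
 *	if (atomic_dec_and_test(&foo_users))
 *		wake_up_atomic_t(&foo_users);
 *
 *	// tear-down side, blocks until foo_users reaches 0:
 *	wait_on_atomic_t(&foo_users, foo_wait_fn, TASK_UNINTERRUPTIBLE);
 */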

#endif /* _LINUX_WAIT_H */