#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int		flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void			*private;
	wait_queue_func_t	func;
	struct list_head	task_list;
};

struct wait_bit_key {
	void			*flags;
	int			bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
	unsigned long		private;
};

struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

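/*
 * Example (illustrative only, not part of the original header): a wait
 * queue head can be declared statically with DECLARE_WAIT_QUEUE_HEAD()
 * or embedded in a structure and set up at run time with
 * init_waitqueue_head().  The names "my_wq" and "my_device" below are
 * hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *
 *	struct my_device {
 *		wait_queue_head_t	wq;
 *	};
 *
 *	static void my_device_setup(struct my_device *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);
 *	}
 */
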
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags	= 0;
	q->private	= p;
	q->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
	q->flags	= 0;
	q->private	= NULL;
	q->func		= func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}

typedef int wait_bit_action_f(struct wait_bit_key *);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})

#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

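/*
 * Example (illustrative only): a typical producer/consumer pairing of
 * wait_event() and wake_up().  The names "my_wq" and "my_data_ready"
 * below are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool my_data_ready;
 *
 *	// consumer: sleeps uninterruptibly until the flag is set
 *	static void my_consumer(void)
 *	{
 *		wait_event(my_wq, my_data_ready);
 *	}
 *
 *	// producer: updates the condition, then wakes the queue
 *	static void my_producer(void)
 *	{
 *		my_data_ready = true;
 *		wake_up(&my_wq);
 *	}
 */
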
#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})

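/*
 * Example (illustrative only): waiting for a hypothetical completion
 * flag "my_done" for up to one second and distinguishing timeout from
 * success by the return value.
 *
 *	long remaining;
 *
 *	remaining = wait_event_timeout(my_wq, my_done, HZ);
 *	if (!remaining)
 *		pr_warn("timed out waiting for my_done\n");
 *	else
 *		pr_debug("done with %ld jiffies to spare\n", remaining);
 */
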
#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleeping
 * @cmd2: the command to be executed after sleeping
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)

#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})

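/*
 * Example (illustrative only): an interruptible wait, typically used on
 * a path where a signal should abort the sleep and be reported back to
 * user space.  "my_wq" and "my_data_ready" are hypothetical.
 *
 *	static int my_read_wait(void)
 *	{
 *		int ret;
 *
 *		ret = wait_event_interruptible(my_wq, my_data_ready);
 *		if (ret)
 *			return ret;	// -ERESTARTSYS: interrupted by a signal
 *		// my_data_ready is true here
 *		return 0;
 *	}
 */
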
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})

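/*
 * Example (illustrative only): the three possible outcomes of
 * wait_event_interruptible_timeout() for a hypothetical flag "my_done".
 *
 *	long ret = wait_event_interruptible_timeout(my_wq, my_done, HZ);
 *
 *	if (ret == 0)
 *		;		// timed out, my_done still false
 *	else if (ret == -ERESTARTSYS)
 *		;		// interrupted by a signal
 *	else
 *		;		// my_done became true, ret jiffies were left
 */
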
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the @timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the
 * @timeout elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})

#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})


/**
 * wait_event_interruptible_locked - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held.  The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held.  The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held.  The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when it is woken up, other processes waiting on the same
 * queue are not considered for that wakeup.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held.  The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when it is woken up, other processes waiting on the same
 * queue are not considered for that wakeup.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))


#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})

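/*
 * Example (illustrative only): like wait_event_interruptible(), but only
 * a fatal signal (e.g. SIGKILL) can interrupt the sleep.  "my_wq" and
 * "my_done" are hypothetical.
 *
 *	int ret = wait_event_killable(my_wq, my_done);
 *	if (ret)
 *		return ret;	// -ERESTARTSYS: a fatal signal arrived
 */
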

#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition becomes true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition becomes true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)

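/*
 * Example (illustrative only): waiting for a hypothetical queue to drain
 * while its state is protected by "my_lock".  The condition is evaluated
 * with the lock held; the lock is dropped only around schedule().
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, list_empty(&my_pending), my_lock);
 *	// my_lock is held again here and my_pending was seen empty
 *	spin_unlock_irq(&my_lock);
 */
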

#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition becomes true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition becomes true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})

#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition becomes
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1)
 * if the @condition evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

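/*
 * Example (illustrative only): the open-coded wait loop that the
 * wait_event*() macros are built around, for cases that need custom
 * behaviour between prepare_to_wait() and schedule().  "my_wq" and
 * "my_condition" are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */
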
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)


extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit(void *word, int bit, unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait,
				       mode);
}

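/*
 * Example (illustrative only): waiting for a hypothetical "busy" bit in
 * a flags word to clear, with the owner clearing the bit and then waking
 * waiters through the shared hashed waitqueue.
 *
 *	#define MY_FLAG_BUSY	0
 *	unsigned long my_flags;
 *
 *	// waiter: sleep (killable) until bit 0 of my_flags is clear
 *	if (wait_on_bit(&my_flags, MY_FLAG_BUSY, TASK_KILLABLE))
 *		return -EINTR;	// a fatal signal arrived first
 *
 *	// owner: clear the bit, then wake anyone waiting on it
 *	clear_bit(MY_FLAG_BUSY, &my_flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&my_flags, MY_FLAG_BUSY);
 */
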
/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared.  This is similar to wait_on_bit(), but calls
 * io_schedule() instead of schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_io(void *word, int bit, unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait_io,
				       mode);
}

/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared, and allow the waiting action to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set.  Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock(void *word, int bit, unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}

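/*
 * Example (illustrative only): using a flag bit as a simple lock.  The
 * bit number and flags word below are hypothetical.
 *
 *	// acquire: sleep until the bit could be atomically set by us
 *	if (wait_on_bit_lock(&my_flags, MY_FLAG_LOCK, TASK_KILLABLE))
 *		return -EINTR;	// interrupted before the bit was acquired
 *
 *	// ... critical section, we own MY_FLAG_LOCK ...
 *
 *	// release: clear the bit and wake the next waiter, if any
 *	clear_bit_unlock(MY_FLAG_LOCK, &my_flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&my_flags, MY_FLAG_LOCK);
 */
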
/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to atomically set it.  This is similar
 * to wait_on_bit(), but calls io_schedule() instead of schedule()
 * for the actual waiting.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set.  Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_io(void *word, int bit, unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to set it, and allow the waiting action
 * to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set.  Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
	if (atomic_read(val) == 0)
		return 0;
	return out_of_line_wait_on_atomic_t(val, action, mode);
}
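
/*
 * Example (illustrative only): waiting for a hypothetical reference count
 * to drop to zero.  The @action callback decides how to sleep; a minimal
 * one just calls schedule().
 *
 *	static int my_wait_action(atomic_t *val)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	// waiter
 *	wait_on_atomic_t(&my_refcount, my_wait_action, TASK_UNINTERRUPTIBLE);
 *
 *	// releaser: after dropping the count to zero
 *	if (atomic_dec_and_test(&my_refcount))
 *		wake_up_atomic_t(&my_refcount);
 */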

#endif /* _LINUX_WAIT_H */