#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int		flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void			*private;
	wait_queue_func_t	func;
	struct list_head	task_list;
};

struct wait_bit_key {
	void			*flags;
	int			bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
	unsigned long		timeout;
};

struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
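
/*
 * Illustrative usage sketch (not part of the original header): a wait queue
 * head can be declared statically with DECLARE_WAIT_QUEUE_HEAD() or embedded
 * in a structure and set up at runtime with init_waitqueue_head().  The
 * structure and field names below are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
 *
 *	struct foo_device {
 *		wait_queue_head_t	read_wq;
 *		bool			data_ready;
 *	};
 *
 *	static void foo_device_setup(struct foo_device *dev)
 *	{
 *		init_waitqueue_head(&dev->read_wq);
 *		dev->data_ready = false;
 *	}
 */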
86
Linus Torvalds1da177e2005-04-16 15:20:36 -070087static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
88{
Ingo Molnarfb869b62013-10-04 10:24:49 +020089 q->flags = 0;
90 q->private = p;
91 q->func = default_wake_function;
Linus Torvalds1da177e2005-04-16 15:20:36 -070092}
93
Ingo Molnarfb869b62013-10-04 10:24:49 +020094static inline void
95init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
Linus Torvalds1da177e2005-04-16 15:20:36 -070096{
Ingo Molnarfb869b62013-10-04 10:24:49 +020097 q->flags = 0;
98 q->private = NULL;
99 q->func = func;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100}
101
102static inline int waitqueue_active(wait_queue_head_t *q)
103{
104 return !list_empty(&q->task_list);
105}
106
Harvey Harrisonb3c97522008-02-13 15:03:15 -0800107extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
108extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
109extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110
111static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
112{
113 list_add(&new->task_list, &head->task_list);
114}
115
116/*
117 * Used for wake-one threads:
118 */
Ingo Molnarfb869b62013-10-04 10:24:49 +0200119static inline void
120__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
Changli Gaoa93d2f12010-05-07 14:33:26 +0800121{
122 wait->flags |= WQ_FLAG_EXCLUSIVE;
123 __add_wait_queue(q, wait);
124}
125
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126static inline void __add_wait_queue_tail(wait_queue_head_t *head,
Changli Gaoa93d2f12010-05-07 14:33:26 +0800127 wait_queue_t *new)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128{
129 list_add_tail(&new->task_list, &head->task_list);
130}
131
Ingo Molnarfb869b62013-10-04 10:24:49 +0200132static inline void
133__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
Changli Gaoa93d2f12010-05-07 14:33:26 +0800134{
135 wait->flags |= WQ_FLAG_EXCLUSIVE;
136 __add_wait_queue_tail(q, wait);
137}
138
Ingo Molnarfb869b62013-10-04 10:24:49 +0200139static inline void
140__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141{
142 list_del(&old->task_list);
143}
144
NeilBrownc1221322014-07-07 15:16:04 +1000145typedef int wait_bit_action_f(struct wait_bit_key *);
Harvey Harrisonb3c97522008-02-13 15:03:15 -0800146void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
Davide Libenzi4ede8162009-03-31 15:24:20 -0700147void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
Ingo Molnarfb869b62013-10-04 10:24:49 +0200148void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
Thomas Gleixner63b20012011-12-01 00:04:00 +0100149void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
Davide Libenzi4ede8162009-03-31 15:24:20 -0700150void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
Harvey Harrisonb3c97522008-02-13 15:03:15 -0800151void __wake_up_bit(wait_queue_head_t *, void *, int);
NeilBrownc1221322014-07-07 15:16:04 +1000152int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
153int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
Harvey Harrisonb3c97522008-02-13 15:03:15 -0800154void wake_up_bit(void *, int);
David Howellscb655372013-05-10 19:50:26 +0100155void wake_up_atomic_t(atomic_t *);
NeilBrownc1221322014-07-07 15:16:04 +1000156int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
NeilBrowncbbce822014-09-25 13:55:19 +1000157int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
NeilBrownc1221322014-07-07 15:16:04 +1000158int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
David Howellscb655372013-05-10 19:50:26 +0100159int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
Harvey Harrisonb3c97522008-02-13 15:03:15 -0800160wait_queue_head_t *bit_waitqueue(void *, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700161
Matthew Wilcoxe64d66c2007-12-06 17:34:36 -0500162#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
163#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
164#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
Thomas Gleixner63b20012011-12-01 00:04:00 +0100165#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
166#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
Matthew Wilcoxe64d66c2007-12-06 17:34:36 -0500167
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
169#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
170#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
Matthew Wilcoxe64d66c2007-12-06 17:34:36 -0500171#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700172
Peter Zijlstra0ccf8312008-02-04 22:27:20 -0800173/*
Davide Libenzic0da3772009-03-31 15:24:20 -0700174 * Wakeup macros to be used to report events to the targets.
Peter Zijlstra0ccf8312008-02-04 22:27:20 -0800175 */
Ingo Molnarfb869b62013-10-04 10:24:49 +0200176#define wake_up_poll(x, m) \
Davide Libenzic0da3772009-03-31 15:24:20 -0700177 __wake_up(x, TASK_NORMAL, 1, (void *) (m))
Ingo Molnarfb869b62013-10-04 10:24:49 +0200178#define wake_up_locked_poll(x, m) \
Davide Libenzic0da3772009-03-31 15:24:20 -0700179 __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
Ingo Molnarfb869b62013-10-04 10:24:49 +0200180#define wake_up_interruptible_poll(x, m) \
Davide Libenzic0da3772009-03-31 15:24:20 -0700181 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
182#define wake_up_interruptible_sync_poll(x, m) \
183 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
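
/*
 * Illustrative usage sketch (not part of the original header): a driver's
 * poll() method typically pairs poll_wait() with one of the *_poll wakeup
 * macros above so that the poll key carries the event mask.  The names
 * foo_device, foo_poll and foo_push_data below are hypothetical.
 *
 *	static unsigned int foo_poll(struct file *file, poll_table *pt)
 *	{
 *		struct foo_device *dev = file->private_data;
 *
 *		poll_wait(file, &dev->read_wq, pt);
 *		return dev->data_ready ? (POLLIN | POLLRDNORM) : 0;
 *	}
 *
 *	static void foo_push_data(struct foo_device *dev)
 *	{
 *		dev->data_ready = true;
 *		wake_up_interruptible_poll(&dev->read_wq, POLLIN | POLLRDNORM);
 *	}
 */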

#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})

#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
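
/*
 * Illustrative usage sketch (not part of the original header): a minimal
 * producer/consumer pairing of wait_event() and wake_up().  The names are
 * hypothetical; note that the flag is written before wake_up() is called,
 * as required by the comment above.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
 *	static int foo_done;
 *
 *	static void foo_consumer(void)
 *	{
 *		wait_event(foo_wq, foo_done);
 *	}
 *
 *	static void foo_producer(void)
 *	{
 *		foo_done = 1;
 *		wake_up(&foo_wq);
 *	}
 */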

#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})

#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)

#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})
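
/*
 * Illustrative usage sketch (not part of the original header): callers of
 * wait_event_interruptible() are expected to propagate -ERESTARTSYS so the
 * signal can be handled or the syscall restarted.  The names below are
 * hypothetical.
 *
 *	static int foo_read_wait(struct foo_device *dev)
 *	{
 *		int ret;
 *
 *		ret = wait_event_interruptible(dev->read_wq, dev->data_ready);
 *		if (ret)
 *			return ret;
 *		return 0;
 *	}
 */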

#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})
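
/*
 * Illustrative usage sketch (not part of the original header): the long
 * return value of wait_event_interruptible_timeout() folds the timeout,
 * signal and success cases together, so callers usually check it in this
 * order.  The names and the -ETIMEDOUT mapping below are hypothetical.
 *
 *	long ret;
 *
 *	ret = wait_event_interruptible_timeout(foo_wq, foo_done,
 *					       msecs_to_jiffies(100));
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 *	return 0;
 */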

#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})

#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})


/**
 * wait_event_interruptible_locked - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. This spinlock is
 * unlocked while sleeping, but @condition testing is done while the
 * lock is held, and the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. This spinlock is
 * unlocked while sleeping, but @condition testing is done while the
 * lock is held, and the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. This spinlock is
 * unlocked while sleeping, but @condition testing is done while the
 * lock is held, and the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up, further processes waiting on the
 * list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. This spinlock is
 * unlocked while sleeping, but @condition testing is done while the
 * lock is held, and the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up, further processes waiting on the
 * list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))


#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})


#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition becomes true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition becomes true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)
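
/*
 * Illustrative usage sketch (not part of the original header): the waiting
 * side calls wait_event_lock_irq() with the caller's own spinlock held, and
 * the waking side updates the condition under that same lock before calling
 * wake_up().  The names below are hypothetical.
 *
 *	spin_lock_irq(&dev->lock);
 *	wait_event_lock_irq(dev->wq, dev->buffer_free, dev->lock);
 *	dev->buffer_free = false;
 *	spin_unlock_irq(&dev->lock);
 *
 * and on the wake-up side:
 *
 *	spin_lock_irq(&dev->lock);
 *	dev->buffer_free = true;
 *	spin_unlock_irq(&dev->lock);
 *	wake_up(&dev->wq);
 */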


#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition becomes true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition becomes true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})

#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition becomes
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1)
 * if the @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
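
/*
 * Illustrative usage sketch (not part of the original header): the
 * wait_event*() macros above are built on this open-coded pattern, which
 * is still used directly when extra work is needed between
 * prepare_to_wait() and schedule().  The names below are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&foo_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (foo_done)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&foo_wq, &wait);
 */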

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)


extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
extern int bit_wait_timeout(struct wait_bit_key *);
extern int bit_wait_io_timeout(struct wait_bit_key *);

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit(void *word, int bit, unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait,
				       mode);
}
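
/*
 * Illustrative usage sketch (not part of the original header): a typical
 * pattern guards a "busy" bit in an unsigned long flags word with
 * wait_on_bit() on the waiting side and clear_bit()/smp_mb__after_atomic()/
 * wake_up_bit() on the releasing side.  The names and bit number below are
 * hypothetical.
 *
 *	#define FOO_BUSY	0
 *
 *	static int foo_wait_idle(struct foo_device *dev)
 *	{
 *		return wait_on_bit(&dev->flags, FOO_BUSY, TASK_INTERRUPTIBLE);
 *	}
 *
 *	static void foo_clear_busy(struct foo_device *dev)
 *	{
 *		clear_bit(FOO_BUSY, &dev->flags);
 *		smp_mb__after_atomic();
 *		wake_up_bit(&dev->flags, FOO_BUSY);
 *	}
 */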

/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), but calls
 * io_schedule() instead of schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_io(void *word, int bit, unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait_io,
				       mode);
}

/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared, and allow the waiting action to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock(void *word, int bit, unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}

/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to atomically set it. This is similar
 * to wait_on_bit(), but calls io_schedule() instead of schedule()
 * for the actual waiting.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_io(void *word, int bit, unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to set it, and allow the waiting action
 * to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
	if (atomic_read(val) == 0)
		return 0;
	return out_of_line_wait_on_atomic_t(val, action, mode);
}
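
/*
 * Illustrative usage sketch (not part of the original header): a common
 * pattern waits for an in-flight counter to drain to zero, with the last
 * decrement calling wake_up_atomic_t().  The action callback and names
 * below are hypothetical.
 *
 *	static int foo_wait_action(atomic_t *val)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	static void foo_put(struct foo_device *dev)
 *	{
 *		if (atomic_dec_and_test(&dev->inflight))
 *			wake_up_atomic_t(&dev->inflight);
 *	}
 *
 *	static void foo_drain(struct foo_device *dev)
 *	{
 *		wait_on_atomic_t(&dev->inflight, foo_wait_action,
 *				 TASK_UNINTERRUPTIBLE);
 *	}
 */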

#endif /* _LINUX_WAIT_H */