/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_NONE		= NR_CPUS + 1,
	WORK_CPU_LAST		= WORK_CPU_NONE,

	/*
	 * Reserve 7 bits off of cwq pointer w/ debugobjects turned
	 * off.  This makes cwqs aligned to 256 bytes and allows 15
	 * workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};
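
/*
 * Illustrative sketch (not compiled, not part of this header): how the
 * packed data word described above is typically decoded.  The low
 * WORK_STRUCT_FLAG_BITS carry flags and flush color, so masking them
 * off recovers the cwq pointer when WORK_STRUCT_CWQ is set.  The helper
 * name below is hypothetical; the real decoding lives in
 * kernel/workqueue.c.
 */
#if 0
static inline void *example_work_data_to_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	/* only meaningful while the item is (or was) queued on a cwq */
	if (data & WORK_STRUCT_CWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	return NULL;	/* data holds a cpu number or WORK_STRUCT_NO_CPU */
}
#endif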

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
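
/*
 * Illustrative example (not compiled, not part of this header): a work
 * function can recover its containing delayed_work with
 * to_delayed_work() and the surrounding object with container_of().
 * All identifiers below are hypothetical.
 */
#if 0
struct example_dev {
	struct delayed_work	poll_work;
	/* ... driver state ... */
};

static void example_poll_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct example_dev *dev = container_of(dwork, struct example_dev,
					       poll_work);

	/* use dev, then optionally re-arm the delayed work */
	schedule_delayed_work(&dev->poll_work, HZ);
}
#endif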

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define __DEFERRED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0),	\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

#define DECLARE_DEFERRED_WORK(n, f)				\
	struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
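
/*
 * Illustrative example (not compiled, not part of this header):
 * DECLARE_WORK() defines and statically initializes a work item in one
 * step.  All identifiers below are hypothetical.
 */
#if 0
static void example_event_fn(struct work_struct *work)
{
	/* runs later in process context from a workqueue worker */
}

static DECLARE_WORK(example_event_work, example_event_fn);

static void example_trigger(void)
{
	/* safe from atomic context; example_event_fn() runs later */
	schedule_work(&example_event_work);
}
#endif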

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)				\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
	do {							\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)
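
/*
 * Illustrative example (not compiled, not part of this header): the
 * INIT_*() macros are for work items embedded in dynamically allocated
 * or runtime-initialized objects.  All identifiers below are
 * hypothetical.
 */
#if 0
static void example_tx_fn(struct work_struct *work);
static void example_timeout_fn(struct work_struct *work);

struct example_ctx {
	struct work_struct	tx_work;
	struct delayed_work	timeout_work;
};

static void example_ctx_init(struct example_ctx *ctx)
{
	INIT_WORK(&ctx->tx_work, example_tx_fn);
	INIT_DELAYED_WORK(&ctx->timeout_work, example_timeout_fn);
}
#endif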

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @work: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */

	WQ_DYING		= 1 << 6, /* internal: workqueue is dying */
	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
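
/*
 * Illustrative example (not compiled, not part of this header): most
 * users simply call schedule_work()/schedule_delayed_work(), which
 * queue on system_wq; long running items belong on system_long_wq
 * instead.  All identifiers below are hypothetical.
 */
#if 0
static void example_slow_fn(struct work_struct *work)
{
	/* may run for a long time, so keep it off system_wq */
}

static DECLARE_WORK(example_slow_work, example_slow_fn);

static void example_kick(void)
{
	queue_work(system_long_wq, &example_slow_work);
}
#endif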

extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
		      struct lock_class_key *key, const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(name, flags, max_active)		\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(name))				\
		__lock_name = (name);				\
	else							\
		__lock_name = #name;				\
								\
	__alloc_workqueue_key((name), (flags), (max_active),	\
			      &__key, __lock_name);		\
})
#else
#define alloc_workqueue(name, flags, max_active)		\
	__alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
#endif
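
/*
 * Illustrative example (not compiled, not part of this header):
 * allocating a driver's own workqueue with alloc_workqueue().  The
 * name, flags and max_active below are just one plausible combination.
 */
#if 0
static struct workqueue_struct *example_wq;

static int example_init(void)
{
	/* unbound, usable on the memory reclaim path, one item in flight */
	example_wq = alloc_workqueue("example_wq",
				     WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}
#endif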

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @name: name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
static inline struct workqueue_struct *
alloc_ordered_workqueue(const char *name, unsigned int flags)
{
	return alloc_workqueue(name, WQ_UNBOUND | flags, 1);
}

#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)

extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool flush_work_sync(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool flush_delayed_work_sync(struct delayed_work *work);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline bool cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
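
/*
 * Illustrative example (not compiled, not part of this header): a
 * typical teardown sequence for a self-rearming delayed work item.
 * cancel_delayed_work() alone cannot stop a handler that re-arms
 * itself, so shutdown paths use the _sync variant.  The identifiers
 * below are hypothetical.
 */
#if 0
static void example_shutdown(struct example_ctx *ctx)
{
	/* stops the timer and waits for a running handler to finish */
	cancel_delayed_work_sync(&ctx->timeout_work);
}
#endif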

/*
 * Like above, but uses del_timer() instead of del_timer_sync(). This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline bool __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

/* Obsolete. use cancel_delayed_work_sync() */
static inline __deprecated
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
					struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

/* Obsolete. use cancel_delayed_work_sync() */
static inline __deprecated
void cancel_rearming_delayed_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#endif