/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is reserved as "no color", used for works which
	 * don't participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_NONE		= NR_CPUS + 1,
	WORK_CPU_LAST		= WORK_CPU_NONE,

	/*
	 * Reserve 8 bits off of cwq pointer w/ debugobjects turned
	 * off.  This makes cwqs aligned to 256 bytes and allows 15
	 * workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};

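/*
 * Illustrative sketch (editor's addition, not part of the kernel API):
 * because a cwq is aligned to (1 << WORK_STRUCT_FLAG_BITS) bytes, its
 * pointer and the flag/color bits above can share the single data word
 * of a work item.  The helper names here are hypothetical; the real
 * packing lives in kernel/workqueue.c.
 */
#if 0	/* example only */
static inline unsigned long pack_work_data(void *cwq, unsigned long flags)
{
	/* pointer in the high bits, flags in the low WORK_STRUCT_FLAG_BITS */
	return (unsigned long)cwq | (flags & WORK_STRUCT_FLAG_MASK);
}

static inline void *unpack_work_cwq(unsigned long data)
{
	return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
}

static inline unsigned long unpack_work_flags(unsigned long data)
{
	return data & WORK_STRUCT_FLAG_MASK;
}
#endif
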
struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

struct execute_work {
	struct work_struct work;
};

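/*
 * Illustrative sketch (editor's addition): a delayed-work handler
 * typically recovers its delayed_work with to_delayed_work() and, via
 * container_of(), any enclosing structure.  All names below are
 * hypothetical.
 */
#if 0	/* example only */
struct my_device {
	struct delayed_work poll_work;
	int polls;
};

static void my_poll_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_device *dev = container_of(dwork, struct my_device, poll_work);

	dev->polls++;
}
#endif
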
#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

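/*
 * Illustrative sketch (editor's addition): statically declaring work
 * items with the initializers above.  The function and variable names
 * are hypothetical.
 */
#if 0	/* example only */
static void my_handler(struct work_struct *work)
{
	/* runs later in process context */
}

static DECLARE_WORK(my_work, my_handler);
static DECLARE_DELAYED_WORK(my_dwork, my_handler);
#endif
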
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)				\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
	do {							\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)

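/*
 * Illustrative sketch (editor's addition): initializing embedded work
 * items at runtime, e.g. from a driver's probe path.  The structure
 * and function names are hypothetical.
 */
#if 0	/* example only */
struct my_ctx {
	struct work_struct bh_work;
	struct delayed_work poll_work;
};

static void my_bh_fn(struct work_struct *work) { }
static void my_timer_fn(struct work_struct *work) { }

static void my_ctx_init(struct my_ctx *ctx)
{
	INIT_WORK(&ctx->bh_work, my_bh_fn);
	INIT_DELAYED_WORK(&ctx->poll_work, my_timer_fn);
}
#endif
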
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @work: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */

	WQ_DYING		= 1 << 6, /* internal: workqueue is dying */
	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU and are not concurrency managed; all queued works
 * are executed immediately as long as the max_active limit is not
 * reached and resources are available.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;

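/*
 * Illustrative sketch (editor's addition): picking a system workqueue.
 * Short items can go on system_wq, long-running ones on system_long_wq.
 * queue_work() is declared further down in this header; the work item
 * names are hypothetical.
 */
#if 0	/* example only */
static void quick_fn(struct work_struct *work) { }
static void slow_fn(struct work_struct *work) { }

static DECLARE_WORK(quick_work, quick_fn);
static DECLARE_WORK(slow_work, slow_fn);

static void kick_things_off(void)
{
	queue_work(system_wq, &quick_work);	/* what schedule_work() uses */
	queue_work(system_long_wq, &slow_work);	/* may run for a while */
}
#endif
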
extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
		      struct lock_class_key *key, const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(name, flags, max_active)		\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(name))				\
		__lock_name = (name);				\
	else							\
		__lock_name = #name;				\
								\
	__alloc_workqueue_key((name), (flags), (max_active),	\
			      &__key, __lock_name);		\
})
#else
#define alloc_workqueue(name, flags, max_active)		\
	__alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
#endif

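/*
 * Illustrative sketch (editor's addition): creating a dedicated
 * workqueue with alloc_workqueue().  The name, flags and max_active
 * value are hypothetical; destroy_workqueue() is declared further down.
 */
#if 0	/* example only */
static struct workqueue_struct *my_wq;

static int my_module_init(void)
{
	/* unbound, with up to 4 work items in flight at once */
	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 4);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void my_module_exit(void)
{
	destroy_workqueue(my_wq);
}
#endif
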
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @name: name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
static inline struct workqueue_struct *
alloc_ordered_workqueue(const char *name, unsigned int flags)
{
	return alloc_workqueue(name, WQ_UNBOUND | flags, 1);
}

#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezeable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)

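/*
 * Illustrative sketch (editor's addition): an ordered workqueue gives
 * strict one-at-a-time, FIFO execution, useful when work items must not
 * overlap.  The name is hypothetical.
 */
#if 0	/* example only */
static struct workqueue_struct *my_ordered_wq;

static int my_ordered_init(void)
{
	my_ordered_wq = alloc_ordered_workqueue("my_ordered", WQ_MEM_RECLAIM);
	return my_ordered_wq ? 0 : -ENOMEM;
}
#endif
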
extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);

int execute_in_process_context(work_func_t fn, struct execute_work *);

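/*
 * Illustrative sketch (editor's addition): the delay arguments above are
 * in jiffies; msecs_to_jiffies() from <linux/jiffies.h> is the usual way
 * to express them.  The work item names are hypothetical.
 */
#if 0	/* example only */
static void my_poll(struct work_struct *work) { }
static DECLARE_DELAYED_WORK(my_poll_dwork, my_poll);

static void start_polling(void)
{
	/* run my_poll() on the system workqueue roughly 100ms from now */
	schedule_delayed_work(&my_poll_dwork, msecs_to_jiffies(100));
}
#endif
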
extern bool flush_work(struct work_struct *work);
extern bool flush_work_sync(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool flush_delayed_work_sync(struct delayed_work *work);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline bool cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

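/*
 * Illustrative sketch (editor's addition): a common teardown pattern.
 * cancel_delayed_work() only stops the timer; a callback that is already
 * running, or that re-arms itself, needs the _sync variants above.
 * Names are hypothetical.
 */
#if 0	/* example only */
struct my_teardown_ctx {
	struct work_struct bh_work;
	struct delayed_work poll_work;
};

static void my_shutdown(struct my_teardown_ctx *ctx)
{
	cancel_delayed_work_sync(&ctx->poll_work);	/* also waits for a running callback */
	cancel_work_sync(&ctx->bh_work);
}
#endif
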
/*
 * Like above, but uses del_timer() instead of del_timer_sync(). This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline bool __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

/* Obsolete.  Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
					struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

/* Obsolete.  Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

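/*
 * Illustrative sketch (editor's addition): work_on_cpu() runs a function
 * synchronously in process context on the requested CPU (or calls it
 * directly on !SMP builds) and returns its long result.  The callback
 * and argument are hypothetical.
 */
#if 0	/* example only */
static long read_my_counter(void *arg)
{
	int *offset = arg;

	/* executes on the CPU passed to work_on_cpu() */
	return 1000 + *offset;
}

static long query_cpu0(void)
{
	int offset = 42;

	return work_on_cpu(0, read_my_counter, &offset);
}
#endif
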
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#endif