/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
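
/*
 * A work handler just matches work_func_t and runs later in process
 * context. A minimal sketch (the handler name is hypothetical):
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		pr_debug("my_handler: running in process context\n");
 *	}
 */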

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

struct work_struct {
	atomic_long_t data;
#define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
#define WORK_STRUCT_FLAG_MASK (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
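
/*
 * to_delayed_work() lets a handler shared between plain and delayed
 * submission recover its delayed_work, e.g. to re-arm itself. A sketch
 * with hypothetical names:
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		my_poll_hardware();
 *		schedule_delayed_work(dwork, HZ);
 *	}
 */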

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
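
/*
 * Compile-time declaration sketch; the names below are hypothetical:
 *
 *	static void my_handler(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_handler);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 *
 *	schedule_work(&my_work);
 *	schedule_delayed_work(&my_dwork, HZ);
 */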

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define INIT_WORK(_work, _func)						\
	do {								\
		static struct lock_class_key __key;			\
									\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define INIT_WORK(_work, _func)						\
	do {								\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)

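/*
 * Runtime initialization sketch for work items embedded in a
 * dynamically allocated object (all names hypothetical). Use the
 * _ON_STACK variant for items living on the stack (it cooperates with
 * the timer object debugging code) and _DEFERRABLE when the timer need
 * not wake an idle CPU:
 *
 *	struct my_dev {
 *		struct work_struct reset_work;
 *		struct delayed_work poll_work;
 *	};
 *
 *	INIT_WORK(&dev->reset_work, my_reset_handler);
 *	INIT_DELAYED_WORK(&dev->poll_work, my_poll_handler);
 */
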
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))

extern struct workqueue_struct *
__create_workqueue_key(const char *name, int singlethread,
		       int freezeable, int rt, struct lock_class_key *key,
		       const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define __create_workqueue(name, singlethread, freezeable, rt)	\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(name))				\
		__lock_name = (name);				\
	else							\
		__lock_name = #name;				\
								\
	__create_workqueue_key((name), (singlethread),		\
			       (freezeable), (rt), &__key,	\
			       __lock_name);			\
})
#else
#define __create_workqueue(name, singlethread, freezeable, rt)	\
	__create_workqueue_key((name), (singlethread), (freezeable), (rt), \
			       NULL, NULL)
#endif

#define create_workqueue(name) __create_workqueue((name), 0, 0, 0)
#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1)
#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0)
#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0)
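
/*
 * Typical lifecycle of a dedicated workqueue, with hypothetical names;
 * the create_*() wrappers return NULL on failure:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_singlethread_workqueue("mydrv");
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &my_work);
 *	...
 *	flush_workqueue(wq);
 *	destroy_workqueue(wq);
 */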

extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int current_is_keventd(void);
extern int keventd_up(void);
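
/*
 * The schedule_*() calls above submit to the shared per-cpu "events"
 * workqueue run by keventd, so handlers queued there should not block
 * for long. schedule_on_each_cpu() runs a handler on every online CPU
 * and waits for completion; a sketch with a hypothetical handler:
 *
 *	schedule_on_each_cpu(my_percpu_handler);
 */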

extern void init_workqueues(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);
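
/*
 * execute_in_process_context() runs fn immediately when the caller is
 * already in process context and otherwise defers it to a work item.
 * A sketch; the object and names are hypothetical:
 *
 *	execute_in_process_context(my_release, &obj->ew);
 */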

extern int flush_work(struct work_struct *work);

extern int cancel_work_sync(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work(). Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
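
/*
 * Teardown sketch for a delayed work item that does not re-arm itself
 * (hypothetical names); for self re-arming items use
 * cancel_delayed_work_sync(), declared below:
 *
 *	if (!cancel_delayed_work(&dev->poll_work))
 *		cancel_work_sync(&dev->poll_work.work);
 */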

extern int cancel_delayed_work_sync(struct delayed_work *work);

/* Obsolete. Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

/* Obsolete. Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
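
/*
 * work_on_cpu() runs fn synchronously on the given CPU and returns its
 * result; on SMP it executes via a work item bound to that CPU, on UP
 * it is a direct call. A sketch with hypothetical names:
 *
 *	long ret = work_on_cpu(0, my_probe_fn, my_dev);
 */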
#endif	/* _LINUX_WORKQUEUE_H */