/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	int should_stop;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
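
/*
 * Illustrative usage sketch (not part of this file): a typical caller
 * declares a handler and a work item, then queues the item on its own
 * workqueue.  The names my_wq, my_handler and my_work are invented for
 * the example.
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		printk(KERN_DEBUG "running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	if (!queue_work(my_wq, &my_work))
 *		printk(KERN_DEBUG "my_work was already pending\n");
 */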

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
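
/*
 * Illustrative sketch (not part of this file): delayed work uses a
 * struct delayed_work so the embedded timer can fire before the work is
 * queued.  my_wq, my_dwork and my_timeout_fn are invented names.
 *
 *	static void my_timeout_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		...
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ / 2);
 *
 * To run the timer and queue the work on one particular CPU instead:
 *
 *	queue_delayed_work_on(1, my_wq, &my_dwork, HZ / 2);
 */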

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
	int should_stop = cwq->should_stop;

	if (unlikely(should_stop)) {
		spin_lock_irq(&cwq->lock);
		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
		if (should_stop)
			cwq->thread = NULL;
		spin_unlock_irq(&cwq->lock);
	}

	return should_stop;
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) && !cwq->should_stop
		    && list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (cwq_should_stop(cwq))
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
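
/*
 * Illustrative sketch (not part of this file): a driver that queued work
 * on its own workqueue typically flushes it before freeing resources the
 * handlers might still touch.  my_wq and my_dev are invented names.
 *
 *	queue_work(my_wq, &my_dev->reset_work);
 *	...
 *	flush_workqueue(my_wq);
 *	kfree(my_dev);
 *
 * After the flush returns, every work item queued before the call has
 * finished running.
 */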

/*
 * Upon a successful return, the caller "owns" the WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 1;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
void cancel_work_sync(struct work_struct *work)
{
	while (!try_to_grab_pending(work))
		cpu_relax();
	wait_on_work(work);
	work_clear_pending(work);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
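
/*
 * Illustrative sketch (not part of this file): typical teardown for a
 * work item that may be pending or running.  my_work is an invented name.
 *
 *	cancel_work_sync(&my_work);
 *
 * When the call returns the handler is neither queued nor running, so the
 * caller may free the memory containing my_work, provided nothing else
 * can queue it again.
 */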

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	while (!del_timer(&dwork->timer) &&
	       !try_to_grab_pending(&dwork->work))
		cpu_relax();
	wait_on_work(&dwork->work);
	work_clear_pending(&dwork->work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
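
/*
 * Illustrative sketch (not part of this file): a self-rearming poll
 * handler and its teardown.  my_poll_fn, my_dwork and my_wq are invented
 * names.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		...
 *		queue_delayed_work(my_wq, dwork, HZ);
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_poll_fn);
 *
 *	cancel_rearming_delayed_work(&my_dwork);
 */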

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
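
/*
 * Illustrative sketch (not part of this file): schedule_work() is the
 * common way to defer non-atomic processing from an interrupt handler
 * onto keventd.  my_irq_handler and my_work are invented names.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		schedule_work(&my_work);
 *		return IRQ_HANDLED;
 *	}
 */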

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
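
/*
 * Illustrative sketch (not part of this file): draining a per-cpu cache
 * on every online CPU.  my_drain_fn is an invented name.
 *
 *	static void my_drain_fn(struct work_struct *unused)
 *	{
 *		...
 *	}
 *
 *	if (schedule_on_each_cpu(my_drain_fn))
 *		printk(KERN_WARNING "per-cpu drain failed\n");
 */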

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
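
/*
 * Illustrative sketch (not part of this file): callers that may run in
 * either interrupt or process context pass persistent execute_work
 * storage so the deferred case has somewhere to live.  my_release_fn and
 * my_dev are invented names.
 *
 *	static void my_release_fn(struct work_struct *work)
 *	{
 *		...
 *	}
 *
 *	execute_in_process_context(my_release_fn, &my_dev->release_ew);
 */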

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	cwq->should_stop = 0;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
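
/*
 * Illustrative sketch (not part of this file): __create_workqueue() is
 * normally reached through the wrappers in linux/workqueue.h such as
 * create_workqueue() and create_singlethread_workqueue().  my_wq is an
 * invented name.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("my_wq");
 *	if (!my_wq)
 *		return -ENOMEM;
 */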

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct wq_barrier barr;
	int alive = 0;

	spin_lock_irq(&cwq->lock);
	if (cwq->thread != NULL) {
		insert_wq_barrier(cwq, &barr, 1);
		cwq->should_stop = 1;
		alive = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (alive) {
		wait_for_completion(&barr.done);

		while (unlikely(cwq->thread != NULL))
			cpu_relax();
		/*
		 * Wait until cwq->thread unlocks cwq->lock,
		 * it won't touch *cwq after that.
		 */
		smp_rmb();
		spin_unlock_wait(&cwq->lock);
	}
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
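
/*
 * Illustrative sketch (not part of this file): module unload pairs the
 * create above with a destroy; pending work runs before the threads and
 * per-cpu structures are freed.  my_wq is an invented name.
 *
 *	destroy_workqueue(my_wq);
 *	my_wq = NULL;
 */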

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}