/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	int should_stop;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
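
/*
 * Illustrative note (a sketch, not part of the original file): the
 * work->data word packs the cwq pointer together with the low flag
 * bits.  A cwq pointer is at least word-aligned (here cacheline-
 * aligned), so its low bits are zero and are free to carry flags:
 *
 *	data = (unsigned long) cwq		- high bits: cwq pointer
 *	     | (1UL << WORK_STRUCT_PENDING)	- low bits: flags
 *
 * get_wq_data() masks with WORK_STRUCT_WQ_DATA_MASK to strip the flag
 * bits and recover the pointer.
 */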

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is
 * no guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
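
/*
 * Usage sketch (illustrative only; "my_wq", "my_work" and my_work_fn()
 * are hypothetical names, not defined in this file):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		... runs in process context, may sleep ...
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);	- returns 1, work now pending
 *	queue_work(my_wq, &my_work);	- returns 0, already pending
 */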

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work,
			per_cpu_ptr(wq->cpu_wq, wq->singlethread ?
				singlethread_cpu : raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
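
/*
 * Usage sketch (illustrative only; names are hypothetical):
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	- in ~1 second
 *	queue_delayed_work_on(2, my_wq, &my_dwork, HZ);	- timer on CPU 2
 *
 * When the timer expires, delayed_work_timer_fn() requeues the work
 * via __queue_work() on the CPU the timer fired on.
 */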

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
	int should_stop = cwq->should_stop;

	if (unlikely(should_stop)) {
		spin_lock_irq(&cwq->lock);
		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
		if (should_stop)
			cwq->thread = NULL;
		spin_unlock_irq(&cwq->lock);
	}

	return should_stop;
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	for (;;) {
		if (cwq->wq->freezeable)
			try_to_freeze();

		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!cwq->should_stop && list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		if (cwq_should_stop(cwq))
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
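
/*
 * Teardown sketch (illustrative only; names are hypothetical):
 *
 *	queue_work(my_wq, &my_work);
 *	...
 *	flush_workqueue(my_wq);		- all works queued so far have run
 *
 * flush_cpu_workqueue() queues a wq_barrier behind the currently
 * pending works and sleeps on barr.done, so the flush waits only for
 * works queued before it; later submissions cannot livelock it.
 */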

static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued. If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon. It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	/* Was it ever queued? */
	if (!cwq)
		return;

	/*
	 * This work can't be re-queued, no need to re-check that
	 * get_wq_data() is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_release(work);
	spin_unlock_irq(&cwq->lock);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
EXPORT_SYMBOL_GPL(flush_work);
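
/*
 * Usage sketch (illustrative only):
 *
 *	flush_work(my_wq, &my_work);	- cancel if queued, wait if running
 *
 * As the kerneldoc above notes, the caller must first ensure the work
 * cannot be requeued concurrently.
 */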

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
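
/*
 * Usage sketch for the global queue (illustrative only):
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);	- runs on keventd_wq ("events/N")
 */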

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
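
/*
 * Usage sketch (illustrative only; drain_local_caches() is a
 * hypothetical work function):
 *
 *	static void drain_local_caches(struct work_struct *unused)
 *	{
 *		... runs once on each online CPU, in keventd context ...
 *	}
 *
 *	err = schedule_on_each_cpu(drain_local_caches);
 *
 * The call returns only after flush_workqueue() has seen every
 * per-cpu work item complete.
 */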

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
	flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - kill off a delayed work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 *
 * Note that the work callback function may still be running on return from
 * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	/* Was it ever queued? */
	if (!get_wq_data(&dwork->work))
		return;

	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
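
/*
 * The rearming pattern these helpers target (illustrative only):
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		...
 *		schedule_delayed_work(&my_dwork, HZ);	- rearms itself
 *	}
 *
 * A single cancel_delayed_work() can lose the race with the handler
 * requeueing itself, so the while/flush loop above keeps cancelling
 * until a cancellation wins.
 */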

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
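
/*
 * Usage sketch (illustrative only; "my_ew" must stay valid until the
 * deferred call has run):
 *
 *	static struct execute_work my_ew;
 *
 *	execute_in_process_context(my_cleanup_fn, &my_ew);
 *
 * From process context my_cleanup_fn() runs immediately (returns 0);
 * from interrupt context it is queued on keventd_wq (returns 1).
 */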

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	cwq->should_stop = 0;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
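
/*
 * Callers normally reach this through the wrapper macros in
 * <linux/workqueue.h> rather than calling __create_workqueue()
 * directly; roughly (a sketch -- see the header for the authoritative
 * definitions):
 *
 *	create_workqueue(name)			- per-cpu threads
 *	create_singlethread_workqueue(name)	- one thread
 *	create_freezeable_workqueue(name)	- one thread, freezeable
 */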

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct wq_barrier barr;
	int alive = 0;

	spin_lock_irq(&cwq->lock);
	if (cwq->thread != NULL) {
		insert_wq_barrier(cwq, &barr, 1);
		cwq->should_stop = 1;
		alive = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (alive) {
		wait_for_completion(&barr.done);

		while (unlikely(cwq->thread != NULL))
			cpu_relax();
		/*
		 * Wait until cwq->thread unlocks cwq->lock,
		 * it won't touch *cwq after that.
		 */
		smp_rmb();
		spin_unlock_wait(&cwq->lock);
	}
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
			/* fall through: the thread must be cleaned up too */
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}