/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */

	int freezeable;		/* Freeze the thread during suspend */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new, old, res;

	/* assume the pending flag is already set and that the task has already
	 * been queued on this workqueue */
	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	res = work->management;
	if (res != new) {
		do {
			old = res;
			new = (unsigned long) wq;
			new |= (old & WORK_STRUCT_FLAG_MASK);
			res = cmpxchg(&work->management, old, new);
		} while (res != old);
	}
}

static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	set_wq_data(work, cwq);
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
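
/*
 * Illustrative caller-side sketch of queue_work(); my_dev, my_wq and
 * my_work_handler are hypothetical names, not part of this file:
 *
 *	static void my_work_handler(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		... runs in the workqueue thread, may sleep ...
 *	}
 *
 *	INIT_WORK(&dev->work, my_work_handler);
 *	queue_work(my_wq, &dev->work);	(returns 0 if already pending)
 */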

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
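
/*
 * Illustrative sketch of delayed submission; my_dev, my_wq and
 * my_timeout_handler are hypothetical names, and the INIT_DELAYED_WORK()
 * helper from <linux/workqueue.h> is assumed:
 *
 *	static void my_timeout_handler(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  timeout_work.work);
 *		... handle the timeout in process context ...
 *	}
 *
 *	INIT_DELAYED_WORK(&dev->timeout_work, my_timeout_handler);
 *	queue_delayed_work(my_wq, &dev->timeout_work, HZ / 2);
 */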

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
			work_release(work);
		f(work);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node-local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		if (cwq->freezeable)
			try_to_freeze();

		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);
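
/*
 * Typical shutdown-path sketch (hypothetical driver code): stop new
 * submissions, wait out anything already queued, then tear down.
 *
 *	cancel_delayed_work(&dev->timeout_work);	(kill a pending timer)
 *	flush_workqueue(my_wq);				(wait for queued work)
 *	destroy_workqueue(my_wq);
 */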

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu, int freezeable)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	cwq->freezeable = freezeable;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu, freezeable);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
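
/*
 * Callers normally use the wrapper macros from <linux/workqueue.h> rather
 * than __create_workqueue() directly.  A minimal sketch (hypothetical
 * names, error handling reduced to the NULL check):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_workqueue("my_wq");		(one thread per CPU)
 *	  or
 *	my_wq = create_singlethread_workqueue("my_wq");	(one thread total)
 *	if (!my_wq)
 *		return -ENOMEM;
 */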

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
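
/*
 * Minimal sketch of deferring to the shared keventd queue (hypothetical
 * names); typically used to push work out of interrupt context:
 *
 *	static void my_event_handler(struct work_struct *work);
 *	static DECLARE_WORK(my_event_work, my_event_handler);
 *
 *	schedule_work(&my_event_work);	(e.g. from the interrupt handler)
 */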

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	mutex_lock(&workqueue_mutex);
	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
				per_cpu_ptr(works, cpu));
	}
	mutex_unlock(&workqueue_mutex);
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
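
/*
 * Sketch of a schedule_on_each_cpu() call (my_per_cpu_sync is a
 * hypothetical name); the call sleeps until the handler has run on every
 * online CPU:
 *
 *	static void my_per_cpu_sync(struct work_struct *unused)
 *	{
 *		... runs in keventd context on each online CPU ...
 *	}
 *
 *	err = schedule_on_each_cpu(my_per_cpu_sync);
 */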

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
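
/*
 * Sketch of the self-rearming pattern these helpers exist to stop; the
 * names are hypothetical and the DECLARE_DELAYED_WORK() initializer from
 * <linux/workqueue.h> is assumed:
 *
 *	static void my_poll(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_poll_work, my_poll);
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		... do the periodic poll, then rearm ...
 *		schedule_delayed_work(&my_poll_work, HZ);
 *	}
 *
 * Teardown uses cancel_rearming_delayed_work(&my_poll_work), which keeps
 * cancelling and flushing until it wins the race against the rearm.
 */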

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
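
/*
 * Hedged usage sketch (my_obj and my_release are hypothetical): the
 * execute_work storage must stay valid until the handler runs, so it is
 * embedded in the object being released rather than placed on the stack:
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj,
 *						  ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_release, &obj->ew);
 */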
623
Linus Torvalds1da177e2005-04-16 15:20:36 -0700624int keventd_up(void)
625{
626 return keventd_wq != NULL;
627}
628
629int current_is_keventd(void)
630{
631 struct cpu_workqueue_struct *cwq;
632 int cpu = smp_processor_id(); /* preempt-safe: keventd is per-cpu */
633 int ret = 0;
634
635 BUG_ON(!keventd_wq);
636
Christoph Lameter89ada672005-10-30 15:01:59 -0800637 cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700638 if (current == cwq->thread)
639 ret = 1;
640
641 return ret;
642
643}
644
#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu, 0)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}