/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

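/*
 * Illustrative usage sketch (not part of this file; my_dev, my_work_fn and
 * dev->wq are hypothetical driver-side names). A typical queue_work() user
 * embeds the work_struct in its own object and recovers it via container_of():
 *
 *	struct my_dev {
 *		struct workqueue_struct *wq;
 *		struct work_struct work;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		... runs in process context, may sleep ...
 *	}
 *
 *	INIT_WORK(&dev->work, my_work_fn);
 *	if (!queue_work(dev->wq, &dev->work))
 *		... work was already pending, it is not queued twice ...
 */
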
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

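/*
 * Illustrative sketch (hypothetical names): queueing delayed work. The
 * delayed_work is usually initialized once and then (re)armed as needed:
 *
 *	INIT_DELAYED_WORK(&dev->dwork, my_dwork_fn);
 *	queue_delayed_work(dev->wq, &dev->dwork, msecs_to_jiffies(100));
 *
 * where my_dwork_fn() has the usual work_func_t signature and can find its
 * container with:
 *
 *	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
 */
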
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
		f(work);
		lock_release(&lockdep_map, 1, _THIS_IP_);
		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, &cwq->worklist);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
	for_each_cpu_mask_nr(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

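/*
 * Illustrative shutdown sequence (hypothetical driver code): stop submitting
 * new work first, then flush what is already queued, and only then free the
 * data the work functions touch:
 *
 *	dev->stopping = 1;		driver-specific "no new work" flag
 *	flush_workqueue(dev->wq);
 *	... now safe to tear down state used by the work functions ...
 */
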
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);

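/*
 * Illustrative flush_work() caller (hypothetical names). Unlike
 * flush_workqueue(), only this one work item is waited for, and the caller
 * must already have prevented it from being re-queued:
 *
 *	dev->armed = 0;			no new queue_work() after this point
 *	flush_work(&dev->work);		returns once a queued/running instance is done
 */
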
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&work->lockdep_map, 1, _THIS_IP_);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work to cancel
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue on which @work was last queued
 * can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

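/*
 * Illustrative teardown sketch (hypothetical names). The _sync cancel
 * variants both de-queue the work and wait for a running callback, so they
 * are what a driver typically calls from its remove/close path:
 *
 *	cancel_delayed_work_sync(&dev->poll_dwork);
 *	cancel_work_sync(&dev->irq_work);
 *	... neither callback can still be running here ...
 */
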
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

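/*
 * Illustrative "bottom half" sketch (hypothetical names): an interrupt
 * handler must not sleep, so it defers the sleeping part to keventd via
 * schedule_work():
 *
 *	static void my_bh(struct work_struct *work)
 *	{
 *		... process context, may sleep ...
 *	}
 *	static DECLARE_WORK(my_bh_work, my_bh);
 *
 *	static irqreturn_t my_irq(int irq, void *dev_id)
 *	{
 *		schedule_work(&my_bh_work);
 *		return IRQ_HANDLED;
 *	}
 */
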
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
			unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

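/*
 * Illustrative self-rearming poll on the global queue (hypothetical names).
 * Such a work item must eventually be stopped with cancel_delayed_work_sync():
 *
 *	static void my_poll(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_poll_work, my_poll);
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		... poll the hardware ...
 *		schedule_delayed_work(&my_poll_work, HZ);
 *	}
 *
 *	schedule_delayed_work(&my_poll_work, HZ);	initial kick
 */
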
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}

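/*
 * Illustrative schedule_on_each_cpu() caller (hypothetical name): run a
 * function once on every online CPU and wait for all of them, e.g. to drain
 * per-cpu caches:
 *
 *	static void drain_local_cache(struct work_struct *unused)
 *	{
 *		... runs in keventd context, once per online CPU ...
 *	}
 *
 *	err = schedule_on_each_cpu(drain_local_cache);
 */
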
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

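/*
 * Illustrative execute_in_process_context() caller (hypothetical names).
 * The execute_work storage must outlive a possible deferred execution, so
 * it is typically embedded in the object being released:
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *		...
 *	};
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_release, &obj->ew);
 */
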
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);

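/*
 * Illustrative creation/destruction sketch (hypothetical names). Callers do
 * not use __create_workqueue_key() directly; the create_workqueue(),
 * create_singlethread_workqueue() and create_freezeable_workqueue() helpers
 * in <linux/workqueue.h> wrap it:
 *
 *	dev->wq = create_singlethread_workqueue("mydrv");
 *	if (!dev->wq)
 *		return -ENOMEM;
 *	...
 *	queue_work(dev->wq, &dev->work);
 *	...
 *	destroy_workqueue(dev->wq);	on teardown, after no more work is queued
 */
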
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpu_clear(cpu, cpu_populated_map);
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}