/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
	int rt;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	trace_workqueue_insertion(cwq->thread, work);

	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
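
/*
 * Example: typical use of queue_work() on a dedicated workqueue.  This is
 * an illustrative sketch only; my_wq, my_work and my_work_func() are
 * hypothetical caller-side names:
 *
 *	static void my_work_func(struct work_struct *work)
 *	{
 *		... runs in process context on my_wq's worker thread ...
 *	}
 *	static DECLARE_WORK(my_work, my_work_func);
 *	static struct workqueue_struct *my_wq;
 *
 *	queue_work(my_wq, &my_work);	- no-op (returns 0) if still pending
 */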

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
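
/*
 * Example: deferring a work item by one second (an illustrative sketch;
 * my_wq, my_dwork and my_dwork_func() are hypothetical).  The handler can
 * recover the containing delayed_work from its work member:
 *
 *	static void my_dwork_func(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		...
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_func);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);
 */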

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif
		trace_workqueue_execution(cwq->thread, work);
		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active)
		wait_for_completion(&barr.done);

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
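
/*
 * Example: a common driver shutdown sequence (illustrative; my_wq and the
 * step that stops new submissions are hypothetical).  The flush only
 * guarantees completion of work queued before it was called, so the
 * submission path must be quiesced first:
 *
 *	... stop the code that calls queue_work(my_wq, ...) ...
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */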

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
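
/*
 * Example (illustrative; my_work and my_irq are hypothetical): wait for
 * one specific work item instead of draining the whole queue.  Per the
 * rule above, the source that requeues it must be stopped first:
 *
 *	disable_irq(my_irq);		- nothing requeues my_work now
 *	flush_work(&my_work);		- returns 0 if it was already idle
 */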

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
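
/*
 * Example: the usual teardown for a self-rearming delayed work
 * (illustrative; my_dwork and my_stopping are hypothetical).  The flag
 * stops the handler from re-arming while the cancel is in flight:
 *
 *	my_stopping = 1;		- tested by the handler before re-arming
 *	cancel_delayed_work_sync(&my_dwork);
 */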

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
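
/*
 * Example (illustrative; my_work and the handler are hypothetical):
 * schedule_work() is the common way to push non-urgent processing out of
 * an interrupt handler into keventd's process context:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		... acknowledge the hardware ...
 *		schedule_work(&my_work);
 *		return IRQ_HANDLED;
 *	}
 */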

/*
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a delayed_work's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		struct cpu_workqueue_struct *cwq;
		cwq = wq_per_cpu(keventd_wq, get_cpu());
		__queue_work(cwq, &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}
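
/*
 * Example (illustrative; my_drain_cpu() is hypothetical): run a function
 * once on every online CPU and wait until all invocations have finished,
 * e.g. to drain per-cpu caches:
 *
 *	static void my_drain_cpu(struct work_struct *unused)
 *	{
 *		... drain this CPU's private state ...
 *	}
 *
 *	int err = schedule_on_each_cpu(my_drain_cpu);
 */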

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
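
/*
 * Example (illustrative; my_release() and obj are hypothetical): handy on
 * paths that may run in either context, such as object release routines.
 * The execute_work storage must outlive the callback, so it is typically
 * embedded in the object being released:
 *
 *	execute_in_process_context(my_release, &obj->ew);
 */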

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (cwq->wq->rt)
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	trace_workqueue_creation(cwq->thread, cpu);

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
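
/*
 * Callers normally reach this function through the create_workqueue() and
 * create_singlethread_workqueue() wrappers in workqueue.h rather than
 * calling it directly.  An illustrative sketch (my_wq is hypothetical):
 *
 *	my_wq = create_workqueue("my_wq");	- one worker thread per cpu
 *	if (!my_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(my_wq);
 */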

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	trace_workqueue_destruction(cwq->thread);
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return ret;
}

#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
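
/*
 * Example (illustrative; my_cpu_probe() is hypothetical): run a function
 * that must execute on one particular CPU and collect its return value,
 * without binding the caller to that CPU:
 *
 *	static long my_cpu_probe(void *arg)
 *	{
 *		... runs on the requested CPU ...
 *		return 0;
 *	}
 *
 *	long ret = work_on_cpu(1, my_cpu_probe, NULL);
 */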
#endif /* CONFIG_SMP */

void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}