/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Distribute under GPLv2.
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 * Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so we let the scheduler balance
 * the softirq load for us.
 */
void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into add_preempt_count and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        preempt_count() += SOFTIRQ_OFFSET;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == SOFTIRQ_OFFSET)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
        add_preempt_count(SOFTIRQ_OFFSET);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
        __local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}

void local_bh_enable(void)
{
        _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
        _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
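
/*
 * Illustrative sketch of how the BH-disable API above is typically used
 * (my_lock, my_list and item are hypothetical): process-context code that
 * shares data with a softirq handler brackets the critical section so the
 * softirq cannot run on this CPU in between:
 *
 *        local_bh_disable();
 *        spin_lock(&my_lock);
 *        list_add(&item->list, &my_list);
 *        spin_unlock(&my_lock);
 *        local_bh_enable();
 *
 * spin_lock_bh()/spin_unlock_bh() combine the two operations.
 */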

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        int max_restart = MAX_SOFTIRQ_RESTART;
        int cpu;

        pending = local_softirq_pending();
        account_system_vtime(current);

        __local_bh_disable((unsigned long)__builtin_return_address(0));
        lockdep_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        int prev_count = preempt_count();
                        kstat_incr_softirqs_this_cpu(h - softirq_vec);

                        trace_softirq_entry(h, softirq_vec);
                        h->action(h);
                        trace_softirq_exit(h, softirq_vec);
                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %td %s %p "
                                       "with preempt_count %08x,"
                                       " exited with %08x?\n", h - softirq_vec,
                                       softirq_to_name[h - softirq_vec],
                                       h->action, prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }

                        rcu_bh_qsctr_inc(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending && --max_restart)
                goto restart;

        if (pending)
                wakeup_softirqd();

        lockdep_softirq_exit();

        account_system_vtime(current);
        _local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        rcu_irq_enter();
        if (idle_cpu(cpu) && !in_interrupt()) {
                __irq_enter();
                tick_check_idle(cpu);
        } else
                __irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()       __do_softirq()
#else
# define invoke_softirq()       do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
        account_system_vtime(current);
        trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

#ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
        rcu_irq_exit();
        if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
                tick_nohz_stop_sched_tick(0);
#endif
        preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}

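/*
 * Illustrative sketch of the open_softirq()/raise_softirq() pairing
 * (my_softirq_action is hypothetical, and the BLOCK_SOFTIRQ slot is used
 * only as a stand-in: a new softirq needs its own entry in the fixed
 * NR_SOFTIRQS enum). A subsystem installs its handler once at init time
 * and later marks it pending, typically from its hard irq handler:
 *
 *        static void my_softirq_action(struct softirq_action *h)
 *        {
 *                // runs with irqs enabled but softirqs disabled
 *        }
 *
 *        open_softirq(BLOCK_SOFTIRQ, my_softirq_action);
 *        ...
 *        raise_softirq(BLOCK_SOFTIRQ);
 */
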
/* Tasklets */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_vec).tail = t;
        __get_cpu_var(tasklet_vec).tail = &(t->next);
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_hi_vec).tail = t;
        __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        BUG_ON(!irqs_disabled());

        t->next = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = t;
        __raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_vec).head;
        __get_cpu_var(tasklet_vec).head = NULL;
        __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_vec).tail = t;
                __get_cpu_var(tasklet_vec).tail = &(t->next);
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = NULL;
        __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_hi_vec).tail = t;
                __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}


void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

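/*
 * Illustrative sketch of the usual driver-side tasklet pattern (struct
 * my_dev and my_tasklet_fn are hypothetical):
 *
 *        static void my_tasklet_fn(unsigned long data)
 *        {
 *                struct my_dev *dev = (struct my_dev *)data;
 *                // deferred work runs here, in softirq context
 *        }
 *
 *        tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *        tasklet_schedule(&dev->tasklet);  // typically from the irq handler
 *        ...
 *        tasklet_kill(&dev->tasklet);      // before dev is freed
 */
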
void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
        struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

        list_add_tail(&cp->list, head);

        /* Trigger the softirq only if the list was previously empty. */
        if (head->next == &cp->list)
                raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
        struct call_single_data *cp = data;
        unsigned long flags;
        int softirq;

        softirq = cp->priv;

        local_irq_save(flags);
        __local_trigger(cp, softirq);
        local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        if (cpu_online(cpu)) {
                cp->func = remote_softirq_receive;
                cp->info = cp;
                cp->flags = 0;
                cp->priv = softirq;

                __smp_call_function_single(cpu, cp, 0);
                return 0;
        }
        return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
        if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
                __local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        unsigned long flags;
        int this_cpu;

        local_irq_save(flags);
        this_cpu = smp_processor_id();
        __send_remote_softirq(cp, cpu, this_cpu, softirq);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
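
/*
 * Illustrative sketch of remote softirq usage (struct my_work and its
 * fields are hypothetical; the call_single_data must stay alive at least
 * until the softirq has run on the target cpu):
 *
 *        struct my_work {
 *                struct call_single_data csd;
 *                int submit_cpu;
 *                ...
 *        };
 *
 *        // run BLOCK_SOFTIRQ work for w on the cpu that submitted it
 *        send_remote_softirq(&w->csd, w->submit_cpu, BLOCK_SOFTIRQ);
 */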

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
                                               unsigned long action, void *hcpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                int cpu = (unsigned long) hcpu;
                int i;

                local_irq_disable();
                for (i = 0; i < NR_SOFTIRQS; i++) {
                        struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
                        struct list_head *local_head;

                        if (list_empty(head))
                                continue;

                        local_head = &__get_cpu_var(softirq_work_list[i]);
                        list_splice_init(head, local_head);
                        raise_softirq_irqoff(i);
                }
                local_irq_enable();
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
        .notifier_call = remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int i;

                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
                for (i = 0; i < NR_SOFTIRQS; i++)
                        INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }

        register_hotcpu_notifier(&remote_softirq_cpu_notifier);

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void * __bind_cpu)
{
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
                        preempt_enable_no_resched();
                        schedule();
                        preempt_disable();
                }

                __set_current_state(TASK_RUNNING);

                while (local_softirq_pending()) {
                        /* Preempt disable stops the cpu from going offline.
                           If we're already offline, we're on the wrong CPU:
                           don't process */
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
                        do_softirq();
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
                        rcu_qsctr_inc((long)__bind_cpu);
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;

wait_to_die:
        preempt_enable();
        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
                __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
                __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
                }
                kthread_bind(p, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(ksoftirqd, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu),
                             cpumask_any(cpu_online_mask));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
                kthread_stop(p);
                takeover_tasklets(hotcpu);
                break;
        }
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

        BUG_ON(err == NOTIFY_BAD);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
        return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
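
/*
 * Illustrative sketch of on_each_cpu() (drain_local_counter is
 * hypothetical): the callback runs with irqs disabled on every online
 * cpu, and wait == 1 blocks until all of them have finished:
 *
 *        static void drain_local_counter(void *info)
 *        {
 *                // runs with irqs disabled on each online cpu
 *        }
 *
 *        on_each_cpu(drain_local_counter, NULL, 1);
 */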

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return 0;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
        return 0;
}