/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>
#include <trace/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of a weak cpu binding.
     It is still not clear whether this results in better locality
     or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI_SOFTIRQ", "TIMER_SOFTIRQ", "NET_TX_SOFTIRQ", "NET_RX_SOFTIRQ",
	"BLOCK_SOFTIRQ", "TASKLET_SOFTIRQ", "SCHED_SOFTIRQ", "HRTIMER_SOFTIRQ",
	"RCU_SOFTIRQ"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

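/*
 * Illustrative sketch (not part of the original file): how process-context
 * code typically uses local_bh_disable()/local_bh_enable() to keep its own
 * softirq/tasklet handlers off this CPU while it touches shared state.
 * The example_* names are hypothetical.
 */
#if 0	/* example only */
static unsigned long example_event_count;	/* also updated from a tasklet */

static void example_bump_from_process_context(void)
{
	/*
	 * Between these calls no softirq (and hence no tasklet) can run on
	 * this CPU, so the counter cannot change underneath us.
	 */
	local_bh_disable();
	example_event_count++;
	local_bh_enable();
}
#endif
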
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

DEFINE_TRACE(softirq_entry);
DEFINE_TRACE(softirq_exit);

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();

			trace_softirq_entry(h, softirq_vec);
			h->action(h);
			trace_softirq_exit(h, softirq_vec);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p "
				       "with preempt_count %08x,"
				       " exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

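/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * open_softirq() at init time with raise_softirq()/raise_softirq_irqoff()
 * from interrupt context.  EXAMPLE_SOFTIRQ and the example_* names are
 * hypothetical - a real softirq number has to be added to the enum in
 * <linux/interrupt.h>, and new code is normally expected to use tasklets
 * instead of a private softirq.
 */
#if 0	/* example only */
static void example_softirq_action(struct softirq_action *h)
{
	/* runs in softirq context, interrupts enabled, must not sleep */
}

static int __init example_init(void)
{
	open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action);
	return 0;
}

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	/* hard-irq context: interrupts are already off, so use the _irqoff form */
	raise_softirq_irqoff(EXAMPLE_SOFTIRQ);
	return IRQ_HANDLED;
}
#endif
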
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

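/*
 * Illustrative sketch (not part of the original file): the typical driver
 * life cycle for the tasklet API above - declare, schedule from the hard
 * interrupt handler, kill on teardown.  The example_* names are hypothetical.
 */
#if 0	/* example only */
static void example_tasklet_fn(unsigned long data)
{
	/* deferred, softirq-context work for the device goes here */
}

static DECLARE_TASKLET(example_tasklet, example_tasklet_fn, 0);

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	tasklet_schedule(&example_tasklet);	/* runs soon on this CPU */
	return IRQ_HANDLED;
}

static void example_teardown(void)
{
	tasklet_kill(&example_tasklet);		/* wait out any pending run */
}
#endif
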
DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);

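/*
 * Illustrative sketch (not part of the original file): one way a user of the
 * remote-softirq interface above might look.  The submitter hands a
 * call_single_data to send_remote_softirq(); the softirq action on the
 * target cpu then drains its per-cpu softirq_work_list[] entry.
 * EXAMPLE_SOFTIRQ and struct example_req are assumptions for illustration.
 */
#if 0	/* example only */
struct example_req {
	struct call_single_data csd;	/* must stay valid until the action runs */
	int result;
};

static void example_softirq_action(struct softirq_action *h)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[EXAMPLE_SOFTIRQ]);

	local_irq_disable();
	while (!list_empty(head)) {
		struct call_single_data *cp;

		cp = list_entry(head->next, struct call_single_data, list);
		list_del_init(&cp->list);
		local_irq_enable();

		/* complete the request on the cpu it was sent to */
		container_of(cp, struct example_req, csd)->result = 0;

		local_irq_disable();
	}
	local_irq_enable();
}

static void example_complete_on(struct example_req *req, int target_cpu)
{
	send_remote_softirq(&req->csd, target_cpu, EXAMPLE_SOFTIRQ);
}
#endif
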
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops the cpu going offline.
			   If we are already offline, we're on the wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
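
/*
 * Illustrative sketch (not part of the original file): running a function on
 * every online cpu with on_each_cpu(), waiting for all of them to finish.
 * The example_* names are hypothetical.
 */
#if 0	/* example only */
static void example_flush_local_state(void *info)
{
	/* called with interrupts disabled on each cpu, must not sleep */
}

static void example_flush_everywhere(void)
{
	on_each_cpu(example_flush_local_state, NULL, 1);
}
#endif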

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	return 0;
}