/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something like a weak cpu binding.
     It is still not clear whether this results in better locality
     or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */
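
/*
 * In short, the life cycle of a softirq as implemented below:
 * raise_softirq() marks it pending in the per-cpu bitmask, and the
 * pending handlers are then run by __do_softirq() - on irq exit, or
 * when local_bh_enable() drops the last softirq count with work still
 * pending, or in the per-cpu ksoftirqd thread if the load gets heavy.
 */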

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so we let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above?
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

void __local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());

	/*
	 * softirqs should never be enabled by __local_bh_enable(),
	 * it always nests inside local_bh_enable() sections:
	 */
	WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);

	sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL_GPL(__local_bh_enable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now?
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

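/*
 * Illustrative sketch (not part of the original file): a typical user
 * of the local_bh_*() API brackets access to data shared with softirq
 * context, e.g.
 *
 *	local_bh_disable();
 *	... touch softirq-shared state safely (placeholder body) ...
 *	local_bh_enable();	// pending softirqs may run here
 *
 * The pairing, and the fact that softirqs can run at enable time,
 * follow from _local_bh_enable_ip() above.
 */
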
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	trace_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			h->action(h);
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
#ifdef CONFIG_NO_HZ
	int cpu = smp_processor_id();
	if (idle_cpu(cpu) && !in_interrupt())
		tick_nohz_stop_idle(cpu);
#endif
	__irq_enter();
#ifdef CONFIG_NO_HZ
	if (idle_cpu(cpu))
		tick_nohz_update_jiffies();
#endif
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
		tick_nohz_stop_sched_tick();
	rcu_irq_exit();
#endif
	preempt_enable_no_resched();
}

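/*
 * Illustrative sketch (not from this file): an architecture's
 * interrupt entry path is expected to bracket its handler dispatch
 * roughly like
 *
 *	irq_enter();
 *	generic_handle_irq(irq);	// dispatch step; name indicative
 *	irq_exit();
 *
 * so that preempt counts, NO_HZ bookkeeping and the pending-softirq
 * processing in irq_exit() all happen in the right order.
 */
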
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

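/*
 * Illustrative sketch (not from this file): a subsystem registers its
 * handler once at init time and raises the softirq later, typically
 * from interrupt context. MY_SOFTIRQ is hypothetical - real softirq
 * numbers are fixed enum constants in <linux/interrupt.h>:
 *
 *	static void my_action(struct softirq_action *a)
 *	{
 *		... runs on the raising cpu, irqs enabled, bh context ...
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_action);
 *	...
 *	raise_softirq(MY_SOFTIRQ);
 */
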
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* The tasklet is running on another cpu or is disabled:
		   requeue it and re-raise the softirq */
		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* The tasklet is running on another cpu or is disabled:
		   requeue it and re-raise the softirq */
		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

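/*
 * Illustrative sketch (not from this file): typical driver usage, with
 * hypothetical names. DECLARE_TASKLET() in <linux/interrupt.h> is the
 * static alternative to tasklet_init():
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;	// hypothetical
 *		... deferred work ...
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *	tasklet_schedule(&dev->tasklet);	// e.g. from the irq handler
 *	tasklet_kill(&dev->tasklet);		// on teardown, process context
 */
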
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Disabling preemption keeps the cpu from going
			   offline. If it is already offline, we are on
			   the wrong CPU: don't process. */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, retry, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
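
/*
 * Illustrative sketch (not from this file): a caller that needs to run
 * something on every cpu and wait for completion would do
 *
 *	static void flush_one(void *info) { ... per-cpu work ... }
 *
 *	on_each_cpu(flush_one, NULL, 0, 1);	// retry=0, wait=1
 *
 * flush_one() is a hypothetical helper; it must be safe to call with
 * interrupts disabled, since that is how we invoke it on the local cpu.
 */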
#endif