blob: 9041ea7948feffbf887ed92fb8ce1e73ad55c81c [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/kernel/softirq.c
3 *
4 * Copyright (C) 1992 Linus Torvalds
5 *
Pavel Machekb10db7f2008-01-30 13:30:00 +01006 * Distribute under GPLv2.
7 *
8 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
David S. Miller54514a72008-09-23 22:15:57 -07009 *
10 * Remote softirq infrastructure is by Jens Axboe.
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 */
12
13#include <linux/module.h>
14#include <linux/kernel_stat.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/mm.h>
18#include <linux/notifier.h>
19#include <linux/percpu.h>
20#include <linux/cpu.h>
Rafael J. Wysocki83144182007-07-17 04:03:35 -070021#include <linux/freezer.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/kthread.h>
23#include <linux/rcupdate.h>
Andrew Morton78eef012006-03-22 00:08:16 -080024#include <linux/smp.h>
Thomas Gleixner79bf2bb2007-02-16 01:28:03 -080025#include <linux/tick.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026
27#include <asm/irq.h>
28/*
29 - No shared variables, all the data are CPU local.
30 - If a softirq needs serialization, let it serialize itself
31 by its own spinlocks.
32 - Even if softirq is serialized, only local cpu is marked for
33 execution. Hence, we get something sort of weak cpu binding.
   Though it is still not clear whether it will result in better
   locality or not.
36
37 Examples:
38 - NET RX softirq. It is multithreaded and does not require
39 any global serialization.
40 - NET TX softirq. It kicks software netdevice queues, hence
41 it is logically serialized per device, but this serialization
42 is invisible to common code.
43 - Tasklets: serialized wrt itself.
44 */
45
#ifndef __ARCH_IRQ_STAT
/* Generic per-CPU interrupt statistics, for archs that don't provide their own */
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

/* Table of registered softirq handlers, indexed by softirq number */
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

/* Per-CPU kernel thread that runs softirqs when they can't run inline */
static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
54
55/*
56 * we cannot loop indefinitely here to avoid userspace starvation,
57 * but we also don't want to introduce a worst case 1/HZ latency
58 * to the pending events, so lets the scheduler to balance
59 * the softirq load for us.
60 */
61static inline void wakeup_softirqd(void)
62{
63 /* Interrupts are disabled: no need to stop preemption */
64 struct task_struct *tsk = __get_cpu_var(ksoftirqd);
65
66 if (tsk && tsk->state != TASK_RUNNING)
67 wake_up_process(tsk);
68}
69
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	/* Not meant to be called from hardirq context */
	WARN_ON_ONCE(in_irq());

	/*
	 * Keep the preempt-count update and the lockdep annotation
	 * below atomic with respect to interrupts:
	 */
	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	/*
	 * No irqflags tracing: just bump the softirq count and keep
	 * the compiler from reordering memory accesses across it.
	 */
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
Ingo Molnarde30a2b2006-07-03 00:24:42 -070097
/*
 * Disable softirq processing on this CPU, recording the caller's
 * return address for the irqflags tracer.
 */
void local_bh_disable(void)
{
	unsigned long caller_ip = (unsigned long)__builtin_return_address(0);

	__local_bh_disable(caller_ip);
}

EXPORT_SYMBOL(local_bh_disable);
104
Ingo Molnarde30a2b2006-07-03 00:24:42 -0700105/*
106 * Special-case - softirqs can safely be enabled in
107 * cond_resched_softirq(), or by __do_softirq(),
108 * without processing still-pending softirqs:
109 */
110void _local_bh_enable(void)
111{
112 WARN_ON_ONCE(in_irq());
113 WARN_ON_ONCE(!irqs_disabled());
114
115 if (softirq_count() == SOFTIRQ_OFFSET)
116 trace_softirqs_on((unsigned long)__builtin_return_address(0));
117 sub_preempt_count(SOFTIRQ_OFFSET);
118}
119
120EXPORT_SYMBOL(_local_bh_enable);
121
Johannes Berg0f476b6d2008-06-18 09:29:37 +0200122static inline void _local_bh_enable_ip(unsigned long ip)
Ingo Molnarde30a2b2006-07-03 00:24:42 -0700123{
Johannes Berg0f476b6d2008-06-18 09:29:37 +0200124 WARN_ON_ONCE(in_irq() || irqs_disabled());
Tim Chen3c829c32006-07-30 03:04:02 -0700125#ifdef CONFIG_TRACE_IRQFLAGS
Johannes Berg0f476b6d2008-06-18 09:29:37 +0200126 local_irq_disable();
Tim Chen3c829c32006-07-30 03:04:02 -0700127#endif
Ingo Molnarde30a2b2006-07-03 00:24:42 -0700128 /*
129 * Are softirqs going to be turned on now:
130 */
131 if (softirq_count() == SOFTIRQ_OFFSET)
132 trace_softirqs_on(ip);
133 /*
134 * Keep preemption disabled until we are done with
135 * softirq processing:
136 */
137 sub_preempt_count(SOFTIRQ_OFFSET - 1);
138
139 if (unlikely(!in_interrupt() && local_softirq_pending()))
140 do_softirq();
141
142 dec_preempt_count();
Tim Chen3c829c32006-07-30 03:04:02 -0700143#ifdef CONFIG_TRACE_IRQFLAGS
Johannes Berg0f476b6d2008-06-18 09:29:37 +0200144 local_irq_enable();
Tim Chen3c829c32006-07-30 03:04:02 -0700145#endif
Ingo Molnarde30a2b2006-07-03 00:24:42 -0700146 preempt_check_resched();
147}
Johannes Berg0f476b6d2008-06-18 09:29:37 +0200148
/*
 * Re-enable softirq processing, reporting the caller's return address
 * to the irqflags tracer.
 */
void local_bh_enable(void)
{
	unsigned long caller_ip = (unsigned long)__builtin_return_address(0);

	_local_bh_enable_ip(caller_ip);
}
EXPORT_SYMBOL(local_bh_enable);
154
/* Like local_bh_enable(), but with an explicit caller address for tracing. */
void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
160
161/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
163 * and we fall back to softirqd after that.
164 *
165 * This number has been established via experimentation.
166 * The two things to balance is latency against fairness -
167 * we want to handle softirqs as soon as possible, but they
168 * should not be able to lock up the box.
169 */
170#define MAX_SOFTIRQ_RESTART 10
171
172asmlinkage void __do_softirq(void)
173{
174 struct softirq_action *h;
175 __u32 pending;
176 int max_restart = MAX_SOFTIRQ_RESTART;
177 int cpu;
178
179 pending = local_softirq_pending();
Paul Mackerras829035fd2006-07-03 00:25:40 -0700180 account_system_vtime(current);
181
Ingo Molnarde30a2b2006-07-03 00:24:42 -0700182 __local_bh_disable((unsigned long)__builtin_return_address(0));
183 trace_softirq_enter();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185 cpu = smp_processor_id();
186restart:
187 /* Reset the pending bitmask before enabling irqs */
Andi Kleen3f744782005-09-12 18:49:24 +0200188 set_softirq_pending(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189
Andrew Mortonc70f5d62005-07-30 10:22:49 -0700190 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191
192 h = softirq_vec;
193
194 do {
195 if (pending & 1) {
Thomas Gleixner8e85b4b2008-10-02 10:50:53 +0200196 int prev_count = preempt_count();
197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 h->action(h);
Thomas Gleixner8e85b4b2008-10-02 10:50:53 +0200199
200 if (unlikely(prev_count != preempt_count())) {
Linus Torvalds1c95e1b2008-10-16 15:32:46 -0700201 printk(KERN_ERR "huh, entered softirq %td %p"
Thomas Gleixner8e85b4b2008-10-02 10:50:53 +0200202 "with preempt_count %08x,"
203 " exited with %08x?\n", h - softirq_vec,
204 h->action, prev_count, preempt_count());
205 preempt_count() = prev_count;
206 }
207
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 rcu_bh_qsctr_inc(cpu);
209 }
210 h++;
211 pending >>= 1;
212 } while (pending);
213
Andrew Mortonc70f5d62005-07-30 10:22:49 -0700214 local_irq_disable();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215
216 pending = local_softirq_pending();
217 if (pending && --max_restart)
218 goto restart;
219
220 if (pending)
221 wakeup_softirqd();
222
Ingo Molnarde30a2b2006-07-03 00:24:42 -0700223 trace_softirq_exit();
Paul Mackerras829035fd2006-07-03 00:25:40 -0700224
225 account_system_vtime(current);
Ingo Molnarde30a2b2006-07-03 00:24:42 -0700226 _local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227}
228
229#ifndef __ARCH_HAS_DO_SOFTIRQ
230
231asmlinkage void do_softirq(void)
232{
233 __u32 pending;
234 unsigned long flags;
235
236 if (in_interrupt())
237 return;
238
239 local_irq_save(flags);
240
241 pending = local_softirq_pending();
242
243 if (pending)
244 __do_softirq();
245
246 local_irq_restore(flags);
247}
248
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249#endif
250
Ingo Molnardde4b2b2007-02-16 01:27:45 -0800251/*
252 * Enter an interrupt context.
253 */
254void irq_enter(void)
255{
Venki Pallipadi6378ddb2008-01-30 13:30:04 +0100256 int cpu = smp_processor_id();
Thomas Gleixner719254f2008-10-17 09:59:47 +0200257
Paul E. McKenney64db4cf2008-12-18 21:55:32 +0100258 rcu_irq_enter();
Thomas Gleixneree5f80a2008-11-07 11:06:00 +0100259 if (idle_cpu(cpu) && !in_interrupt()) {
260 __irq_enter();
Thomas Gleixner719254f2008-10-17 09:59:47 +0200261 tick_check_idle(cpu);
Thomas Gleixneree5f80a2008-11-07 11:06:00 +0100262 } else
263 __irq_enter();
Ingo Molnardde4b2b2007-02-16 01:27:45 -0800264}
265
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
267# define invoke_softirq() __do_softirq()
268#else
269# define invoke_softirq() do_softirq()
270#endif
271
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	/* run softirqs only from the outermost hardirq level */
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}
291
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq (this also catches
	 * softirq-disabled code), the softirq will run as soon as we
	 * return from the irq or softirq, so there is nothing more to
	 * do here.
	 */
	if (in_interrupt())
		return;

	/* Otherwise wake ksoftirqd so the softirq is scheduled soon. */
	wakeup_softirqd();
}
311
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -0800312void raise_softirq(unsigned int nr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313{
314 unsigned long flags;
315
316 local_irq_save(flags);
317 raise_softirq_irqoff(nr);
318 local_irq_restore(flags);
319}
320
Carlos R. Mafra962cf362008-05-15 11:15:37 -0300321void open_softirq(int nr, void (*action)(struct softirq_action *))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323 softirq_vec[nr].action = action;
324}
325
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326/* Tasklets */
327struct tasklet_head
328{
Olof Johansson48f20a92008-03-04 15:23:25 -0800329 struct tasklet_struct *head;
330 struct tasklet_struct **tail;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331};
332
Vegard Nossum4620b492008-06-12 23:21:53 +0200333static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
334static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -0800336void __tasklet_schedule(struct tasklet_struct *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337{
338 unsigned long flags;
339
340 local_irq_save(flags);
Olof Johansson48f20a92008-03-04 15:23:25 -0800341 t->next = NULL;
342 *__get_cpu_var(tasklet_vec).tail = t;
343 __get_cpu_var(tasklet_vec).tail = &(t->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344 raise_softirq_irqoff(TASKLET_SOFTIRQ);
345 local_irq_restore(flags);
346}
347
348EXPORT_SYMBOL(__tasklet_schedule);
349
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -0800350void __tasklet_hi_schedule(struct tasklet_struct *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351{
352 unsigned long flags;
353
354 local_irq_save(flags);
Olof Johansson48f20a92008-03-04 15:23:25 -0800355 t->next = NULL;
356 *__get_cpu_var(tasklet_hi_vec).tail = t;
357 __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700358 raise_softirq_irqoff(HI_SOFTIRQ);
359 local_irq_restore(flags);
360}
361
362EXPORT_SYMBOL(__tasklet_hi_schedule);
363
/* Softirq handler for the normal-priority tasklet list. */
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	/* Atomically detach the whole per-CPU list and reset it to empty */
	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			/* count == 0 means the tasklet is enabled */
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/*
		 * Running on another CPU or currently disabled: requeue it
		 * and re-raise the softirq so it is retried later.
		 */
		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
398
/* Softirq handler for the high-priority tasklet list. */
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	/* Atomically detach the whole per-CPU list and reset it to empty */
	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			/* count == 0 means the tasklet is enabled */
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/*
		 * Running on another CPU or currently disabled: requeue it
		 * and re-raise the softirq so it is retried later.
		 */
		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
433
434
435void tasklet_init(struct tasklet_struct *t,
436 void (*func)(unsigned long), unsigned long data)
437{
438 t->next = NULL;
439 t->state = 0;
440 atomic_set(&t->count, 0);
441 t->func = func;
442 t->data = data;
443}
444
445EXPORT_SYMBOL(tasklet_init);
446
447void tasklet_kill(struct tasklet_struct *t)
448{
449 if (in_interrupt())
450 printk("Attempt to kill tasklet from interrupt\n");
451
452 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
453 do
454 yield();
455 while (test_bit(TASKLET_STATE_SCHED, &t->state));
456 }
457 tasklet_unlock_wait(t);
458 clear_bit(TASKLET_STATE_SCHED, &t->state);
459}
460
461EXPORT_SYMBOL(tasklet_kill);
462
David S. Miller54514a72008-09-23 22:15:57 -0700463DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
464EXPORT_PER_CPU_SYMBOL(softirq_work_list);
465
466static void __local_trigger(struct call_single_data *cp, int softirq)
467{
468 struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
469
470 list_add_tail(&cp->list, head);
471
472 /* Trigger the softirq only if the list was previously empty. */
473 if (head->next == &cp->list)
474 raise_softirq_irqoff(softirq);
475}
476
477#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
478static void remote_softirq_receive(void *data)
479{
480 struct call_single_data *cp = data;
481 unsigned long flags;
482 int softirq;
483
484 softirq = cp->priv;
485
486 local_irq_save(flags);
487 __local_trigger(cp, softirq);
488 local_irq_restore(flags);
489}
490
491static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
492{
493 if (cpu_online(cpu)) {
494 cp->func = remote_softirq_receive;
495 cp->info = cp;
496 cp->flags = 0;
497 cp->priv = softirq;
498
499 __smp_call_function_single(cpu, cp);
500 return 0;
501 }
502 return 1;
503}
504#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	/* No generic SMP helpers: always fall back to the local CPU. */
	return 1;
}
509#endif
510
/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu. If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	/* remote delivery succeeded: nothing more to do here */
	if (cpu != this_cpu && !__try_remote_softirq(cp, cpu, softirq))
		return;

	__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);
529
/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	__send_remote_softirq(cp, cpu, smp_processor_id(), softirq);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(send_remote_softirq);
550
/* CPU-hotplug callback for the remote-softirq work lists. */
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		/* dead CPU can't touch its lists; irqs off protects ours */
		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}
579
/* Registered in softirq_init() to migrate remote-softirq work off dead CPUs */
static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call = remote_softirq_cpu_notify,
};
583
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584void __init softirq_init(void)
585{
Olof Johansson48f20a92008-03-04 15:23:25 -0800586 int cpu;
587
588 for_each_possible_cpu(cpu) {
David S. Miller54514a72008-09-23 22:15:57 -0700589 int i;
590
Olof Johansson48f20a92008-03-04 15:23:25 -0800591 per_cpu(tasklet_vec, cpu).tail =
592 &per_cpu(tasklet_vec, cpu).head;
593 per_cpu(tasklet_hi_vec, cpu).tail =
594 &per_cpu(tasklet_hi_vec, cpu).head;
David S. Miller54514a72008-09-23 22:15:57 -0700595 for (i = 0; i < NR_SOFTIRQS; i++)
596 INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
Olof Johansson48f20a92008-03-04 15:23:25 -0800597 }
598
David S. Miller54514a72008-09-23 22:15:57 -0700599 register_hotcpu_notifier(&remote_softirq_cpu_notifier);
600
Carlos R. Mafra962cf362008-05-15 11:15:37 -0300601 open_softirq(TASKLET_SOFTIRQ, tasklet_action);
602 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700603}
604
/*
 * Per-CPU softirq daemon: runs softirqs that could not be handled
 * inline (see wakeup_softirqd()).  __bind_cpu is the CPU this thread
 * was bound to, passed as a casted long.
 */
static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			/* nothing pending: sleep until woken */
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			/* give other tasks a chance between batches */
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
648
649#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	/* not on the list at all: nothing to unlink */
	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			/* unlink by rewriting the predecessor's next pointer */
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	/* SCHED bit set but not on the list: state is corrupt */
	BUG();
}
681
/* Splice a dead CPU's pending tasklets onto this CPU's lists. */
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		/* reset the dead CPU's list to the canonical empty state */
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
706#endif /* CONFIG_HOTPLUG_CPU */
707
Chandra Seetharaman8c78f302006-07-30 03:03:35 -0700708static int __cpuinit cpu_callback(struct notifier_block *nfb,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 unsigned long action,
710 void *hcpu)
711{
712 int hotcpu = (unsigned long)hcpu;
713 struct task_struct *p;
714
715 switch (action) {
716 case CPU_UP_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -0700717 case CPU_UP_PREPARE_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718 p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
719 if (IS_ERR(p)) {
720 printk("ksoftirqd for %i failed\n", hotcpu);
721 return NOTIFY_BAD;
722 }
723 kthread_bind(p, hotcpu);
724 per_cpu(ksoftirqd, hotcpu) = p;
725 break;
726 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -0700727 case CPU_ONLINE_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728 wake_up_process(per_cpu(ksoftirqd, hotcpu));
729 break;
730#ifdef CONFIG_HOTPLUG_CPU
731 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -0700732 case CPU_UP_CANCELED_FROZEN:
Heiko Carstensfc75cdf2006-06-25 05:49:10 -0700733 if (!per_cpu(ksoftirqd, hotcpu))
734 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735 /* Unbind so it can run. Fall thru. */
Heiko Carstensa4c4af72005-11-07 00:58:38 -0800736 kthread_bind(per_cpu(ksoftirqd, hotcpu),
Rusty Russellf1fc0572009-01-01 10:12:23 +1030737 cpumask_any(cpu_online_mask));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700738 case CPU_DEAD:
Satoru Takeuchi1c6b4aa2007-07-15 23:39:48 -0700739 case CPU_DEAD_FROZEN: {
740 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
741
Linus Torvalds1da177e2005-04-16 15:20:36 -0700742 p = per_cpu(ksoftirqd, hotcpu);
743 per_cpu(ksoftirqd, hotcpu) = NULL;
Rusty Russell961ccdd2008-06-23 13:55:38 +1000744 sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700745 kthread_stop(p);
746 takeover_tasklets(hotcpu);
747 break;
Satoru Takeuchi1c6b4aa2007-07-15 23:39:48 -0700748 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749#endif /* CONFIG_HOTPLUG_CPU */
750 }
751 return NOTIFY_OK;
752}
753
Chandra Seetharaman8c78f302006-07-30 03:03:35 -0700754static struct notifier_block __cpuinitdata cpu_nfb = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755 .notifier_call = cpu_callback
756};
757
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -0700758static __init int spawn_ksoftirqd(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759{
760 void *cpu = (void *)(long)smp_processor_id();
Akinobu Mita07dccf32006-09-29 02:00:22 -0700761 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
762
763 BUG_ON(err == NOTIFY_BAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700764 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
765 register_cpu_notifier(&cpu_nfb);
766 return 0;
767}
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -0700768early_initcall(spawn_ksoftirqd);
Andrew Morton78eef012006-03-22 00:08:16 -0800769
770#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int err;

	preempt_disable();
	err = smp_call_function(func, info, wait);
	/* ...and run it here too, with irqs off to mimic IPI context */
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return err;
}
EXPORT_SYMBOL(on_each_cpu);
787#endif
Yinghai Lu43a25632008-12-28 16:01:13 -0800788
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

/* Weak default: archs needing early irq setup override this. */
int __init __weak early_irq_init(void)
{
	return 0;
}
798
/* Weak default: arch hook for early irq_desc initialization. */
int __init __weak arch_early_irq_init(void)
{
	return 0;
}
803
/* Weak default: arch hook to set up per-descriptor chip data. */
int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	return 0;
}