/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */
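
/*
 * For instance, raise_softirq(NET_RX_SOFTIRQ) on CPU 3 sets the pending
 * bit only in CPU 3's bitmask, so the handler later runs on CPU 3 --
 * either from irq_exit() or from that CPU's ksoftirqd thread.
 */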

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so we let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

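/*
 * For example, inside a local_bh_disable()/local_bh_enable() pair
 * in_softirq() is true, because softirq_count() is non-zero, while
 * in_serving_softirq() stays false: only SOFTIRQ_OFFSET, not
 * SOFTIRQ_DISABLE_OFFSET, marks actual softirq execution.
 */
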
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into add_preempt_count and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        preempt_count() += cnt;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == cnt)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        add_preempt_count(cnt);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
        __local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == cnt)
                trace_softirqs_on(_RET_IP_);
        sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
                 * Run softirq if any is pending. And do it on its own stack,
                 * as we may be calling this deep in a task call stack already.
                 */
                do_softirq();
        }

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}

void local_bh_enable(void)
{
        _local_bh_enable_ip(_RET_IP_);
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
        _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

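/*
 * Note that msecs_to_jiffies() rounds up: with HZ=1000 the time budget
 * is two jiffies, with HZ=100 it is a single jiffy, so the effective
 * cap is 2 ms or one tick, whichever is coarser.
 */
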
asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        int cpu;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;

        /*
         * Mask out PF_MEMALLOC, as the current task context is borrowed
         * for the softirq. A softirq handler, such as network RX, might
         * set PF_MEMALLOC again if the socket is related to swap.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();
        account_irq_enter_time(current);

        __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
        lockdep_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        unsigned int vec_nr = h - softirq_vec;
                        int prev_count = preempt_count();

                        kstat_incr_softirqs_this_cpu(vec_nr);

                        trace_softirq_entry(vec_nr);
                        h->action(h);
                        trace_softirq_exit(vec_nr);
                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %u %s %p "
                                       "with preempt_count %08x, "
                                       "exited with %08x?\n", vec_nr,
                                       softirq_to_name[vec_nr], h->action,
                                       prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }

                        rcu_bh_qs(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        lockdep_softirq_exit();

        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
        tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}


asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                do_softirq_own_stack();

        local_irq_restore(flags);
}

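/*
 * do_softirq_own_stack() is supplied by architectures that maintain a
 * separate softirq stack; where no such stack exists, a generic inline
 * along these lines (a sketch, not necessarily the exact in-tree
 * definition) simply runs the pending softirqs on the current stack:
 *
 *	static inline void do_softirq_own_stack(void)
 *	{
 *		__do_softirq();
 *	}
 */
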
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_check_idle(cpu);
                _local_bh_enable();
        }

        __irq_enter();
}

static inline void invoke_softirq(void)
{
        if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage.
                 */
                __do_softirq();
#else
                /*
                 * Otherwise, irq_exit() is called on the task stack, which
                 * can potentially be deep already. So run softirqs on their
                 * own stack to prevent any overrun.
                 */
                do_softirq_own_stack();
#endif
        } else {
                wakeup_softirqd();
        }
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_interrupt())
                        tick_nohz_irq_exit();
        }
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        WARN_ON_ONCE(!irqs_disabled());
#endif

        account_irq_exit_time(current);
        trace_hardirq_exit();
        sub_preempt_count(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
        rcu_irq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}

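/*
 * Typical wiring, sketched after the block layer's use of BLOCK_SOFTIRQ
 * (the handler name here is illustrative):
 *
 *	static void blk_done_softirq(struct softirq_action *h)
 *	{
 *		... drain this cpu's completion list ...
 *	}
 *
 *	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 *
 * Completion paths then call raise_softirq(BLOCK_SOFTIRQ) and the handler
 * runs on the raising cpu, per the weak cpu binding described above.
 */
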
/*
 * Tasklets
 */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_vec.tail) = t;
        __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_hi_vec.tail) = t;
        __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        BUG_ON(!irqs_disabled());

        t->next = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, t);
        __raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_vec.head);
        __this_cpu_write(tasklet_vec.head, NULL);
        __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_vec.tail) = t;
                __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, NULL);
        __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_hi_vec.tail) = t;
                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}

void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

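/*
 * Typical driver usage (a sketch; my_tasklet_fn and my_dev are
 * illustrative names, not kernel APIs):
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		... deferred work, runs in softirq context ...
 *	}
 *
 *	DECLARE_TASKLET(my_tasklet, my_tasklet_fn, (unsigned long)&my_dev);
 *
 *	tasklet_schedule(&my_tasklet);	from the hard irq handler
 *	tasklet_kill(&my_tasklet);	before the device goes away
 */
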
void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:      tasklet_hrtimer which is initialized
 * @function:    hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

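/*
 * Usage sketch (my_cb and my_th are illustrative names):
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *t)
 *	{
 *		... runs from HI_SOFTIRQ rather than hard irq context ...
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	static struct tasklet_hrtimer my_th;
 *
 *	tasklet_hrtimer_init(&my_th, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_th, ktime_set(0, 10 * NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */
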
/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
        struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

        list_add_tail(&cp->list, head);

        /* Trigger the softirq only if the list was previously empty. */
        if (head->next == &cp->list)
                raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
        struct call_single_data *cp = data;
        unsigned long flags;
        int softirq;

        softirq = *(int *)cp->info;
        local_irq_save(flags);
        __local_trigger(cp, softirq);
        local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        if (cpu_online(cpu)) {
                cp->func = remote_softirq_receive;
                cp->info = &softirq;
                cp->flags = 0;

                __smp_call_function_single(cpu, cp, 0);
                return 0;
        }
        return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu. If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
        if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
                __local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        unsigned long flags;
        int this_cpu;

        local_irq_save(flags);
        this_cpu = smp_processor_id();
        __send_remote_softirq(cp, cpu, this_cpu, softirq);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);

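/*
 * Usage sketch (illustrative; struct my_work and target_cpu are not part
 * of this file). The caller embeds a call_single_data in its own work
 * item, and the softirq handler later walks this cpu's softirq_work_list:
 *
 *	struct my_work {
 *		struct call_single_data csd;
 *		...
 *	};
 *
 *	send_remote_softirq(&work->csd, target_cpu, BLOCK_SOFTIRQ);
 *
 * If target_cpu is offline or is the local cpu, the work is queued on the
 * local cpu instead, so the caller needs no fallback path of its own.
 */
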
static int remote_softirq_cpu_notify(struct notifier_block *self,
                                     unsigned long action, void *hcpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                int cpu = (unsigned long) hcpu;
                int i;

                local_irq_disable();
                for (i = 0; i < NR_SOFTIRQS; i++) {
                        struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
                        struct list_head *local_head;

                        if (list_empty(head))
                                continue;

                        local_head = &__get_cpu_var(softirq_work_list[i]);
                        list_splice_init(head, local_head);
                        raise_softirq_irqoff(i);
                }
                local_irq_enable();
        }

        return NOTIFY_OK;
}

static struct notifier_block remote_softirq_cpu_notifier = {
        .notifier_call = remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int i;

                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
                for (i = 0; i < NR_SOFTIRQS; i++)
                        INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }

        register_hotcpu_notifier(&remote_softirq_cpu_notifier);

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        local_irq_disable();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirqs on the current stack,
                 * as we are not deep in a task call stack here.
                 */
                __do_softirq();
                rcu_note_context_switch(cpu);
                local_irq_enable();
                cond_resched();
                return;
        }
        local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int cpu_callback(struct notifier_block *nfb,
                        unsigned long action,
                        void *hcpu)
{
        switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                takeover_tasklets((unsigned long)hcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
        .notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
        .thread_comm            = "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
        register_cpu_notifier(&cpu_nfb);

        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}