/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so we let the scheduler balance
 * the softirq load for us.
 */
void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into add_preempt_count and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and call
         * trace_preempt_off() manually later.
         */
        preempt_count() += SOFTIRQ_OFFSET;
        /*
         * Were softirqs turned off above?
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == SOFTIRQ_OFFSET)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
        add_preempt_count(SOFTIRQ_OFFSET);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
        __local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now?
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}

void local_bh_enable(void)
{
        _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
        _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

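/*
 * Usage sketch (editorial addition, not part of this file): process-context
 * code that shares data with a softirq or tasklet handler disables bottom
 * halves around the critical section. The names my_lock and my_counter are
 * hypothetical; spin_lock_bh() combines the two calls shown here.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static unsigned long my_counter;
 *
 *	static void my_counter_inc(void)
 *	{
 *		local_bh_disable();
 *		spin_lock(&my_lock);
 *		my_counter++;
 *		spin_unlock(&my_lock);
 *		local_bh_enable();
 *	}
 */
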
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness:
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        int max_restart = MAX_SOFTIRQ_RESTART;
        int cpu;

        pending = local_softirq_pending();
        account_system_vtime(current);

        __local_bh_disable((unsigned long)__builtin_return_address(0));
        lockdep_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        int prev_count = preempt_count();
                        kstat_incr_softirqs_this_cpu(h - softirq_vec);

                        trace_softirq_entry(h, softirq_vec);
                        h->action(h);
                        trace_softirq_exit(h, softirq_vec);
                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %td %s %p "
                                       "with preempt_count %08x, "
                                       "exited with %08x?\n", h - softirq_vec,
                                       softirq_to_name[h - softirq_vec],
                                       h->action, prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }

                        rcu_bh_qs(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending && --max_restart)
                goto restart;

        if (pending)
                wakeup_softirqd();

        lockdep_softirq_exit();

        account_system_vtime(current);
        _local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        rcu_irq_enter();
        if (idle_cpu(cpu) && !in_interrupt()) {
                __irq_enter();
                tick_check_idle(cpu);
        } else
                __irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
        account_system_vtime(current);
        trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        rcu_irq_exit();
#ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
        if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
                tick_nohz_stop_sched_tick(0);
#endif
        preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}

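/*
 * Usage sketch (editorial addition, not part of this file): a subsystem
 * registers its action once at boot and then raises the softirq, usually
 * from hard interrupt context. Softirqs are a fixed, compile-time set, so
 * MY_SOFTIRQ stands in for one of the entries listed in softirq_to_name
 * above (e.g. NET_TX_SOFTIRQ); my_pending_work() and my_handle_one_item()
 * are hypothetical.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		while (my_pending_work())
 *			my_handle_one_item();
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		open_softirq(MY_SOFTIRQ, my_softirq_action);
 *		return 0;
 *	}
 *
 * Later, typically from an interrupt handler:
 *
 *	raise_softirq(MY_SOFTIRQ);
 */
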
/*
 * Tasklets
 */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_vec).tail = t;
        __get_cpu_var(tasklet_vec).tail = &(t->next);
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_hi_vec).tail = t;
        __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        BUG_ON(!irqs_disabled());

        t->next = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = t;
        __raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_vec).head;
        __get_cpu_var(tasklet_vec).head = NULL;
        __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_vec).tail = t;
                __get_cpu_var(tasklet_vec).tail = &(t->next);
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = NULL;
        __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_hi_vec).tail = t;
                __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}


void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

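/*
 * Usage sketch (editorial addition, not part of this file): a driver that
 * defers work from its interrupt handler to a tasklet. The names my_dev,
 * my_tasklet_fn and my_process_completions are hypothetical.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		my_process_completions(dev);
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *
 * The interrupt handler then just queues the deferred work:
 *
 *	tasklet_schedule(&dev->tasklet);
 *
 * and teardown must call tasklet_kill(&dev->tasklet) before freeing dev.
 */
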
/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:      tasklet_hrtimer which is initialized
 * @function:    hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

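/*
 * Usage sketch (editorial addition, not part of this file): arming a
 * tasklet_hrtimer so that my_timeout_fn() runs in softirq context roughly
 * every 100ms. The names my_ttimer and my_timeout_fn are hypothetical;
 * tasklet_hrtimer_start() is the starter defined in <linux/interrupt.h>.
 *
 *	static struct tasklet_hrtimer my_ttimer;
 *
 *	static enum hrtimer_restart my_timeout_fn(struct hrtimer *timer)
 *	{
 *		hrtimer_forward_now(timer, ktime_set(0, 100 * NSEC_PER_MSEC));
 *		return HRTIMER_RESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&my_ttimer, my_timeout_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_ttimer, ktime_set(0, 100 * NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */
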
/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
        struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

        list_add_tail(&cp->list, head);

        /* Trigger the softirq only if the list was previously empty. */
        if (head->next == &cp->list)
                raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
        struct call_single_data *cp = data;
        unsigned long flags;
        int softirq;

        softirq = cp->priv;

        local_irq_save(flags);
        __local_trigger(cp, softirq);
        local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        if (cpu_online(cpu)) {
                cp->func = remote_softirq_receive;
                cp->info = cp;
                cp->flags = 0;
                cp->priv = softirq;

                __smp_call_function_single(cpu, cp, 0);
                return 0;
        }
        return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
        if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
                __local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        unsigned long flags;
        int this_cpu;

        local_irq_save(flags);
        this_cpu = smp_processor_id();
        __send_remote_softirq(cp, cpu, this_cpu, softirq);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);

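/*
 * Usage sketch (editorial addition, not part of this file): a block-style
 * completion path pushes a work descriptor back to the CPU that submitted
 * the request. The descriptor embeds the call_single_data, and the softirq
 * action drains this CPU's softirq_work_list. struct my_req, my_complete()
 * and MY_SOFTIRQ are hypothetical.
 *
 *	struct my_req {
 *		struct call_single_data csd;
 *		int result;
 *	};
 *
 *	static void my_complete(struct my_req *req, int dest_cpu)
 *	{
 *		send_remote_softirq(&req->csd, dest_cpu, MY_SOFTIRQ);
 *	}
 *
 * The MY_SOFTIRQ action then walks
 * &__get_cpu_var(softirq_work_list[MY_SOFTIRQ]), recovers each struct
 * my_req with container_of() on its embedded csd.list, and completes it.
 */
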
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
                                               unsigned long action, void *hcpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                int cpu = (unsigned long) hcpu;
                int i;

                local_irq_disable();
                for (i = 0; i < NR_SOFTIRQS; i++) {
                        struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
                        struct list_head *local_head;

                        if (list_empty(head))
                                continue;

                        local_head = &__get_cpu_var(softirq_work_list[i]);
                        list_splice_init(head, local_head);
                        raise_softirq_irqoff(i);
                }
                local_irq_enable();
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
        .notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int i;

                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
                for (i = 0; i < NR_SOFTIRQS; i++)
                        INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }

        register_hotcpu_notifier(&remote_softirq_cpu_notifier);

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int run_ksoftirqd(void *__bind_cpu)
{
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
                        preempt_enable_no_resched();
                        schedule();
                        preempt_disable();
                }

                __set_current_state(TASK_RUNNING);

                while (local_softirq_pending()) {
                        /*
                         * Preemption disabled here keeps the CPU from going
                         * offline under us. If it is already offline, we are
                         * on the wrong CPU: don't process.
                         */
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
                        do_softirq();
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
                        rcu_sched_qs((long)__bind_cpu);
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;

wait_to_die:
        preempt_enable();
        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
                __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
                __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
                }
                kthread_bind(p, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(ksoftirqd, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu),
                             cpumask_any(cpu_online_mask));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
                kthread_stop(p);
                takeover_tasklets(hotcpu);
                break;
        }
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

        BUG_ON(err == NOTIFY_BAD);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
        return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
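
/*
 * Usage sketch (editorial addition, not part of this file): running a
 * cache-flush style callback everywhere, including the calling CPU. The
 * callback runs with interrupts disabled, so it must not sleep; wait=1
 * blocks until every CPU has finished. my_flush_local() and
 * my_flush_this_cpu() are hypothetical.
 *
 *	static void my_flush_local(void *unused)
 *	{
 *		my_flush_this_cpu();
 *	}
 *
 *	on_each_cpu(my_flush_local, NULL, 1);
 */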
#endif

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return 0;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
        return 0;
}