/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

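/*
 * Illustrative sketch (not part of this header): passing an explicit
 * trigger type when requesting an interrupt line. "mydev_isr" and
 * "mydev" are hypothetical driver names.
 *
 *	err = request_irq(irq, mydev_isr, IRQF_TRIGGER_FALLING,
 *			  "mydev", mydev);
 *	if (err)
 *		return err;
 */
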
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

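/*
 * Illustrative sketch: checking the context value returned by
 * request_any_context_irq() (declared below). "mydev_isr" and "mydev"
 * are hypothetical.
 *
 *	ret = request_any_context_irq(irq, mydev_isr, 0, "mydev", mydev);
 *	if (ret < 0)
 *		return ret;			// request failed
 *	if (ret == IRQC_IS_NESTED)
 *		dev_dbg(dev, "irq runs in a threaded context\n");
 */
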
typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	per-cpu cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 */
struct irqaction {
	irq_handler_t		handler;
	unsigned long		flags;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	int			irq;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

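/*
 * Illustrative sketch: the usual split between a minimal hardirq
 * handler and a threaded handler. IRQF_ONESHOT keeps the line masked
 * until the thread has run. All "mydev_*" names are hypothetical.
 *
 *	static irqreturn_t mydev_hardirq(int irq, void *dev_id)
 *	{
 *		// quick check only; defer the real work to the thread
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
 *	{
 *		// may sleep here, e.g. for i2c/spi transfers
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, mydev_hardirq, mydev_thread_fn,
 *				   IRQF_ONESHOT, "mydev", mydev);
 */
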
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

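/*
 * Illustrative sketch: per-cpu interrupts (e.g. local timers) are
 * requested once with a __percpu cookie and then enabled on each cpu
 * that uses them. "mytimer_isr" and "mytimer_evt" are hypothetical.
 *
 *	err = request_percpu_irq(irq, mytimer_isr, "mytimer", mytimer_evt);
 *	...
 *	// later, on each cpu that should receive it:
 *	enable_percpu_irq(irq, 0);	// 0: keep the configured trigger type
 */
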
extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return request_irq(irq, handler, 0, devname, percpu_dev_id);
}

static inline void exit_irq_thread(void) { }
#endif

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

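/*
 * Illustrative sketch: the devm_ variant ties the irq to a struct
 * device lifetime, so no explicit free_irq() is needed on detach.
 * "pdev", "mydev_isr" and "mydev" are hypothetical.
 *
 *	err = devm_request_irq(&pdev->dev, irq, mydev_isr, 0,
 *			       "mydev", mydev);
 *	if (err)
 *		return err;	// no cleanup needed, devres releases it
 */
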
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);

/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

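/*
 * Illustrative sketch: pinning an interrupt to one cpu, guarded by
 * irq_can_set_affinity(). The cpu number is arbitrary here.
 *
 *	if (irq_can_set_affinity(irq))
 *		err = irq_set_affinity(irq, cpumask_of(2));
 */
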
/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

static inline void irq_run_affinity_notifiers(void)
{
	flush_scheduled_work();
}

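/*
 * Illustrative sketch: registering for affinity-change notifications.
 * Both callbacks run in process context, and the structure may only
 * be freed from (or after) the release callback. "mydev_*" names are
 * hypothetical; the notify struct is assumed to be embedded in a
 * driver-private structure.
 *
 *	static void mydev_affinity_notify(struct irq_affinity_notify *notify,
 *					  const cpumask_t *mask)
 *	{
 *		// re-steer per-cpu resources to follow the new mask
 *	}
 *
 *	static void mydev_affinity_release(struct kref *ref)
 *	{
 *		// last reference dropped; container may now be freed
 *	}
 *
 *	mydev->notify.notify = mydev_affinity_notify;
 *	mydev->notify.release = mydev_affinity_release;
 *	err = irq_set_affinity_notifier(irq, &mydev->notify);
 */
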
#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that
 * a particular irq context is disabled, and which is the only
 * irq-context user of a lock, so that it's safe to take the
 * lock in the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

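/*
 * Illustrative sketch: arming a wakeup-capable interrupt across
 * suspend, typically from a driver's suspend/resume callbacks.
 * device_may_wakeup() is assumed from linux/pm_wakeup.h.
 *
 *	// in .suspend():
 *	if (device_may_wakeup(dev))
 *		enable_irq_wake(mydev->irq);
 *
 *	// in .resume():
 *	if (device_may_wakeup(dev))
 *		disable_irq_wake(mydev->irq);
 */
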
#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */


#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE, avoid allocating new softirqs, unless you _really_ need
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably, RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

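/*
 * Illustrative sketch: wiring up a softirq action. open_softirq()
 * installs the handler for one of the fixed indices above at init
 * time; raise_softirq() marks it pending on the local cpu.
 * MY_SOFTIRQ is hypothetical - it would need its own entry in the
 * enum above, which the comment there explicitly discourages.
 *
 *	static void my_softirq_action(struct softirq_action *a)
 *	{
 *		// runs in softirq context with hardirqs enabled
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// once, at init
 *	...
 *	raise_softirq(MY_SOFTIRQ);	// e.g. from an irq handler
 */
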
/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them. The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Try to send a softirq to a remote cpu. If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: one
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If a client needs intertask synchronization,
     it must do so with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

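/*
 * Illustrative sketch: a typical tasklet scheduled from a hardirq
 * handler to defer the bulk of the work. The "mydev" names are
 * hypothetical.
 *
 *	static void mydev_do_work(unsigned long data)
 *	{
 *		struct mydev *dev = (struct mydev *)data;
 *		// deferred work runs here, in softirq context
 *	}
 *
 *	tasklet_init(&dev->tasklet, mydev_do_work, (unsigned long)dev);
 *	...
 *	tasklet_schedule(&dev->tasklet);	// from the irq handler
 *	...
 *	tasklet_kill(&dev->tasklet);		// on teardown
 */
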
struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

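/*
 * Illustrative sketch: a tasklet_hrtimer defers the timer function to
 * tasklet (softirq) context instead of running it in hard irq
 * context. "mydev_timer_fn" and "dev" are hypothetical.
 *
 *	static enum hrtimer_restart mydev_timer_fn(struct hrtimer *t)
 *	{
 *		// runs in tasklet context
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&dev->ttimer, mydev_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&dev->ttimer,
 *			      ktime_set(0, 10 * NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */
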
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

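/*
 * Illustrative sketch of the sequence above; the mydev_* helpers are
 * hypothetical device-specific stubs, and local_irq_enable() stands in
 * for the historical sti() of step 2.
 *
 *	mydev_mask_irq(dev);			// step 1
 *	local_irq_enable();			// step 2
 *	irqs = probe_irq_on();			// step 3
 *	mydev_trigger_irq(dev);			// step 4
 *	mdelay(10);				// step 5
 *	irq = probe_irq_off(irqs);		// step 6
 *	mydev_ack_irq(dev);			// step 7
 *	if (irq <= 0)
 *		goto retry;			// step 8: none/multiple seen
 */
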
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif