/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <trace/events/irq.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

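/*
 * Illustrative sketch (not part of this header): a driver requesting a
 * falling-edge interrupt. "my_dev" and "my_handler" are assumed names
 * for the example, not kernel symbols.
 *
 *	if (request_irq(my_dev->irq, my_handler,
 *			IRQF_TRIGGER_FALLING, "my_dev", my_dev))
 *		return -EBUSY;
 */
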
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler has finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

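/*
 * Illustrative sketch: a line shared with IRQF_SHARED needs a non-NULL
 * dev_id cookie so free_irq() can tell the actions apart. The "foo"
 * names are assumptions for the example only.
 *
 *	ret = request_irq(irq, foo_handler, IRQF_SHARED, "foo", foo);
 *	...
 *	free_irq(irq, foo);	// the same cookie identifies this action
 */
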
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

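/*
 * Illustrative sketch: on success, request_any_context_irq() returns
 * one of the IRQC_* values above, telling the (assumed) caller which
 * context its handler will run in.
 *
 *	ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == IRQC_IS_NESTED)
 *		dev_info(dev, "handler runs in threaded context\n");
 */
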
typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device (per-cpu variant)
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 */
struct irqaction {
	irq_handler_t		handler;
	unsigned long		flags;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	int			irq;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

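/*
 * Illustrative sketch of the irq_handler_t contract: the handler gets
 * the irq number and the dev_id cookie passed at request time, and must
 * report whether the interrupt was ours. "foo_device", foo_irq_pending()
 * and foo_ack_irq() are assumed names for the example.
 *
 *	static irqreturn_t foo_handler(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	// not ours (shared line)
 *		foo_ack_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 */
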
extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

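/*
 * Illustrative sketch: a threaded request with IRQF_ONESHOT. The hardirq
 * part returns IRQ_WAKE_THREAD to run thread_fn in process context; the
 * line stays disabled until the thread finishes. "foo_*" names are
 * assumptions.
 *
 *	ret = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */
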
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return request_irq(irq, handler, 0, devname, percpu_dev_id);
}

static inline void exit_irq_thread(void) { }
#endif

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

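/*
 * Illustrative sketch: with the devm_ variants the IRQ is released
 * automatically when "dev" is unbound, so the (assumed) driver needs no
 * explicit free_irq() on its error or remove paths.
 *
 *	ret = devm_request_irq(&pdev->dev, irq, foo_handler, 0,
 *			       "foo", foo);
 *	if (ret)
 *		return ret;	// nothing to unwind for the IRQ
 */
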
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq);

/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change.  This will be
 *		called in process context.
 * @release:	Function to be called on release.  This will be
 *		called in process context.  Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

static inline void irq_run_affinity_notifiers(void)
{
	flush_scheduled_work();
}

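/*
 * Illustrative sketch: pinning an IRQ and watching for affinity
 * changes. The kref/work fields are managed by the core; the caller
 * supplies notify() and release() before registering. All "foo" names
 * are assumptions.
 *
 *	static void foo_notify(struct irq_affinity_notify *n,
 *			       const cpumask_t *mask)
 *	{ ... rebalance per-cpu state for the new mask ... }
 *
 *	irq_set_affinity(irq, cpumask_of(2));
 *	foo->affinity_notify.notify = foo_notify;
 *	foo->affinity_notify.release = foo_release;
 *	irq_set_affinity_notifier(irq, &foo->affinity_notify);
 */
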
#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs where it is
 * known that a particular irq context is disabled and is
 * the only irq-context user of a lock, so that it's safe
 * to take the lock in the irq-disabled section without
 * disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

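/*
 * Illustrative sketch: a driver's (assumed) suspend/resume callbacks
 * arming the IRQ as a system wakeup source.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */
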
#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
#  define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
#  define disable_irq_lockdep(irq)		disable_irq(irq)
#  define enable_irq_lockdep(irq)		enable_irq(irq)
#  define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */


#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE, avoid allocating new softirqs if you do not need _really_
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. F.e. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
static inline void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

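/*
 * Illustrative sketch of the softirq life cycle: register an action for
 * one of the fixed slots at init time, then raise it (typically from
 * irq context); the action later runs in softirq context.
 *
 *	static void net_tx_action(struct softirq_action *h) { ... }
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);	// boot time
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);			// mark pending
 */
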
/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them.  The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Try to send a softirq to a remote cpu.  If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   Main feature differing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature differing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If the client needs some intertask synchronization,
     it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }

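/*
 * Illustrative sketch: declaring and scheduling a tasklet ("foo" names
 * are assumptions). func runs later in softirq context, on one CPU at
 * a time.
 *
 *	static void foo_tasklet_fn(unsigned long data)
 *	{
 *		struct foo_device *foo = (struct foo_device *)data;
 *		...
 *	}
 *
 *	DECLARE_TASKLET(foo_tasklet, foo_tasklet_fn, (unsigned long)&foo);
 *
 *	// from the hardirq handler:
 *	tasklet_schedule(&foo_tasklet);
 */
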
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_cancel(&ttimer->timer), hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

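/*
 * Illustrative sketch: a tasklet_hrtimer fires in hardirq context and
 * defers its callback into a HI_SOFTIRQ tasklet. The "foo" names and
 * foo_timer_cb (an enum hrtimer_restart (*)(struct hrtimer *)) are
 * assumptions.
 *
 *	tasklet_hrtimer_init(&foo->ttimer, foo_timer_cb,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&foo->ttimer,
 *			      ktime_set(0, 10 * NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 *	...
 *	tasklet_hrtimer_cancel(&foo->ttimer);	// on teardown
 */
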
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

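/*
 * Illustrative sketch of the probing sequence above, for an assumed
 * "foo" device (foo_mask_irq() and foo_trigger_irq() are hypothetical
 * helpers):
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	foo_mask_irq(foo);		// step 1: quiesce the device
 *	mask = probe_irq_on();		// step 3
 *	foo_trigger_irq(foo);		// step 4
 *	mdelay(10);			// step 5
 *	irq = probe_irq_off(mask);	// step 6
 *	if (irq <= 0)
 *		return -ENODEV;		// none, or multiple irqs seen
 */
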
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif