#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H

/*
 * Please do not include this file in generic code. There is currently
 * no requirement for any architecture to implement anything held
 * within this file.
 *
 * Thanks. --rmk
 */

#include <linux/smp.h>

#ifndef CONFIG_S390

#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/ptrace.h>

/*
 * IRQ line status.
 */
#define IRQ_INPROGRESS	1	/* IRQ handler active - do not enter! */
#define IRQ_DISABLED	2	/* IRQ disabled - do not enter! */
#define IRQ_PENDING	4	/* IRQ pending - replay on enable */
#define IRQ_REPLAY	8	/* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT	16	/* IRQ is being autodetected */
#define IRQ_WAITING	32	/* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL	64	/* IRQ level triggered */
#define IRQ_MASKED	128	/* IRQ masked - shouldn't be seen again */
#ifdef CONFIG_IRQ_PER_CPU
# define IRQ_PER_CPU	256	/* IRQ is per CPU */
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
#else
# define CHECK_IRQ_PER_CPU(var) 0
#endif

#define IRQ_NOPROBE	512	/* IRQ is not valid for probing */
#define IRQ_NOREQUEST	1024	/* IRQ cannot be requested */
#define IRQ_NOAUTOEN	2048	/* IRQ will not be enabled on request irq */
#define IRQ_DELAYED_DISABLE \
			4096	/* IRQ disable (masking) happens delayed. */

/*
 * IRQ types, see also include/linux/interrupt.h
 */
#define IRQ_TYPE_NONE		0x0000	/* Default, unspecified type */
#define IRQ_TYPE_EDGE_RISING	0x0001	/* Edge rising type */
#define IRQ_TYPE_EDGE_FALLING	0x0002	/* Edge falling type */
#define IRQ_TYPE_EDGE_BOTH	(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
#define IRQ_TYPE_LEVEL_HIGH	0x0004	/* Level high type */
#define IRQ_TYPE_LEVEL_LOW	0x0008	/* Level low type */
#define IRQ_TYPE_SIMPLE		0x0010	/* Simple type */
#define IRQ_TYPE_PERCPU		0x0020	/* Per CPU type */
#define IRQ_TYPE_PROBE		0x0040	/* Probing in progress */

struct proc_dir_entry;

/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @name:		name for /proc/interrupts
 * @startup:		start up the interrupt (defaults to ->enable if NULL)
 * @shutdown:		shut down the interrupt (defaults to ->disable if NULL)
 * @enable:		enable the interrupt (defaults to chip->unmask if NULL)
 * @disable:		disable the interrupt (defaults to chip->mask if NULL)
 * @ack:		start of a new interrupt
 * @mask:		mask an interrupt source
 * @mask_ack:		ack and mask an interrupt source
 * @unmask:		unmask an interrupt source
 * @end:		end of interrupt
 * @set_affinity:	set the CPU affinity on SMP machines
 * @retrigger:		resend an IRQ to the CPU
 * @set_type:		set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @set_wake:		enable/disable power-management wake-on of an IRQ
 *
 * @release:		release function solely used by UML
 * @typename:		obsoleted by name, kept as migration helper
 */
struct irq_chip {
	const char	*name;
	unsigned int	(*startup)(unsigned int irq);
	void		(*shutdown)(unsigned int irq);
	void		(*enable)(unsigned int irq);
	void		(*disable)(unsigned int irq);

	void		(*ack)(unsigned int irq);
	void		(*mask)(unsigned int irq);
	void		(*mask_ack)(unsigned int irq);
	void		(*unmask)(unsigned int irq);

	void		(*end)(unsigned int irq);
	void		(*set_affinity)(unsigned int irq, cpumask_t dest);
	int		(*retrigger)(unsigned int irq);
	int		(*set_type)(unsigned int irq, unsigned int flow_type);
	int		(*set_wake)(unsigned int irq, unsigned int on);

	/* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void		(*release)(unsigned int irq, void *dev_id);
#endif
	/*
	 * For compatibility, ->typename is copied into ->name.
	 * Will disappear.
	 */
	const char	*typename;
};
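/*
 * Illustrative sketch (not part of the original header): how a platform
 * driver might describe a hypothetical memory-mapped interrupt controller
 * with this structure. All "foo" names, registers and foo_base below are
 * made up; a real chip fills in only the callbacks its hardware needs and
 * the core falls back to the documented defaults for the rest.
 *
 *	static void foo_mask_irq(unsigned int irq)
 *	{
 *		writel(1 << (irq - FOO_IRQ_BASE), foo_base + FOO_MASK_SET);
 *	}
 *
 *	static void foo_unmask_irq(unsigned int irq)
 *	{
 *		writel(1 << (irq - FOO_IRQ_BASE), foo_base + FOO_MASK_CLR);
 *	}
 *
 *	static void foo_ack_irq(unsigned int irq)
 *	{
 *		writel(1 << (irq - FOO_IRQ_BASE), foo_base + FOO_ACK);
 *	}
 *
 *	static struct irq_chip foo_irq_chip = {
 *		.name	= "FOO",
 *		.ack	= foo_ack_irq,
 *		.mask	= foo_mask_irq,
 *		.unmask	= foo_unmask_irq,
 *	};
 */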

/**
 * struct irq_desc - interrupt descriptor
 *
 * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
 * @chip:		low level interrupt hardware access
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 * @action:		the irq action chain
 * @status:		status information
 * @depth:		disable-depth, for nested irq_disable() calls
 * @irq_count:		stats field to detect stalled irqs
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
 * @lock:		locking for SMP
 * @affinity:		IRQ affinity on SMP
 * @cpu:		cpu index useful for balancing
 * @pending_mask:	pending rebalanced interrupts
 * @move_irq:		need to re-target IRQ destination
 * @dir:		/proc/irq/ procfs entry
 * @affinity_entry:	/proc/irq/smp_affinity procfs entry on SMP
 *
 * Pad this out to 32 bytes for cache and indexing reasons.
 */
struct irq_desc {
	void fastcall		(*handle_irq)(unsigned int irq,
					      struct irq_desc *desc,
					      struct pt_regs *regs);
	struct irq_chip		*chip;
	void			*handler_data;
	void			*chip_data;
	struct irqaction	*action;	/* IRQ action list */
	unsigned int		status;		/* IRQ status */

	unsigned int		depth;		/* nested irq disables */
	unsigned int		irq_count;	/* For detecting broken IRQs */
	unsigned int		irqs_unhandled;
	spinlock_t		lock;
#ifdef CONFIG_SMP
	cpumask_t		affinity;
	unsigned int		cpu;
#endif
#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
	cpumask_t		pending_mask;
	unsigned int		move_irq;	/* need to re-target IRQ dest */
#endif
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
#endif
} ____cacheline_aligned;

extern struct irq_desc irq_desc[NR_IRQS];

/*
 * Migration helpers for obsolete names, they will go away:
 */
#define hw_interrupt_type	irq_chip
typedef struct irq_chip		hw_irq_controller;
#define no_irq_type		no_irq_chip
typedef struct irq_desc		irq_desc_t;

/*
 * Pick up the arch-dependent methods:
 */
#include <asm/hw_irq.h>

extern int setup_irq(unsigned int irq, struct irqaction *new);

#ifdef CONFIG_GENERIC_HARDIRQS

#ifdef CONFIG_SMP
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
	irq_desc[irq].affinity = mask;
}
#else
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
}
#endif

#ifdef CONFIG_SMP

#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)

void set_pending_irq(unsigned int irq, cpumask_t mask);
void move_native_irq(int irq);

#ifdef CONFIG_PCI_MSI
/*
 * Why are these dummies?
 * For example, set_ioapic_affinity_vector() calls its
 * set_ioapic_affinity_irq() counterpart after translating the vector to
 * irq info. We only need to perform this operation on the real irq when
 * we don't use vectors, i.e. when pci_use_vector() is false.
 */
static inline void move_irq(int irq)
{
}

static inline void set_irq_info(int irq, cpumask_t mask)
{
}

#else /* CONFIG_PCI_MSI */

static inline void move_irq(int irq)
{
	move_native_irq(irq);
}

static inline void set_irq_info(int irq, cpumask_t mask)
{
	set_native_irq_info(irq, mask);
}

#endif /* CONFIG_PCI_MSI */

#else /* CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE */

static inline void move_irq(int irq)
{
}

static inline void move_native_irq(int irq)
{
}

static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
{
}

static inline void set_irq_info(int irq, cpumask_t mask)
{
	set_native_irq_info(irq, mask);
}

#endif /* CONFIG_GENERIC_PENDING_IRQ */

#else /* CONFIG_SMP */

#define move_irq(x)
#define move_native_irq(x)

#endif /* CONFIG_SMP */

#ifdef CONFIG_IRQBALANCE
extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask);
#else
static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
{
}
#endif

#ifdef CONFIG_AUTO_IRQ_AFFINITY
extern int select_smp_affinity(unsigned int irq);
#else
static inline int select_smp_affinity(unsigned int irq)
{
	return 1;
}
#endif

extern int no_irq_affinity;

/* Handle irq action chains: */
extern int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
			    struct irqaction *action);

/*
 * Built-in IRQ handlers for various IRQ types,
 * callable via desc->chip->handle_irq()
 */
extern void fastcall
handle_level_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs);
extern void fastcall
handle_fastack_irq(unsigned int irq, struct irq_desc *desc,
		   struct pt_regs *regs);
extern void fastcall
handle_edge_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs);
extern void fastcall
handle_simple_irq(unsigned int irq, struct irq_desc *desc,
		  struct pt_regs *regs);
extern void fastcall
handle_percpu_irq(unsigned int irq, struct irq_desc *desc,
		  struct pt_regs *regs);
extern void fastcall
handle_bad_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs);

/*
 * Get a descriptive string for the highlevel handler, for
 * /proc/interrupts output:
 */
extern const char *
handle_irq_name(void fastcall (*handle)(unsigned int, struct irq_desc *,
					struct pt_regs *));

/*
 * Monolithic do_IRQ implementation.
 * (is an explicit fastcall, because i386 4KSTACKS calls it from assembly)
 */
extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);

/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt. If the descriptor is attached to an
 * irqchip-style controller we call its ->handle_irq() handler;
 * if it is attached to an irqtype-style controller we fall back
 * to __do_IRQ().
 */
static inline void generic_handle_irq(unsigned int irq, struct pt_regs *regs)
{
	struct irq_desc *desc = irq_desc + irq;

	if (likely(desc->handle_irq))
		desc->handle_irq(irq, desc, regs);
	else
		__do_IRQ(irq, regs);
}
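/*
 * Illustrative sketch (not part of the original header): an arch-level
 * interrupt entry point would typically decode the hardware IRQ number,
 * enter IRQ context and hand the interrupt to the generic layer via
 * generic_handle_irq(). The names asm_do_IRQ() and get_irqnr() below are
 * made up for illustration.
 *
 *	asmlinkage void asm_do_IRQ(unsigned int hwirq, struct pt_regs *regs)
 *	{
 *		unsigned int irq = get_irqnr(hwirq);
 *
 *		irq_enter();
 *		generic_handle_irq(irq, regs);
 *		irq_exit();
 *	}
 */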

/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
			   int action_ret, struct pt_regs *regs);

/* Resending of interrupts: */
void check_irq_resend(struct irq_desc *desc, unsigned int irq);

/* Initialize /proc/irq/ */
extern void init_irq_proc(void);

/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);

/* Checks whether the interrupt can be requested by request_irq(): */
extern int can_request_irq(unsigned int irq, unsigned long irqflags);

/* Dummy irq-chip implementation: */
extern struct irq_chip no_irq_chip;

extern void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
			 void fastcall (*handle)(unsigned int,
						 struct irq_desc *,
						 struct pt_regs *));
extern void
__set_irq_handler(unsigned int irq,
		  void fastcall (*handle)(unsigned int, struct irq_desc *,
					  struct pt_regs *),
		  int is_chained);

/*
 * Set a highlevel flow handler for a given IRQ:
 */
static inline void
set_irq_handler(unsigned int irq,
		void fastcall (*handle)(unsigned int, struct irq_desc *,
					struct pt_regs *))
{
	__set_irq_handler(irq, handle, 0);
}

/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST and IRQ_NOPROBE)
 */
static inline void
set_irq_chained_handler(unsigned int irq,
			void fastcall (*handle)(unsigned int, struct irq_desc *,
						struct pt_regs *))
{
	__set_irq_handler(irq, handle, 1);
}
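/*
 * Illustrative sketch (not part of the original header): how platform
 * setup code might bind the hypothetical foo_irq_chip from the earlier
 * example to a range of IRQs, choosing the built-in flow handler that
 * matches the line type. FOO_IRQ_BASE and FOO_NR_IRQS are made-up
 * constants.
 *
 *	static void __init foo_init_irqs(void)
 *	{
 *		unsigned int irq;
 *
 *		for (irq = FOO_IRQ_BASE; irq < FOO_IRQ_BASE + FOO_NR_IRQS; irq++)
 *			set_irq_chip_and_handler(irq, &foo_irq_chip,
 *						 handle_level_irq);
 *	}
 */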

#endif /* CONFIG_GENERIC_HARDIRQS */

#endif /* !CONFIG_S390 */

#endif /* _LINUX_IRQ_H */