/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
 * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 */
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H
#include <linux/linkage.h>

#include <asm/mipsmtregs.h>

#include <irq.h>

#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
	return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
}
#else
#define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
#endif
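
/*
 * Usage sketch (hypothetical caller, for illustration only): code that
 * accepts a legacy ISA IRQ number should canonicalize it first, since
 * on i8259 systems IRQ 2 is the cascade line and is redirected to IRQ 9:
 *
 *	int line = irq_canonicalize(I8259A_IRQ_BASE + 2);
 *	// line == I8259A_IRQ_BASE + 9 when CONFIG_I8259 is set
 */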

#ifdef CONFIG_MIPS_MT_SMTC

struct irqaction;

extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction *new,
			  unsigned long hwmask);

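/*
 * Re-enable the Status.IM bits recorded for this IRQ in irq_hwmask[]
 * once the interrupt has been serviced, undoing any low-level masking
 * left in place by the dispatch-time "backstop" handling below.
 */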
static inline void smtc_im_ack_irq(unsigned int irq)
{
	if (irq_hwmask[irq] & ST0_IM)
		set_c0_status(irq_hwmask[irq] & ST0_IM);
}

#else

static inline void smtc_im_ack_irq(unsigned int irq)
{
}

#endif /* CONFIG_MIPS_MT_SMTC */

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
#include <linux/cpumask.h>

extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity);
extern void smtc_forward_irq(unsigned int irq);

/*
 * IRQ affinity hook invoked at the beginning of interrupt dispatch
 * if option is enabled.
 *
 * Up through Linux 2.6.22 (at least) cpumask operations are very
 * inefficient on MIPS.  Initial prototypes of SMTC IRQ affinity
 * used a "fast path" per-IRQ-descriptor cache of affinity information
 * to reduce latency.  As there is a project afoot to optimize the
 * cpumask implementations, this version is optimistically assuming
 * that cpumask.h macro overhead is reasonable during interrupt dispatch.
 */
#define IRQ_AFFINITY_HOOK(irq)						\
do {									\
	if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {	\
		smtc_forward_irq(irq);					\
		irq_exit();						\
		return;							\
	}								\
} while (0)
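
/*
 * Because the hook expands to a bare "return", it may only be used
 * inside a void function, at the top of the dispatch path; its
 * irq_exit() balances the irq_enter() already done by do_IRQ() below.
 * A minimal sketch of an expansion site (hypothetical dispatch
 * routine, shown for illustration only):
 *
 *	asmlinkage void plat_irq_dispatch(void)
 *	{
 *		unsigned int irq = ...;	// decoded from CP0 Cause
 *
 *		do_IRQ(irq);		// expands IRQ_AFFINITY_HOOK first
 *	}
 */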

#else /* Not doing SMTC affinity */

#define IRQ_AFFINITY_HOOK(irq) do { } while (0)

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP

/*
 * Clear the interrupt mask handling "backstop" if the irq_hwmask
 * entry so indicates.  The 0x0000ff00 mask below selects bits 8-15,
 * which correspond to the Status.IM field (ST0_IM); clearing them in
 * TCContext implies that the ack() or end() functions will take over
 * re-enabling the low-level mask.  Otherwise it will be done on return
 * from exception.
 */
#define __DO_IRQ_SMTC_HOOK(irq)						\
do {									\
	IRQ_AFFINITY_HOOK(irq);						\
	if (irq_hwmask[irq] & 0x0000ff00)				\
		write_c0_tccontext(read_c0_tccontext() &		\
				   ~(irq_hwmask[irq] & 0x0000ff00));	\
} while (0)

#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq)				\
do {									\
	if (irq_hwmask[irq] & 0x0000ff00)				\
		write_c0_tccontext(read_c0_tccontext() &		\
				   ~(irq_hwmask[irq] & 0x0000ff00));	\
} while (0)

#else

#define __DO_IRQ_SMTC_HOOK(irq)						\
do {									\
	IRQ_AFFINITY_HOOK(irq);						\
} while (0)
#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0)

#endif

/*
 * do_IRQ handles all normal device IRQs (the special SMP cross-CPU
 * interrupts have their own specific handlers).
 *
 * Ideally there should be a way to get this into kernel/irq/handle.c to
 * avoid the overhead of a call for just a tiny function ...
 */
#define do_IRQ(irq)							\
do {									\
	irq_enter();							\
	__DO_IRQ_SMTC_HOOK(irq);					\
	generic_handle_irq(irq);					\
	irq_exit();							\
} while (0)
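
/*
 * A minimal sketch of a platform dispatch routine built on do_IRQ()
 * (hypothetical decode, for illustration only; real platforms decode
 * the pending lines from the CP0 Cause and Status registers):
 *
 *	asmlinkage void plat_irq_dispatch(void)
 *	{
 *		unsigned int pending = read_c0_cause() &
 *				       read_c0_status() & ST0_IM;
 *
 *		if (pending & CAUSEF_IP7)
 *			do_IRQ(MIPS_CPU_IRQ_BASE + 7);
 *		else
 *			spurious_interrupt();
 *	}
 */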
129
Kevin D. Kissellf571eff2007-08-03 19:38:03 +0200130#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
131/*
132 * To avoid inefficient and in some cases pathological re-checking of
133 * IRQ affinity, we have this variant that skips the affinity check.
134 */
135
136
137#define do_IRQ_no_affinity(irq) \
138do { \
139 irq_enter(); \
140 __NO_AFFINITY_IRQ_SMTC_HOOK(irq); \
141 generic_handle_irq(irq); \
142 irq_exit(); \
143} while (0)
144
145#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

extern void arch_init_irq(void);
extern void spurious_interrupt(void);

extern int allocate_irqno(void);
extern void alloc_legacy_irqno(void);
extern void free_irqno(unsigned int irq);
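
/*
 * Sketch of the intended pairing (hypothetical caller, for illustration
 * only): dynamically assigned IRQ numbers come from allocate_irqno()
 * and must be handed back with free_irqno():
 *
 *	int irq = allocate_irqno();
 *	if (irq < 0)
 *		return -EBUSY;
 *	...
 *	free_irqno(irq);
 */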

/*
 * Before R2 the timer and performance counter interrupts were both fixed
 * to interrupt 7 (Status.IM7/Cause.IP7).  Since R2 their number has to be
 * read from the c0_intctl register.
 */
#define CP0_LEGACY_COMPARE_IRQ 7

extern int cp0_compare_irq;
extern int cp0_perfcount_irq;
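
/*
 * A sketch of how these are typically derived (modeled on the per-CPU
 * trap setup in traps.c; INTCTLB_IPTI and INTCTLB_IPPCI are the IntCtl
 * field offsets from <asm/mipsregs.h>):
 *
 *	if (cpu_has_mips_r2) {
 *		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
 *		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
 *	} else {
 *		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
 *		cp0_perfcount_irq = -1;
 *	}
 */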

#endif /* _ASM_IRQ_H */