/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
 * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 */
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H

#include <linux/linkage.h>
#include <linux/smp.h>

#include <asm/mipsmtregs.h>

#include <irq.h>

/*
 * No-op stub: this platform keeps no virq mappings to dispose of, but
 * generic and driver code may still call irq_dispose_mapping() on teardown.
 */
static inline void irq_dispose_mapping(unsigned int virq)
{
}

#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
	return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
}
#else
#define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
#endif

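/*
 * Illustrative note (not used by any code here): on an i8259 master/slave
 * pair the slave PIC is cascaded through master line 2, so a request for
 * the cascade line is redirected to the first slave input, IRQ 9:
 *
 *	irq_canonicalize(I8259A_IRQ_BASE + 2)	yields I8259A_IRQ_BASE + 9
 *	irq_canonicalize(I8259A_IRQ_BASE + 4)	is returned unchanged
 */
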
#ifdef CONFIG_MIPS_MT_SMTC

struct irqaction;

extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
	unsigned long hwmask);

static inline void smtc_im_ack_irq(unsigned int irq)
{
	/*
	 * If this IRQ is gated by a Status.IM bit (ST0_IM covers the IM
	 * field), re-enable that bit now that the IRQ has been acked.
	 */
	if (irq_hwmask[irq] & ST0_IM)
		set_c0_status(irq_hwmask[irq] & ST0_IM);
}

#else

static inline void smtc_im_ack_irq(unsigned int irq)
{
}

#endif /* CONFIG_MIPS_MT_SMTC */

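/*
 * Usage sketch (hypothetical chip driver, not part of this header): a
 * platform irq_chip ack path would call smtc_im_ack_irq() so that the
 * Status.IM bit masked off by low-level SMTC dispatch is turned back on:
 *
 *	static void foo_mask_and_ack(struct irq_data *d)
 *	{
 *		mask_foo_hw(d->irq);		(hypothetical helper)
 *		smtc_im_ack_irq(d->irq);
 *	}
 */
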
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
#include <linux/cpumask.h>

extern int plat_set_irq_affinity(struct irq_data *d,
	const struct cpumask *affinity, bool force);
extern void smtc_forward_irq(struct irq_data *d);

/*
 * IRQ affinity hook invoked at the beginning of interrupt dispatch
 * if the option is enabled.
 *
 * Up through Linux 2.6.22 (at least) cpumask operations are very
 * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
 * used a "fast path" per-IRQ-descriptor cache of affinity information
 * to reduce latency. As there is a project afoot to optimize the
 * cpumask implementations, this version optimistically assumes
 * that cpumask.h macro overhead is reasonable during interrupt dispatch.
 */
static inline int handle_on_other_cpu(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	if (cpumask_test_cpu(smp_processor_id(), d->affinity))
		return 0;
	smtc_forward_irq(d);
	return 1;
}

#else /* Not doing SMTC affinity */

static inline int handle_on_other_cpu(unsigned int irq) { return 0; }

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

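/*
 * Behaviour sketch (assumed caller, see do_IRQ() below): the dispatch
 * path consults handle_on_other_cpu() first; a non-zero return means the
 * IRQ was forwarded to a CPU in its affinity mask and the local CPU must
 * not run the handler:
 *
 *	if (handle_on_other_cpu(irq))
 *		return;			(IRQ forwarded elsewhere)
 *	... run the local handler ...
 */
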
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP

static inline void smtc_im_backstop(unsigned int irq)
{
	/*
	 * 0x0000ff00 is the image of the Status.IM bits kept in
	 * TCContext; clear the bits belonging to this IRQ so the
	 * return-from-exception path will not re-enable them.
	 */
	if (irq_hwmask[irq] & 0x0000ff00)
		write_c0_tccontext(read_c0_tccontext() &
				   ~(irq_hwmask[irq] & 0x0000ff00));
}

/*
 * Clear the interrupt mask handling "backstop" if the irq_hwmask
 * entry so indicates. This implies that the ack() or end()
 * functions will take over re-enabling the low-level mask.
 * Otherwise it will be done on return from exception.
 */
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
	int ret = handle_on_other_cpu(irq);

	if (!ret)
		smtc_im_backstop(irq);
	return ret;
}

#else

static inline void smtc_im_backstop(unsigned int irq) { }
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
	return handle_on_other_cpu(irq);
}

#endif

extern void do_IRQ(unsigned int irq);

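/*
 * For reference, the dispatcher in arch/mips/kernel/irq.c is expected to
 * combine the helpers above roughly as follows (sketch, not the verbatim
 * implementation):
 *
 *	void __irq_entry do_IRQ(unsigned int irq)
 *	{
 *		irq_enter();
 *		if (!smtc_handle_on_other_cpu(irq))
 *			generic_handle_irq(irq);
 *		irq_exit();
 *	}
 */
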
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF

extern void do_IRQ_no_affinity(unsigned int irq);

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

extern void arch_init_irq(void);
extern void spurious_interrupt(void);

extern int allocate_irqno(void);
extern void alloc_legacy_irqno(void);
extern void free_irqno(unsigned int irq);

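/*
 * Usage sketch (hypothetical platform code): allocate_irqno() hands out
 * an unused IRQ number (negative on exhaustion), which is later given
 * back with free_irqno():
 *
 *	int irq = allocate_irqno();
 *
 *	if (irq < 0)
 *		panic("Out of IRQ numbers");
 *	...
 *	free_irqno(irq);
 */
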
/*
 * Before R2 the timer and performance counter interrupts were both
 * hard-wired to CPU interrupt 7 (the IE7 line). Since R2 their numbers
 * have to be read from the c0_intctl register.
 */
#define CP0_LEGACY_COMPARE_IRQ 7

extern int cp0_compare_irq;
extern int cp0_compare_irq_shift;
extern int cp0_perfcount_irq;

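/*
 * Setup sketch (assumed shape; the real assignments live in
 * arch/mips/kernel/traps.c): on an R2 core the lines come from the
 * IPTI/IPPCI fields of c0_intctl, otherwise the legacy fixed line is used:
 *
 *	if (cpu_has_mips_r2) {
 *		cp0_compare_irq = (read_c0_intctl() >> 29) & 7;	  (IntCtl.IPTI)
 *		cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7; (IntCtl.IPPCI)
 *	} else {
 *		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
 *		cp0_perfcount_irq = -1;
 *	}
 */
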
#endif /* _ASM_IRQ_H */