/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
 * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 */
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H

#include <linux/linkage.h>
#include <linux/smp.h>
#include <linux/irqdomain.h>

#include <asm/mipsmtregs.h>

#include <irq.h>

#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
        return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
}
#else
#define irq_canonicalize(irq) (irq)     /* Sane hardware, sane code ... */
#endif
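
/*
 * Illustrative note, not part of this header's contract: on i8259
 * systems IRQ 2 is the cascade input from the slave PIC, so requests
 * for it are conventionally redirected to IRQ 9.  For example:
 *
 *      int line = irq_canonicalize(I8259A_IRQ_BASE + 2);
 *      // line is now I8259A_IRQ_BASE + 9
 */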

#ifdef CONFIG_MIPS_MT_SMTC

struct irqaction;

extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction *new,
                          unsigned long hwmask);

static inline void smtc_im_ack_irq(unsigned int irq)
{
        if (irq_hwmask[irq] & ST0_IM)
                set_c0_status(irq_hwmask[irq] & ST0_IM);
}

#else

static inline void smtc_im_ack_irq(unsigned int irq)
{
}

#endif /* CONFIG_MIPS_MT_SMTC */
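
/*
 * Usage sketch, an assumption about the call site rather than anything
 * defined in this header: an irq_chip ack hook built for an SMTC kernel
 * would typically re-enable the Status.IM bits recorded for the line
 * once the interrupt has been acknowledged:
 *
 *      static void foo_mask_and_ack_irq(struct irq_data *d)   // hypothetical
 *      {
 *              foo_mask_irq(d);                               // hypothetical
 *              smtc_im_ack_irq(d->irq);
 *      }
 */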

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
#include <linux/cpumask.h>

extern int plat_set_irq_affinity(struct irq_data *d,
                                 const struct cpumask *affinity, bool force);
extern void smtc_forward_irq(struct irq_data *d);

/*
 * IRQ affinity hook invoked at the beginning of interrupt dispatch
 * if the option is enabled.
 *
 * Up through Linux 2.6.22 (at least), cpumask operations are very
 * inefficient on MIPS.  Initial prototypes of SMTC IRQ affinity
 * used a "fast path" per-IRQ-descriptor cache of affinity information
 * to reduce latency.  As there is a project afoot to optimize the
 * cpumask implementations, this version optimistically assumes
 * that cpumask.h macro overhead is reasonable during interrupt dispatch.
 */
static inline int handle_on_other_cpu(unsigned int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);

        if (cpumask_test_cpu(smp_processor_id(), d->affinity))
                return 0;
        smtc_forward_irq(d);
        return 1;
}

#else /* Not doing SMTC affinity */

static inline int handle_on_other_cpu(unsigned int irq) { return 0; }

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP

static inline void smtc_im_backstop(unsigned int irq)
{
        if (irq_hwmask[irq] & 0x0000ff00)
                write_c0_tccontext(read_c0_tccontext() &
                                   ~(irq_hwmask[irq] & 0x0000ff00));
}

/*
 * Clear the interrupt mask handling "backstop" if the irq_hwmask
 * entry so indicates.  This implies that the ack() or end()
 * functions will take over re-enabling the low-level mask.
 * Otherwise it will be done on return from exception.
 */
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
        int ret = handle_on_other_cpu(irq);

        if (!ret)
                smtc_im_backstop(irq);
        return ret;
}

#else

static inline void smtc_im_backstop(unsigned int irq) { }
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
        return handle_on_other_cpu(irq);
}

#endif
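
/*
 * Putting the pieces together: a platform interrupt dispatcher would be
 * expected to attempt forwarding first and only run the handler locally
 * when the IRQ was not handed off to another CPU.  A minimal sketch,
 * with a hypothetical dispatch function (not part of this header):
 *
 *      void platform_irq_dispatch(unsigned int irq)
 *      {
 *              if (smtc_handle_on_other_cpu(irq))
 *                      return;         // forwarded to the CPU with affinity
 *              do_IRQ(irq);
 *      }
 */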

extern void do_IRQ(unsigned int irq);

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF

extern void do_IRQ_no_affinity(unsigned int irq);

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

extern void arch_init_irq(void);
extern void spurious_interrupt(void);

extern int allocate_irqno(void);
extern void alloc_legacy_irqno(void);
extern void free_irqno(unsigned int irq);

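/*
 * Sketch of the expected allocator usage (assumed from the interface,
 * not spelled out in this header): allocate_irqno() hands back a free
 * MIPS IRQ number, or a negative value when none is available, and a
 * successful allocation is balanced by free_irqno():
 *
 *      int irq = allocate_irqno();
 *      if (irq < 0)
 *              return irq;             // hypothetical error path
 *      ...
 *      free_irqno(irq);
 */
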
/*
 * Before R2 the timer and performance counter interrupts were both fixed
 * to IE7.  Since R2 their number has to be read from the c0_intctl
 * register.
 */
#define CP0_LEGACY_COMPARE_IRQ 7

extern int cp0_compare_irq;
extern int cp0_compare_irq_shift;
extern int cp0_perfcount_irq;
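
/*
 * Illustrative sketch of how arch setup code is expected to fill these in
 * (an assumption based on the R2 c0_intctl field macros from mipsregs.h;
 * the authoritative logic lives in the trap initialization code):
 *
 *      if (cpu_has_mips_r2) {
 *              cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
 *              cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
 *      } else {
 *              cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
 *              cp0_perfcount_irq = -1;
 *      }
 */
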
#endif /* _ASM_IRQ_H */