/*
 * linux/arch/sh/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>

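/*
 * Bumped for every spurious or unhandled vector; reported as the
 * ERR line in /proc/interrupts (see arch_show_interrupts() below).
 */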
atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself; it doesn't deserve a
 * generic callback.
 */
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk("unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts printing for arch-specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");

	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));

	return 0;
}
#endif

#ifdef CONFIG_IRQSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};
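
/*
 * As with a regular task stack, the thread_info sits at the bottom
 * of each THREAD_SIZE IRQ stack, and the stack itself grows down
 * from the top of the union towards it.
 */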

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

static inline void handle_one_irq(unsigned int irq)
{
	union irq_ctx *curctx, *irqctx;

	curctx = (union irq_ctx *)current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * This is where we switch to the IRQ stack. However, if we are
	 * already on the IRQ stack (because we interrupted a hardirq
	 * handler), we can't switch again and simply keep using the
	 * current stack, which is the IRQ stack already.
	 */
	if (curctx != irqctx) {
		u32 *isp;

		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

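		/*
		 * Note: jsr executes the instruction in its delay slot
		 * before the branch takes effect, so the "mov %2, r15"
		 * below installs the IRQ stack pointer immediately before
		 * generic_handle_irq() starts running. r4 carries the
		 * first argument and r8 (callee-saved in the SH ABI)
		 * preserves the original stack pointer across the call.
		 */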
		__asm__ __volatile__ (
			"mov	%0, r4		\n"
			"mov	r15, r8		\n"
			"jsr	@%1		\n"
			/* switch to the irq stack */
			" mov	%2, r15		\n"
			/* restore the stack (ring zero) */
			"mov	r8, r15		\n"
			: /* no outputs */
			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "t", "pr"
		);
	} else
		generic_handle_irq(irq);
}

/*
 * Allocate per-CPU stacks for hardirq and softirq processing.
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

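	/*
	 * Each context places a thread_info at the base of its
	 * THREAD_SIZE stack. The hardirq context starts life with
	 * HARDIRQ_OFFSET in its preempt_count, while the softirq
	 * context starts at zero and lets __do_softirq() do its own
	 * accounting.
	 */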
	irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = 0;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
		cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}

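/*
 * Called by the core softirq code when pending softirqs should run on
 * the dedicated per-CPU softirq stack rather than on the current
 * task's stack.
 */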
void do_softirq_own_stack(void)
{
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	curctx = current_thread_info();
	irqctx = softirq_ctx[smp_processor_id()];
	irqctx->tinfo.task = curctx->task;
	irqctx->tinfo.previous_sp = current_stack_pointer;

	/* build the stack frame on the softirq stack */
	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

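	/*
	 * The same delay-slot trick as in handle_one_irq(): the
	 * "mov %1, r15" in the jsr delay slot switches to the softirq
	 * stack just before __do_softirq() begins, and r9 (callee-saved)
	 * holds the original stack pointer for the return path.
	 */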
	__asm__ __volatile__ (
		"mov	r15, r9		\n"
		"jsr	@%0		\n"
		/* switch to the softirq stack */
		" mov	%1, r15		\n"
		/* restore the thread stack */
		"mov	r9, r15		\n"
		: /* no outputs */
		: "r" (__do_softirq), "r" (isp)
		: "memory", "r0", "r1", "r2", "r3", "r4",
		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
	);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif

asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

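	/*
	 * Map the hardware vector to a Linux IRQ number and give the
	 * machine vector's demux hook a chance to resolve cascaded
	 * (board-level) interrupts.
	 */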
	irq = irq_demux(irq_lookup(irq));

	if (irq != NO_IRQ_IGNORE) {
		handle_one_irq(irq);
		irq_finish(irq);
	}

	irq_exit();

	set_irq_regs(old_regs);

	return IRQ_HANDLED;
}

void __init init_IRQ(void)
{
	plat_irq_setup();

	/* Perform the machine-specific initialisation */
	if (sh_mv.mv_init_irq)
		sh_mv.mv_init_irq();

	intc_finalize();

	irq_ctx_init(smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int irq, cpu = smp_processor_id();

	for_each_active_irq(irq) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (irq_data_get_node(data) == cpu) {
			struct cpumask *mask = irq_data_get_affinity_mask(data);
			unsigned int newcpu = cpumask_any_and(mask,
							      cpu_online_mask);
			if (newcpu >= nr_cpu_ids) {
				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
						    irq, cpu);

				cpumask_setall(mask);
			}
			irq_set_affinity(irq, mask);
		}
	}
}
#endif