/*
 * linux/arch/sh/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * SuperH version: Copyright (C) 1999 Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
#include <asm/cpu/mmu_context.h>

atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself; it doesn't deserve a
 * generic callback, I think.
 */
void ack_bad_irq(unsigned int irq)
{
        atomic_inc(&irq_err_count);
        printk("unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
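/*
 * /proc/interrupts support. Each populated vector gets one row: the IRQ
 * number, a per-online-CPU count, the controller chip and flow-handler
 * names, and the comma-separated action names. A row might look roughly
 * like the following (names illustrative, not taken from this file):
 *
 *  16:      12345            IPR-level     timer
 */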
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *)v, j;
        struct irqaction *action;
        unsigned long flags;

        if (i == 0) {
                seq_puts(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
        }

        if (i < sh_mv.mv_nr_irqs) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto unlock;
                seq_printf(p, "%3d: ", i);
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
                seq_printf(p, " %14s", irq_desc[i].chip->name);
                seq_printf(p, "-%-8s", irq_desc[i].name);
                seq_printf(p, " %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
unlock:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == sh_mv.mv_nr_irqs)
                seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));

        return 0;
}
#endif

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
};
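/*
 * The union overlays a thread_info at the base of a THREAD_SIZE region;
 * the stack grows down from the top of that same region toward it,
 * mirroring how ordinary task stacks are laid out.
 */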

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif

asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
#ifdef CONFIG_4KSTACKS
        union irq_ctx *curctx, *irqctx;
#endif

        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /*
         * Debugging check for stack overflow: is there less than
         * STACK_WARN free?
         */
        {
                long sp;

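                /*
                 * r15 is the stack pointer on SuperH. ANDing it with
                 * THREAD_SIZE - 1 yields the SP's byte offset within
                 * the (THREAD_SIZE-aligned) thread stack region.
                 */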
                __asm__ __volatile__ ("and r15, %0" :
                                      "=r" (sp) : "0" (THREAD_SIZE - 1));

                if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
                        printk("do_IRQ: stack overflow: %ld\n",
                               sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif

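        /*
         * The low-level entry code hands us the raw exception event:
         * evt2irq() maps the hardware event code to a Linux IRQ number,
         * and irq_demux() gives the machine vector a chance to demux it
         * further (e.g. for cascaded controllers).
         */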
        irq = irq_demux(evt2irq(irq));

#ifdef CONFIG_4KSTACKS
        curctx = (union irq_ctx *)current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * This is where we switch to the IRQ stack. However, if we are
         * already on the IRQ stack (because we interrupted a hardirq
         * handler), we can't switch again and simply keep using the
         * current stack, which is the IRQ stack already.
         */
        if (curctx != irqctx) {
                u32 *isp;

                isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
                irqctx->tinfo.task = curctx->tinfo.task;
                irqctx->tinfo.previous_sp = current_stack_pointer;

                /*
                 * Copy the softirq bits in preempt_count so that the
                 * softirq checks work in the hardirq context.
                 */
                irqctx->tinfo.preempt_count =
                        (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                        (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

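                /*
                 * Note on the asm below: SH's "jsr" has a branch delay
                 * slot, so the "mov %2, r15" placed after it executes
                 * before the call is taken -- that is what actually
                 * switches onto the IRQ stack. r8 is callee-saved in
                 * the SH ABI, so the original SP parked there survives
                 * the call and is restored afterwards.
                 */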
                __asm__ __volatile__ (
                        "mov    %0, r4          \n"
                        "mov    r15, r8         \n"
                        "jsr    @%1             \n"
                        /* switch to the irq stack */
                        " mov   %2, r15         \n"
                        /* restore the stack (ring zero) */
                        "mov    r8, r15         \n"
                        : /* no outputs */
                        : "r" (irq), "r" (generic_handle_irq), "r" (isp)
                        : "memory", "r0", "r1", "r2", "r3", "r4",
                          "r5", "r6", "r7", "r8", "t", "pr"
                );
        } else
#endif
                generic_handle_irq(irq);

        irq_exit();

        set_irq_regs(old_regs);
        return 1;
}

#ifdef CONFIG_4KSTACKS
static char softirq_stack[NR_CPUS * THREAD_SIZE]
                __attribute__((__section__(".bss.page_aligned")));

static char hardirq_stack[NR_CPUS * THREAD_SIZE]
                __attribute__((__section__(".bss.page_aligned")));
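/*
 * Both arrays are carved into per-CPU, THREAD_SIZE-byte slices by
 * irq_ctx_init(); placing them in .bss.page_aligned keeps them
 * page-aligned without bloating the on-disk image.
 */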

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

        irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk("CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}

extern asmlinkage void __do_softirq(void);

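/*
 * Run pending softirqs on the dedicated per-CPU softirq stack. This
 * overrides the generic do_softirq() (presumably via the arch defining
 * __ARCH_HAS_DO_SOFTIRQ, as on other ports with separate IRQ stacks).
 */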
asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_sp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

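                /*
                 * Same delay-slot trick as in do_IRQ(): the "mov %1, r15"
                 * in the jsr delay slot switches stacks before entering
                 * __do_softirq(); callee-saved r9 holds the original SP.
                 */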
                __asm__ __volatile__ (
                        "mov    r15, r9         \n"
                        "jsr    @%0             \n"
                        /* switch to the softirq stack */
                        " mov   %1, r15         \n"
                        /* restore the thread stack */
                        "mov    r9, r15         \n"
                        : /* no outputs */
                        : "r" (__do_softirq), "r" (isp)
                        : "memory", "r0", "r1", "r2", "r3", "r4",
                          "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
                );

                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);
#endif

void __init init_IRQ(void)
{
#ifdef CONFIG_CPU_HAS_PINT_IRQ
        init_IRQ_pint();
#endif
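        /* CPU-specific interrupt controller setup. */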
        plat_irq_setup();

        /* Perform the machine specific initialisation */
        if (sh_mv.mv_init_irq)
                sh_mv.mv_init_irq();

        irq_ctx_init(smp_processor_id());
}