/*
 * RM200 specific code
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006,2007 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * i8259 parts ripped out of arch/mips/kernel/i8259.c
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/io.h>

#include <asm/sni.h>
#include <asm/time.h>
#include <asm/irq_cpu.h>

#define RM200_I8259A_IRQ_BASE 32

#define MEMPORT(_base, _irq)				\
	{						\
		.mapbase = _base,			\
		.irq = _irq,				\
		.uartclk = 1843200,			\
		.iotype = UPIO_MEM,			\
		.flags = UPF_BOOT_AUTOCONF|UPF_IOREMAP,	\
	}

static struct plat_serial8250_port rm200_data[] = {
	MEMPORT(0x160003f8, RM200_I8259A_IRQ_BASE + 4),
	MEMPORT(0x160002f8, RM200_I8259A_IRQ_BASE + 3),
	{ },
};
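/*
 * Note: with .iotype = UPIO_MEM and UPF_IOREMAP set, the 8250 core
 * ioremap()s .mapbase itself, so the raw physical addresses above can
 * be used directly here.
 */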

static struct platform_device rm200_serial8250_device = {
	.name = "serial8250",
	.id = PLAT8250_DEV_PLATFORM,
	.dev = {
		.platform_data = rm200_data,
	},
};

static struct resource rm200_ds1216_rsrc[] = {
	{
		.start = 0x1cd41ffc,
		.end = 0x1cd41fff,
		.flags = IORESOURCE_MEM
	}
};

static struct platform_device rm200_ds1216_device = {
	.name = "rtc-ds1216",
	.num_resources = ARRAY_SIZE(rm200_ds1216_rsrc),
	.resource = rm200_ds1216_rsrc
};

static struct resource snirm_82596_rm200_rsrc[] = {
	{
		.start = 0x18000000,
		.end = 0x180fffff,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 0x1b000000,
		.end = 0x1b000004,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 0x1ff00000,
		.end = 0x1ff00020,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 27,
		.end = 27,
		.flags = IORESOURCE_IRQ
	},
	{
		.flags = 0x00
	}
};

static struct platform_device snirm_82596_rm200_pdev = {
	.name = "snirm_82596",
	.num_resources = ARRAY_SIZE(snirm_82596_rm200_rsrc),
	.resource = snirm_82596_rm200_rsrc
};

static struct resource snirm_53c710_rm200_rsrc[] = {
	{
		.start = 0x19000000,
		.end = 0x190fffff,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 26,
		.end = 26,
		.flags = IORESOURCE_IRQ
	}
};

static struct platform_device snirm_53c710_rm200_pdev = {
	.name = "snirm_53c710",
	.num_resources = ARRAY_SIZE(snirm_53c710_rm200_rsrc),
	.resource = snirm_53c710_rm200_rsrc
};

static int __init snirm_setup_devinit(void)
{
	if (sni_brd_type == SNI_BRD_RM200) {
		platform_device_register(&rm200_serial8250_device);
		platform_device_register(&rm200_ds1216_device);
		platform_device_register(&snirm_82596_rm200_pdev);
		platform_device_register(&snirm_53c710_rm200_pdev);
		sni_eisa_root_init();
	}
	return 0;
}

device_initcall(snirm_setup_devinit);

/*
 * The RM200 has both an ISA and an EISA bus.  The ISA bus is used only
 * for onboard devices and also carries two i8259 PICs.  Since these
 * PICs are not accessible via inb/outb, the following code uses
 * readb/writeb to access them.
 */
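
/*
 * For illustration only: on a classic PC the master PIC's mask register
 * sits at I/O port 0x21 and would be written with port I/O,
 *
 *	outb(mask, 0x21);
 *
 * whereas on the RM200 the same register lives in memory space and is
 * written through an ioremap()ed pointer, as done throughout this file:
 *
 *	writeb(mask, rm200_pic_master + PIC_IMR);
 */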

DEFINE_SPINLOCK(sni_rm200_i8259A_lock);
#define PIC_CMD		0x00
#define PIC_IMR		0x01
#define PIC_ISR		PIC_CMD
#define PIC_POLL	PIC_ISR
#define PIC_OCW3	PIC_ISR

/* i8259A PIC related values */
#define PIC_CASCADE_IR		2
#define MASTER_ICW4_DEFAULT	0x01
#define SLAVE_ICW4_DEFAULT	0x01

/*
 * This contains the irq mask for both 8259A irq controllers.
 */
static unsigned int rm200_cached_irq_mask = 0xffff;
static __iomem u8 *rm200_pic_master;
static __iomem u8 *rm200_pic_slave;

#define cached_master_mask	(rm200_cached_irq_mask)
#define cached_slave_mask	(rm200_cached_irq_mask >> 8)
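
/*
 * Example of how the shared mask is split: masking ISA IRQ 10 sets bit
 * 10 of rm200_cached_irq_mask; cached_slave_mask (the word shifted
 * right by 8) then carries it as bit 2 for the slave's IMR, while
 * writeb() of cached_master_mask truncates the word to its low byte
 * for the master.
 */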

static void sni_rm200_disable_8259A_irq(unsigned int irq)
{
	unsigned int mask;
	unsigned long flags;

	irq -= RM200_I8259A_IRQ_BASE;
	mask = 1 << irq;
	spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
	rm200_cached_irq_mask |= mask;
	if (irq & 8)
		writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
	else
		writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
	spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
}

static void sni_rm200_enable_8259A_irq(unsigned int irq)
{
	unsigned int mask;
	unsigned long flags;

	irq -= RM200_I8259A_IRQ_BASE;
	mask = ~(1 << irq);
	spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
	rm200_cached_irq_mask &= mask;
	if (irq & 8)
		writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
	else
		writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
	spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
}

static inline int sni_rm200_i8259A_irq_real(unsigned int irq)
{
	int value;
	int irqmask = 1 << irq;

	if (irq < 8) {
		writeb(0x0B, rm200_pic_master + PIC_CMD);
		value = readb(rm200_pic_master + PIC_CMD) & irqmask;
		writeb(0x0A, rm200_pic_master + PIC_CMD);
		return value;
	}
	writeb(0x0B, rm200_pic_slave + PIC_CMD);	/* ISR register */
	value = readb(rm200_pic_slave + PIC_CMD) & (irqmask >> 8);
	writeb(0x0A, rm200_pic_slave + PIC_CMD);
	return value;
}

/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!)
 */
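/*
 * For reference, the magic numbers written to PIC_CMD in this file are
 * standard i8259A command words: 0x0B and 0x0A are OCW3 requests to
 * return the ISR respectively the IRR on the next read, 0x0C is the
 * OCW3 poll command, and 0x60 + <level> is an OCW2 specific EOI for
 * that level (an EOI on the slave must be followed by an EOI for the
 * cascade input on the master).
 */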
void sni_rm200_mask_and_ack_8259A(unsigned int irq)
{
	unsigned int irqmask;
	unsigned long flags;

	irq -= RM200_I8259A_IRQ_BASE;
	irqmask = 1 << irq;
	spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
	/*
	 * Lightweight spurious IRQ detection. We do not want
	 * to overdo spurious IRQ handling - it's usually a sign
	 * of hardware problems, so we only do the checks we can
	 * do without slowing down good hardware unnecessarily.
	 *
	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
	 * usually resulting from the 8259A-1|2 PICs) occur
	 * even if the IRQ is masked in the 8259A. Thus we
	 * can check spurious 8259A IRQs without doing the
	 * quite slow i8259A_irq_real() call for every IRQ.
	 * This does not cover 100% of spurious interrupts,
	 * but should be enough to warn the user that there
	 * is something bad going on ...
	 */
	if (rm200_cached_irq_mask & irqmask)
		goto spurious_8259A_irq;
	rm200_cached_irq_mask |= irqmask;

handle_real_irq:
	if (irq & 8) {
		readb(rm200_pic_slave + PIC_IMR);
		writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
		writeb(0x60+(irq & 7), rm200_pic_slave + PIC_CMD);
		writeb(0x60+PIC_CASCADE_IR, rm200_pic_master + PIC_CMD);
	} else {
		readb(rm200_pic_master + PIC_IMR);
		writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
		writeb(0x60+irq, rm200_pic_master + PIC_CMD);
	}
	spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
	return;

spurious_8259A_irq:
	/*
	 * this is the slow path - should happen rarely.
	 */
	if (sni_rm200_i8259A_irq_real(irq))
		/*
		 * oops, the IRQ _is_ in service according to the
		 * 8259A - not spurious, go handle it.
		 */
		goto handle_real_irq;

	{
		static int spurious_irq_mask;
		/*
		 * At this point we can be sure the IRQ is spurious,
		 * let's ACK and report it. [once per IRQ]
		 */
		if (!(spurious_irq_mask & irqmask)) {
			printk(KERN_DEBUG
			       "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
			spurious_irq_mask |= irqmask;
		}
		atomic_inc(&irq_err_count);
		/*
		 * Theoretically we do not have to handle this IRQ,
		 * but in Linux this does not cause problems and is
		 * simpler for us.
		 */
		goto handle_real_irq;
	}
}

static struct irq_chip sni_rm200_i8259A_chip = {
	.name = "RM200-XT-PIC",
	.mask = sni_rm200_disable_8259A_irq,
	.unmask = sni_rm200_enable_8259A_irq,
	.mask_ack = sni_rm200_mask_and_ack_8259A,
};

/*
 * Do the traditional i8259 interrupt polling thing. This is for the few
 * cases where no better interrupt acknowledge method is available and we
 * absolutely must touch the i8259.
 */
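/*
 * A short note on the poll protocol used below: writing OCW3 with the
 * poll bit set (0x0C) makes the next read of the command register
 * return the highest-priority pending interrupt level in bits 2:0,
 * with bit 7 set if any interrupt is pending at all.
 */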
static inline int sni_rm200_i8259_irq(void)
{
	int irq;

	spin_lock(&sni_rm200_i8259A_lock);

	/* Perform an interrupt acknowledge cycle on controller 1. */
	writeb(0x0C, rm200_pic_master + PIC_CMD);	/* prepare for poll */
	irq = readb(rm200_pic_master + PIC_CMD) & 7;
	if (irq == PIC_CASCADE_IR) {
		/*
		 * Interrupt is cascaded so perform interrupt
		 * acknowledge on controller 2.
		 */
		writeb(0x0C, rm200_pic_slave + PIC_CMD);	/* prepare for poll */
		irq = (readb(rm200_pic_slave + PIC_CMD) & 7) + 8;
	}

	if (unlikely(irq == 7)) {
		/*
		 * This may be a spurious interrupt.
		 *
		 * Read the interrupt status register (ISR). If the most
		 * significant bit is not set then there is no valid
		 * interrupt.
		 */
		writeb(0x0B, rm200_pic_master + PIC_ISR);	/* ISR register */
		if (~readb(rm200_pic_master + PIC_ISR) & 0x80)
			irq = -1;
	}

	spin_unlock(&sni_rm200_i8259A_lock);

	return likely(irq >= 0) ? irq + RM200_I8259A_IRQ_BASE : irq;
}

void sni_rm200_init_8259A(void)
{
	unsigned long flags;

	spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);

	writeb(0xff, rm200_pic_master + PIC_IMR);
	writeb(0xff, rm200_pic_slave + PIC_IMR);

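	/*
	 * Standard i8259A initialisation sequence, for reference: ICW1
	 * (0x11, written to the command register) selects edge-triggered,
	 * cascaded operation with ICW4 to follow; the subsequent writes
	 * to the data register (the PIC_IMR offset) are ICW2 (vector
	 * base), ICW3 (cascade wiring) and ICW4 (0x01 = 8086 mode).
	 */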
	writeb(0x11, rm200_pic_master + PIC_CMD);
	writeb(0, rm200_pic_master + PIC_IMR);
	writeb(1U << PIC_CASCADE_IR, rm200_pic_master + PIC_IMR);
	writeb(MASTER_ICW4_DEFAULT, rm200_pic_master + PIC_IMR);
	writeb(0x11, rm200_pic_slave + PIC_CMD);
	writeb(8, rm200_pic_slave + PIC_IMR);
	writeb(PIC_CASCADE_IR, rm200_pic_slave + PIC_IMR);
	writeb(SLAVE_ICW4_DEFAULT, rm200_pic_slave + PIC_IMR);
	udelay(100);		/* wait for 8259A to initialize */

	writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
	writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);

	spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
}

/*
 * IRQ2 is the cascade interrupt to the second interrupt controller
 */
static struct irqaction sni_rm200_irq2 = {
	no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL
};
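
/*
 * Note: the positional initializer above relies on the leading field
 * order of struct irqaction in this kernel generation (handler, flags,
 * mask, name, ...); designated initializers, as used for
 * sni_rm200_i8259A_irq further below, express the same thing more
 * explicitly.
 */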

static struct resource sni_rm200_pic1_resource = {
	.name = "onboard ISA pic1",
	.start = 0x16000020,
	.end = 0x16000023,
	.flags = IORESOURCE_BUSY
};

static struct resource sni_rm200_pic2_resource = {
	.name = "onboard ISA pic2",
	.start = 0x160000a0,
	.end = 0x160000a3,
	.flags = IORESOURCE_BUSY
};

/* ISA irq handler */
static irqreturn_t sni_rm200_i8259A_irq_handler(int dummy, void *p)
{
	int irq;

	irq = sni_rm200_i8259_irq();
	if (unlikely(irq < 0))
		return IRQ_NONE;

	do_IRQ(irq);
	return IRQ_HANDLED;
}

struct irqaction sni_rm200_i8259A_irq = {
	.handler = sni_rm200_i8259A_irq_handler,
	.name = "onboard ISA",
	.flags = IRQF_SHARED
};

void __init sni_rm200_i8259_irqs(void)
{
	int i;

	rm200_pic_master = ioremap_nocache(0x16000020, 4);
	if (!rm200_pic_master)
		return;
	rm200_pic_slave = ioremap_nocache(0x160000a0, 4);
	if (!rm200_pic_slave) {
		iounmap(rm200_pic_master);
		return;
	}

	insert_resource(&iomem_resource, &sni_rm200_pic1_resource);
	insert_resource(&iomem_resource, &sni_rm200_pic2_resource);

	sni_rm200_init_8259A();

	for (i = RM200_I8259A_IRQ_BASE; i < RM200_I8259A_IRQ_BASE + 16; i++)
		set_irq_chip_and_handler(i, &sni_rm200_i8259A_chip,
					 handle_level_irq);

	setup_irq(RM200_I8259A_IRQ_BASE + PIC_CASCADE_IR, &sni_rm200_irq2);
}

#define SNI_RM200_INT_STAT_REG	CKSEG1ADDR(0xbc000000)
#define SNI_RM200_INT_ENA_REG	CKSEG1ADDR(0xbc080000)

#define SNI_RM200_INT_START	24
#define SNI_RM200_INT_END	28

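/*
 * The interrupt enable register is active low: a cleared bit enables
 * the corresponding source, a set bit disables it (hence the XOR with
 * 0x1f in sni_rm200_hwint() below before the register is used as a
 * mask).
 */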
static void enable_rm200_irq(unsigned int irq)
{
	unsigned int mask = 1 << (irq - SNI_RM200_INT_START);

	*(volatile u8 *)SNI_RM200_INT_ENA_REG &= ~mask;
}

void disable_rm200_irq(unsigned int irq)
{
	unsigned int mask = 1 << (irq - SNI_RM200_INT_START);

	*(volatile u8 *)SNI_RM200_INT_ENA_REG |= mask;
}

void end_rm200_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		enable_rm200_irq(irq);
}

static struct irq_chip rm200_irq_type = {
	.typename = "RM200",
	.ack = disable_rm200_irq,
	.mask = disable_rm200_irq,
	.mask_ack = disable_rm200_irq,
	.unmask = enable_rm200_irq,
	.end = end_rm200_irq,
};

static void sni_rm200_hwint(void)
{
	u32 pending = read_c0_cause() & read_c0_status();
	u8 mask;
	u8 stat;
	int irq;

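	/*
	 * C_IRQ5 (CPU hardware interrupt 5, normally the cp0 count/compare
	 * timer on R4x00-class CPUs) is dispatched directly; the board
	 * interrupts arrive on C_IRQ0.  The XOR with 0x1f undoes the
	 * active-low enable register, and the XOR with 0x14 presumably
	 * flips status bits that the hardware reports active low as well.
	 */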
	if (pending & C_IRQ5)
		do_IRQ(MIPS_CPU_IRQ_BASE + 7);
	else if (pending & C_IRQ0) {
		clear_c0_status(IE_IRQ0);
		mask = *(volatile u8 *)SNI_RM200_INT_ENA_REG ^ 0x1f;
		stat = *(volatile u8 *)SNI_RM200_INT_STAT_REG ^ 0x14;
		irq = ffs(stat & mask & 0x1f);

		if (likely(irq > 0))
			do_IRQ(irq + SNI_RM200_INT_START - 1);
		set_c0_status(IE_IRQ0);
	}
}

void __init sni_rm200_irq_init(void)
{
	int i;

	*(volatile u8 *)SNI_RM200_INT_ENA_REG = 0x1f;

	sni_rm200_i8259_irqs();
	mips_cpu_irq_init();
	/* Actually we've got more interrupts to handle ... */
	for (i = SNI_RM200_INT_START; i <= SNI_RM200_INT_END; i++)
		set_irq_chip(i, &rm200_irq_type);
	sni_hwint = sni_rm200_hwint;
	change_c0_status(ST0_IM, IE_IRQ0);
	setup_irq(SNI_RM200_INT_START + 0, &sni_rm200_i8259A_irq);
	setup_irq(SNI_RM200_INT_START + 1, &sni_isa_irq);
}

void __init sni_rm200_init(void)
{
}