/*
 * RM200 specific code
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006,2007 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * i8259 parts ripped out of arch/mips/kernel/i8259.c
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/io.h>

#include <asm/sni.h>
#include <asm/time.h>
#include <asm/irq_cpu.h>

#define RM200_I8259A_IRQ_BASE	32

#define MEMPORT(_base, _irq)				\
	{						\
		.mapbase	= _base,		\
		.irq		= _irq,			\
		.uartclk	= 1843200,		\
		.iotype		= UPIO_MEM,		\
		.flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP, \
	}

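/*
 * The two onboard UARTs are standard 1.8432 MHz clocked 8250-compatible
 * ports at the classic COM1/COM2 offsets 0x3f8/0x2f8 inside what appears
 * to be the onboard ISA I/O window at 0x16000000; UPIO_MEM together with
 * UPF_IOREMAP lets the 8250 core ioremap() the mapbase itself.
 */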
static struct plat_serial8250_port rm200_data[] = {
	MEMPORT(0x160003f8, RM200_I8259A_IRQ_BASE + 4),
	MEMPORT(0x160002f8, RM200_I8259A_IRQ_BASE + 3),
	{ },
};

static struct platform_device rm200_serial8250_device = {
	.name			= "serial8250",
	.id			= PLAT8250_DEV_PLATFORM,
	.dev			= {
		.platform_data	= rm200_data,
	},
};

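/* Dallas DS1216 RTC, accessed by the rtc-ds1216 driver through this small window */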
static struct resource rm200_ds1216_rsrc[] = {
	{
		.start = 0x1cd41ffc,
		.end   = 0x1cd41fff,
		.flags = IORESOURCE_MEM
	}
};

static struct platform_device rm200_ds1216_device = {
	.name		= "rtc-ds1216",
	.num_resources	= ARRAY_SIZE(rm200_ds1216_rsrc),
	.resource	= rm200_ds1216_rsrc
};

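/*
 * Onboard Intel i82596 Ethernet controller, handled by the snirm_82596
 * platform driver; the meaning of the individual memory windows below is
 * up to that driver.
 */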
static struct resource snirm_82596_rm200_rsrc[] = {
	{
		.start = 0x18000000,
		.end   = 0x180fffff,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 0x1b000000,
		.end   = 0x1b000004,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 0x1ff00000,
		.end   = 0x1ff00020,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 27,
		.end   = 27,
		.flags = IORESOURCE_IRQ
	},
	{
		.flags = 0x00
	}
};

static struct platform_device snirm_82596_rm200_pdev = {
	.name		= "snirm_82596",
	.num_resources	= ARRAY_SIZE(snirm_82596_rm200_rsrc),
	.resource	= snirm_82596_rm200_rsrc
};

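/* Onboard NCR/Symbios 53C710 SCSI controller, handled by the snirm_53c710 driver */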
static struct resource snirm_53c710_rm200_rsrc[] = {
	{
		.start = 0x19000000,
		.end   = 0x190fffff,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 26,
		.end   = 26,
		.flags = IORESOURCE_IRQ
	}
};

static struct platform_device snirm_53c710_rm200_pdev = {
	.name		= "snirm_53c710",
	.num_resources	= ARRAY_SIZE(snirm_53c710_rm200_rsrc),
	.resource	= snirm_53c710_rm200_rsrc
};

static int __init snirm_setup_devinit(void)
{
	if (sni_brd_type == SNI_BRD_RM200) {
		platform_device_register(&rm200_serial8250_device);
		platform_device_register(&rm200_ds1216_device);
		platform_device_register(&snirm_82596_rm200_pdev);
		platform_device_register(&snirm_53c710_rm200_pdev);
		sni_eisa_root_init();
	}
	return 0;
}

device_initcall(snirm_setup_devinit);

/*
 * The RM200 has an ISA and an EISA bus. The ISA bus is only used
 * for onboard devices and also carries two i8259 PICs. Since these
 * PICs are not accessible via inb/outb, the following code uses
 * readb/writeb to access them.
 */

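/*
 * The two PICs sit in the memory-mapped onboard ISA I/O window at the
 * classic master/slave offsets, i.e. at physical 0x16000020 and
 * 0x160000a0 (see the resources and ioremap calls further down).
 */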
static DEFINE_RAW_SPINLOCK(sni_rm200_i8259A_lock);

/* register offsets relative to each PIC's base address */
#define PIC_CMD		0x00	/* command/status port */
#define PIC_IMR		0x01	/* interrupt mask (data) port */
#define PIC_ISR		PIC_CMD
#define PIC_POLL	PIC_ISR
#define PIC_OCW3	PIC_ISR

/* i8259A PIC related values */
#define PIC_CASCADE_IR		2
#define MASTER_ICW4_DEFAULT	0x01
#define SLAVE_ICW4_DEFAULT	0x01

/*
 * This contains the IRQ mask for both 8259A interrupt controllers:
 * the low byte is the master's mask, the high byte the slave's.
 */
static unsigned int rm200_cached_irq_mask = 0xffff;
static __iomem u8 *rm200_pic_master;
static __iomem u8 *rm200_pic_slave;

#define cached_master_mask	(rm200_cached_irq_mask)
#define cached_slave_mask	(rm200_cached_irq_mask >> 8)

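/*
 * Mask/unmask simply update the cached mask word and write the byte for
 * whichever PIC owns the line (slave for IRQs 8-15, master for IRQs 0-7)
 * to that PIC's IMR.
 */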
static void sni_rm200_disable_8259A_irq(struct irq_data *d)
{
	unsigned int mask, irq = d->irq - RM200_I8259A_IRQ_BASE;
	unsigned long flags;

	mask = 1 << irq;
	raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
	rm200_cached_irq_mask |= mask;
	if (irq & 8)
		writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
	else
		writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
	raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
}

static void sni_rm200_enable_8259A_irq(struct irq_data *d)
{
	unsigned int mask, irq = d->irq - RM200_I8259A_IRQ_BASE;
	unsigned long flags;

	mask = ~(1 << irq);
	raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
	rm200_cached_irq_mask &= mask;
	if (irq & 8)
		writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
	else
		writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
	raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
}

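/*
 * Check whether an IRQ is really in service: OCW3 0x0B selects the
 * In-Service Register for the next read of the command port, 0x0A
 * switches it back to the default Interrupt Request Register.
 */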
static inline int sni_rm200_i8259A_irq_real(unsigned int irq)
{
	int value;
	int irqmask = 1 << irq;

	if (irq < 8) {
		writeb(0x0B, rm200_pic_master + PIC_CMD);
		value = readb(rm200_pic_master + PIC_CMD) & irqmask;
		writeb(0x0A, rm200_pic_master + PIC_CMD);
		return value;
	}
	writeb(0x0B, rm200_pic_slave + PIC_CMD);	/* ISR register */
	value = readb(rm200_pic_slave + PIC_CMD) & (irqmask >> 8);
	writeb(0x0A, rm200_pic_slave + PIC_CMD);
	return value;
}

/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!)
 */
void sni_rm200_mask_and_ack_8259A(struct irq_data *d)
{
	unsigned int irqmask, irq = d->irq - RM200_I8259A_IRQ_BASE;
	unsigned long flags;

	irqmask = 1 << irq;
	raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);
	/*
	 * Lightweight spurious IRQ detection. We do not want
	 * to overdo spurious IRQ handling - it's usually a sign
	 * of hardware problems, so we only do the checks we can
	 * do without slowing down good hardware unnecessarily.
	 *
	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
	 * usually resulting from the 8259A-1|2 PICs) occur
	 * even if the IRQ is masked in the 8259A. Thus we
	 * can check spurious 8259A IRQs without doing the
	 * quite slow i8259A_irq_real() call for every IRQ.
	 * This does not cover 100% of spurious interrupts,
	 * but should be enough to warn the user that there
	 * is something bad going on ...
	 */
	if (rm200_cached_irq_mask & irqmask)
		goto spurious_8259A_irq;
	rm200_cached_irq_mask |= irqmask;

handle_real_irq:
	if (irq & 8) {
		readb(rm200_pic_slave + PIC_IMR);
		writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);
		/* specific EOI to the slave, then EOI the cascade on the master */
		writeb(0x60 + (irq & 7), rm200_pic_slave + PIC_CMD);
		writeb(0x60 + PIC_CASCADE_IR, rm200_pic_master + PIC_CMD);
	} else {
		readb(rm200_pic_master + PIC_IMR);
		writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
		/* specific EOI to the master */
		writeb(0x60 + irq, rm200_pic_master + PIC_CMD);
	}
	raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
	return;

spurious_8259A_irq:
	/*
	 * this is the slow path - should happen rarely.
	 */
	if (sni_rm200_i8259A_irq_real(irq))
		/*
		 * oops, the IRQ _is_ in service according to the
		 * 8259A - not spurious, go handle it.
		 */
		goto handle_real_irq;

	{
		static int spurious_irq_mask;
		/*
		 * At this point we can be sure the IRQ is spurious,
		 * let's ACK and report it. [once per IRQ]
		 */
		if (!(spurious_irq_mask & irqmask)) {
			printk(KERN_DEBUG
			       "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
			spurious_irq_mask |= irqmask;
		}
		atomic_inc(&irq_err_count);
		/*
		 * Theoretically we do not have to handle this IRQ,
		 * but in Linux this does not cause problems and is
		 * simpler for us.
		 */
		goto handle_real_irq;
	}
}

static struct irq_chip sni_rm200_i8259A_chip = {
	.name		= "RM200-XT-PIC",
	.irq_mask	= sni_rm200_disable_8259A_irq,
	.irq_unmask	= sni_rm200_enable_8259A_irq,
	.irq_mask_ack	= sni_rm200_mask_and_ack_8259A,
};

/*
 * Do the traditional i8259 interrupt polling thing. This is for the few
 * cases where no better interrupt acknowledge method is available and we
 * absolutely must touch the i8259.
 */
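/*
 * OCW3 0x0C puts the PIC into poll mode: the next read of the command
 * port then returns the highest-priority pending IRQ in bits 0-2, with
 * bit 7 set if an interrupt is actually pending.
 */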
static inline int sni_rm200_i8259_irq(void)
{
	int irq;

	raw_spin_lock(&sni_rm200_i8259A_lock);

	/* Perform an interrupt acknowledge cycle on controller 1. */
	writeb(0x0C, rm200_pic_master + PIC_CMD);	/* prepare for poll */
	irq = readb(rm200_pic_master + PIC_CMD) & 7;
	if (irq == PIC_CASCADE_IR) {
		/*
		 * Interrupt is cascaded so perform interrupt
		 * acknowledge on controller 2.
		 */
		writeb(0x0C, rm200_pic_slave + PIC_CMD);	/* prepare for poll */
		irq = (readb(rm200_pic_slave + PIC_CMD) & 7) + 8;
	}

	if (unlikely(irq == 7)) {
		/*
		 * This may be a spurious interrupt.
		 *
		 * Read the interrupt status register (ISR). If the most
		 * significant bit is not set then there is no valid
		 * interrupt.
		 */
		writeb(0x0B, rm200_pic_master + PIC_ISR);	/* ISR register */
		if (~readb(rm200_pic_master + PIC_ISR) & 0x80)
			irq = -1;
	}

	raw_spin_unlock(&sni_rm200_i8259A_lock);

	return likely(irq >= 0) ? irq + RM200_I8259A_IRQ_BASE : irq;
}

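/*
 * Standard 8259A initialisation sequence, as in arch/mips/kernel/i8259.c
 * but using writeb on the memory-mapped registers: ICW1 (0x11: edge
 * triggered, cascade mode, ICW4 needed) goes to the command port, then
 * ICW2 (vector offset), ICW3 (cascade wiring: slave on IR2) and ICW4
 * (8086 mode) follow on the data port.
 */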
void sni_rm200_init_8259A(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sni_rm200_i8259A_lock, flags);

	writeb(0xff, rm200_pic_master + PIC_IMR);
	writeb(0xff, rm200_pic_slave + PIC_IMR);

	writeb(0x11, rm200_pic_master + PIC_CMD);
	writeb(0, rm200_pic_master + PIC_IMR);
	writeb(1U << PIC_CASCADE_IR, rm200_pic_master + PIC_IMR);
	writeb(MASTER_ICW4_DEFAULT, rm200_pic_master + PIC_IMR);
	writeb(0x11, rm200_pic_slave + PIC_CMD);
	writeb(8, rm200_pic_slave + PIC_IMR);
	writeb(PIC_CASCADE_IR, rm200_pic_slave + PIC_IMR);
	writeb(SLAVE_ICW4_DEFAULT, rm200_pic_slave + PIC_IMR);
	udelay(100);		/* wait for 8259A to initialize */

	writeb(cached_master_mask, rm200_pic_master + PIC_IMR);
	writeb(cached_slave_mask, rm200_pic_slave + PIC_IMR);

	raw_spin_unlock_irqrestore(&sni_rm200_i8259A_lock, flags);
}

/*
 * IRQ2 is the cascade interrupt to the second interrupt controller
 */
static struct irqaction sni_rm200_irq2 = {
	.handler = no_action,
	.name = "cascade",
};

static struct resource sni_rm200_pic1_resource = {
	.name = "onboard ISA pic1",
	.start = 0x16000020,
	.end = 0x16000023,
	.flags = IORESOURCE_BUSY
};

static struct resource sni_rm200_pic2_resource = {
	.name = "onboard ISA pic2",
	.start = 0x160000a0,
	.end = 0x160000a3,
	.flags = IORESOURCE_BUSY
};

/* ISA irq handler */
static irqreturn_t sni_rm200_i8259A_irq_handler(int dummy, void *p)
{
	int irq;

	irq = sni_rm200_i8259_irq();
	if (unlikely(irq < 0))
		return IRQ_NONE;

	do_IRQ(irq);
	return IRQ_HANDLED;
}

struct irqaction sni_rm200_i8259A_irq = {
	.handler = sni_rm200_i8259A_irq_handler,
	.name = "onboard ISA",
	.flags = IRQF_SHARED
};

void __init sni_rm200_i8259_irqs(void)
{
	int i;

	rm200_pic_master = ioremap_nocache(0x16000020, 4);
	if (!rm200_pic_master)
		return;
	rm200_pic_slave = ioremap_nocache(0x160000a0, 4);
	if (!rm200_pic_slave) {
		iounmap(rm200_pic_master);
		return;
	}

	insert_resource(&iomem_resource, &sni_rm200_pic1_resource);
	insert_resource(&iomem_resource, &sni_rm200_pic2_resource);

	sni_rm200_init_8259A();

	for (i = RM200_I8259A_IRQ_BASE; i < RM200_I8259A_IRQ_BASE + 16; i++)
		irq_set_chip_and_handler(i, &sni_rm200_i8259A_chip,
					 handle_level_irq);

	setup_irq(RM200_I8259A_IRQ_BASE + PIC_CASCADE_IR, &sni_rm200_irq2);
}


#define SNI_RM200_INT_STAT_REG	CKSEG1ADDR(0xbc000000)
#define SNI_RM200_INT_ENA_REG	CKSEG1ADDR(0xbc080000)

#define SNI_RM200_INT_START	24
#define SNI_RM200_INT_END	28

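/*
 * The bits in the onboard interrupt enable register are active low:
 * clearing a bit unmasks the corresponding interrupt, setting it masks
 * it, as the two handlers below show.
 */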
static void enable_rm200_irq(struct irq_data *d)
{
	unsigned int mask = 1 << (d->irq - SNI_RM200_INT_START);

	*(volatile u8 *)SNI_RM200_INT_ENA_REG &= ~mask;
}

void disable_rm200_irq(struct irq_data *d)
{
	unsigned int mask = 1 << (d->irq - SNI_RM200_INT_START);

	*(volatile u8 *)SNI_RM200_INT_ENA_REG |= mask;
}

static struct irq_chip rm200_irq_type = {
	.name		= "RM200",
	.irq_mask	= disable_rm200_irq,
	.irq_unmask	= enable_rm200_irq,
};

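/*
 * Hardware interrupt dispatch: CPU IP7 (usually the CPU timer) is handed
 * straight to the CPU interrupt code; IP2 carries the onboard interrupts,
 * which are demultiplexed via the status/enable registers. The XORs undo
 * the active-low/inverted bits before the ffs() scan.
 */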
static void sni_rm200_hwint(void)
{
	u32 pending = read_c0_cause() & read_c0_status();
	u8 mask;
	u8 stat;
	int irq;

	if (pending & C_IRQ5)
		do_IRQ(MIPS_CPU_IRQ_BASE + 7);
	else if (pending & C_IRQ0) {
		clear_c0_status(IE_IRQ0);
		mask = *(volatile u8 *)SNI_RM200_INT_ENA_REG ^ 0x1f;
		stat = *(volatile u8 *)SNI_RM200_INT_STAT_REG ^ 0x14;
		irq = ffs(stat & mask & 0x1f);

		if (likely(irq > 0))
			do_IRQ(irq + SNI_RM200_INT_START - 1);
		set_c0_status(IE_IRQ0);
	}
}

void __init sni_rm200_irq_init(void)
{
	int i;

	*(volatile u8 *)SNI_RM200_INT_ENA_REG = 0x1f;

	sni_rm200_i8259_irqs();
	mips_cpu_irq_init();
	/* Actually we've got more interrupts to handle ... */
	for (i = SNI_RM200_INT_START; i <= SNI_RM200_INT_END; i++)
		irq_set_chip_and_handler(i, &rm200_irq_type, handle_level_irq);
	sni_hwint = sni_rm200_hwint;
	change_c0_status(ST0_IM, IE_IRQ0);
	setup_irq(SNI_RM200_INT_START + 0, &sni_rm200_i8259A_irq);
	setup_irq(SNI_RM200_INT_START + 1, &sni_isa_irq);
}

void __init sni_rm200_init(void)
{
}