/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>

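/*
 * Locking: the rwlocks below let per-core enable updates run
 * concurrently under the read lock, while disables and affinity
 * changes take the write lock so they can walk every core's enable
 * register without racing an enable. octeon_irq_msi_lock guards the
 * single shared MSI enable register.
 */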
DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);

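/*
 * The MIPS core interrupt lines (SW0, SW1, then the hardware lines up
 * to the timer) occupy bits 8-15 of the CP0 Status IM field and Cause
 * IP field, which is where the "0x100 << bit" masks below come from.
 */
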
static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two software interrupts (SW0/SW1) must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it, the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (desc->status & IRQ_DISABLED)
		return;

	/* There is a race here. We should fix it. */

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

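/*
 * Note the function-pointer cast below: on_each_cpu() expects a
 * void (*)(void *) callback, so the irq number is passed through the
 * void * argument rather than via a separate wrapper function.
 */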
static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};

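/*
 * CIU (Central Interrupt Unit) sources. Each core has its own pair of
 * enable registers: as used throughout this file, CIU_INTX_EN0/EN1
 * index (coreid * 2) for the line wired to IP2 and (coreid * 2 + 1)
 * for the line wired to IP3.
 */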
static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * To avoid any locking when accessing the CIU, we acknowledge
	 * CIU interrupts by disabling all of them. This way we can
	 * use a per core register and avoid any out of core locking
	 * requirements. It has the side effect that CIU interrupts
	 * can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * A read lock is used here because during an enable each core
	 * only updates its own enable register, so concurrent enables
	 * don't interfere with each other. During a disable, the
	 * write lock excludes any enables that might race with the
	 * cross-core update.
	 */
	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
#ifdef CONFIG_SMP
	int cpu;
	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
#else
	int coreid = cvmx_get_core_num();
	local_irq_save(flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 &= ~(1ull << bit);
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	write_lock(&octeon_irq_ciu0_rwlock);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		uint64_t en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock(&octeon_irq_ciu0_rwlock);

	return 0;
}
#endif
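
/*
 * Note that "affinity" for a CIU source simply means which cores have
 * its enable bit set; every core in the destination mask will see the
 * interrupt on its own IP2/IP3 line.
 */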

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

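/*
 * A minimal usage sketch (not part of this file): a driver for a CIU0
 * source just uses the generic IRQ API and the chip methods above run
 * automatically, e.g.
 *
 *	err = request_irq(OCTEON_IRQ_WORKQ0, my_handler, 0,
 *			  "my-device", my_dev);
 *
 * where my_handler and my_dev are the driver's own handler and cookie.
 */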

static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * To avoid any locking when accessing the CIU, we acknowledge
	 * CIU interrupts by disabling all of them. This way we can
	 * use a per core register and avoid any out of core locking
	 * requirements. It has the side effect that CIU interrupts
	 * can't be processed recursively. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * A read lock is used here because during an enable each core
	 * only updates its own enable register, so concurrent enables
	 * don't interfere with each other. During a disable, the
	 * write lock excludes any enables that might race with the
	 * cross-core update.
	 */
	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
#ifdef CONFIG_SMP
	int cpu;
	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
#else
	int coreid = cvmx_get_core_num();
	local_irq_save(flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 &= ~(1ull << bit);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	local_irq_restore(flags);
#endif
}

| 298 | #ifdef CONFIG_SMP |
Yinghai Lu | d5dedd4 | 2009-04-27 17:59:21 -0700 | [diff] [blame] | 299 | static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest) |
David Daney | 5b3b168 | 2009-01-08 16:46:40 -0800 | [diff] [blame] | 300 | { |
| 301 | int cpu; |
| 302 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ |
| 303 | |
| 304 | write_lock(&octeon_irq_ciu1_rwlock); |
| 305 | for_each_online_cpu(cpu) { |
| 306 | int coreid = cpu_logical_map(cpu); |
| 307 | uint64_t en1 = |
| 308 | cvmx_read_csr(CVMX_CIU_INTX_EN1 |
| 309 | (coreid * 2 + 1)); |
| 310 | if (cpumask_test_cpu(cpu, dest)) |
| 311 | en1 |= 1ull << bit; |
| 312 | else |
| 313 | en1 &= ~(1ull << bit); |
| 314 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); |
| 315 | } |
| 316 | /* |
| 317 | * We need to do a read after the last update to make sure all |
| 318 | * of them are done. |
| 319 | */ |
| 320 | cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); |
| 321 | write_unlock(&octeon_irq_ciu1_rwlock); |
Yinghai Lu | d5dedd4 | 2009-04-27 17:59:21 -0700 | [diff] [blame] | 322 | |
| 323 | return 0; |
David Daney | 5b3b168 | 2009-01-08 16:46:40 -0800 | [diff] [blame] | 324 | } |
| 325 | #endif |

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI
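
/*
 * MSI support: older Octeons bring MSIs in through the NPI (PCI)
 * block, newer ones through the NPEI (PCIe) block. Each handler below
 * checks octeon_has_feature(OCTEON_FEATURE_PCIE) to pick the right
 * registers.
 */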

static void octeon_irq_msi_ack(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* These chips have PCI */
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	} else {
		/*
		 * These chips have PCIe. Thankfully the ACK doesn't
		 * need any locking.
		 */
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	}
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
	/* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/*
		 * Octeon PCI doesn't have the ability to mask/unmask
		 * MSI interrupts individually. Instead of
		 * masking/unmasking them in groups of 16, we simply
		 * assume MSI devices are well behaved. MSI interrupts
		 * are always enabled and the ACK is assumed to be
		 * enough.
		 */
	} else {
		/*
		 * These chips have PCIe. Note that we only support
		 * the first 64 MSI interrupts. Unfortunately all the
		 * MSI enables are in the same register. We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See comment in enable */
	} else {
		/*
		 * These chips have PCIe. Note that we only support
		 * the first 64 MSI interrupts. Unfortunately all the
		 * MSI enables are in the same register. We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static struct irq_chip octeon_irq_chip_msi = {
	.name = "MSI",
	.enable = octeon_irq_msi_enable,
	.disable = octeon_irq_msi_disable,
	.ack = octeon_irq_msi_ack,
	.eoi = octeon_irq_msi_eoi,
};
#endif

void __init arch_init_irq(void)
{
	int irq;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 16 - 23 MIPS internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
					 handle_percpu_irq);
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
					 handle_percpu_irq);
	}

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif
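	/* Enable IP2 and IP3 (Status IM bits 10 and 11), the CIU lines. */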
	set_c0_status(0x300 << 2);
}

asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

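	/*
	 * Dispatch note: fls64() returns the 1-based position of the
	 * highest set bit, so "fls64(sum) + OCTEON_IRQ_xxx - 1" maps
	 * the highest-numbered pending CIU source to its Linux irq
	 * number. Likewise the Cause IP bits live at positions 8-15,
	 * so "fls(cause) - 9" turns the highest pending core line into
	 * an offset from MIPS_CPU_IRQ_BASE.
	 */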
	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}