/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>

static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);

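/*
 * Map a logical CPU number to its hardware core id.  On SMP kernels
 * this goes through the logical-to-physical CPU map; on UP kernels
 * only the current core can be meant.
 */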
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

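/*
 * Ack a core interrupt by masking its IM bit in the CP0 Status
 * register.  The two software interrupts (SW0/SW1) must additionally
 * be cleared in the Cause register.
 */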
static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (unlikely(desc->status & IRQ_DISABLED))
		return;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};

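/*
 * Ack a CIU0 interrupt.  The sum0-latched sources (timer, drop and
 * key-zero interrupts) are cleared by writing a '1' to their SUM0
 * bit; in every case the IP2 line is then masked on this core until
 * the eoi handler re-enables it.
 */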
static void octeon_irq_ciu0_ack(unsigned int irq)
{
	switch (irq) {
	case OCTEON_IRQ_GMX_DRP0:
	case OCTEON_IRQ_GMX_DRP1:
	case OCTEON_IRQ_IPD_DRP:
	case OCTEON_IRQ_KEY_ZERO:
	case OCTEON_IRQ_TIMER0:
	case OCTEON_IRQ_TIMER1:
	case OCTEON_IRQ_TIMER2:
	case OCTEON_IRQ_TIMER3:
	{
		int index = cvmx_get_core_num() * 2;
		u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
		/*
		 * CIU timer type interrupts must be acknowledged by
		 * writing a '1' bit to their sum0 bit.
		 */
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
		break;
	}
	default:
		break;
	}

	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}

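/*
 * Pick the core that should receive the next occurrence of an irq.
 * When more than one CPU is in the affinity mask, the target rotates
 * round-robin through the online CPUs of the mask, starting after the
 * CPU currently handling the interrupt.
 */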
static int next_coreid_for_irq(struct irq_desc *desc)
{
#ifdef CONFIG_SMP
	int coreid;
	int weight = cpumask_weight(desc->affinity);

	if (weight > 1) {
		int cpu = smp_processor_id();
		for (;;) {
			cpu = cpumask_next(cpu, desc->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
		coreid = octeon_coreid_for_cpu(cpu);
	} else if (weight == 1) {
		coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity));
	} else {
		coreid = cvmx_get_core_num();
	}
	return coreid;
#else
	return cvmx_get_core_num();
#endif
}

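/*
 * Enable the irq in the CIU EN0 register of the chosen core.  The
 * read-modify-write of EN0 is serialized by octeon_irq_ciu0_lock, and
 * the trailing read makes sure the write has posted before we return.
 */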
static void octeon_irq_ciu0_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int coreid = next_coreid_for_irq(desc);
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

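/*
 * Mailbox interrupts are per-core: the enable only acts on the core
 * it runs on, and no affinity or round-robin selection is done.
 */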
static void octeon_irq_ciu0_enable_mbox(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

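/*
 * Clear the irq's EN0 bit on every online core; a single read at the
 * end is enough to make sure all of the writes have posted.
 */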
static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;
	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = next_coreid_for_irq(desc) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that have the
 * EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	index = cvmx_get_core_num() * 2;
	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Acknowledge the irq: clear any sum0-latched state and disable the
 * irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	switch (irq) {
	case OCTEON_IRQ_GMX_DRP0:
	case OCTEON_IRQ_GMX_DRP1:
	case OCTEON_IRQ_IPD_DRP:
	case OCTEON_IRQ_KEY_ZERO:
	case OCTEON_IRQ_TIMER0:
	case OCTEON_IRQ_TIMER1:
	case OCTEON_IRQ_TIMER2:
	case OCTEON_IRQ_TIMER3:
		/*
		 * CIU timer type interrupts must be acknowledged by
		 * writing a '1' bit to their sum0 bit.
		 */
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
		break;
	default:
		break;
	}

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * Re-enable the irq on the current core for chips that have the
 * EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	if (likely((desc->status & IRQ_DISABLED) == 0))
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			en0 |= 1ull << bit;
		} else {
			en0 &= ~(1ull << bit);
		}
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.eoi = octeon_irq_ciu0_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = {
	.name = "CIU0-M",
	.enable = octeon_irq_ciu0_enable_mbox_v2,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi_mbox_v2,
};

static struct irq_chip octeon_irq_chip_ciu0_mbox = {
	.name = "CIU0-M",
	.enable = octeon_irq_ciu0_enable_mbox,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi,
};

static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively. We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

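/*
 * Enable the irq in the CIU EN1 register of the chosen core, using
 * the same locked read-modify-write scheme as the EN0 path above.
 */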
static void octeon_irq_ciu1_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int coreid = next_coreid_for_irq(desc);
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable(unsigned int irq)
{
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int coreid = bit;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

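/*
 * Clear the irq's EN1 bit on every online core, mirroring the EN0
 * disable path.
 */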
static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;
	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = next_coreid_for_irq(desc) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq)
{
	int index;
	int coreid = irq - OCTEON_IRQ_WDOG0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = coreid * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Acknowledge the irq by disabling it on the current core for chips
 * that have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			en1 |= 1ull << bit;
		} else {
			en1 &= ~(1ull << bit);
		}
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.eoi = octeon_irq_ciu1_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = {
	.name = "CIU1-W",
	.enable = octeon_irq_ciu1_wd_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.eoi = octeon_irq_ciu1_wd_enable_v2,
};

static struct irq_chip octeon_irq_chip_ciu1_wd = {
	.name = "CIU1-W",
	.enable = octeon_irq_ciu1_wd_enable,
	.disable = octeon_irq_ciu1_disable,
	.eoi = octeon_irq_ciu1_eoi,
};

static void (*octeon_ciu0_ack)(unsigned int);
static void (*octeon_ciu1_ack)(unsigned int);

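/*
 * Install an irq_chip for every interrupt source.  The lockless "v2"
 * chips are selected on CN58XX/CN56XX/CN52XX pass 2 parts, which
 * provide the EN*_W1{S,C} set/clear registers; all other models get
 * the locked read-modify-write variants.
 */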
void __init arch_init_irq(void)
{
	unsigned int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip0_mbox;
	struct irq_chip *chip1;
	struct irq_chip *chip1_wd;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		octeon_ciu0_ack = octeon_irq_ciu0_ack_v2;
		octeon_ciu1_ack = octeon_irq_ciu1_ack_v2;
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
		chip1_wd = &octeon_irq_chip_ciu1_wd_v2;
	} else {
		octeon_ciu0_ack = octeon_irq_ciu0_ack;
		octeon_ciu1_ack = octeon_irq_ciu1_ack;
		chip0 = &octeon_irq_chip_ciu0;
		chip0_mbox = &octeon_irq_chip_ciu0_mbox;
		chip1 = &octeon_irq_chip_ciu1;
		chip1_wd = &octeon_irq_chip_ciu1_wd;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		switch (irq) {
		case OCTEON_IRQ_MBOX0:
		case OCTEON_IRQ_MBOX1:
			set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq);
			break;
		default:
			set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq);
			break;
		}
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++)
		set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq);

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++)
		set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq);

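	/* Enable the IP2 and IP3 CIU lines in the core's Status register. */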
	set_c0_status(0x300 << 2);
}

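/*
 * Main interrupt dispatch: IP2 carries the CIU_INT_SUM0 sources, IP3
 * carries the CIU_INT_SUM1 sources, and any other set cause bit is a
 * plain MIPS core interrupt.  Keep dispatching until no enabled cause
 * bits remain.
 */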
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;
	unsigned int irq;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum)) {
				irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1;
				octeon_ciu0_ack(irq);
				do_IRQ(irq);
			} else {
				spurious_interrupt();
			}
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum)) {
				irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1;
				octeon_ciu1_ack(irq);
				do_IRQ(irq);
			} else {
				spurious_interrupt();
			}
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU

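/*
 * Called on a CPU that is being taken offline: mask the per-core
 * interrupts locally, then migrate any CIU irq whose affinity
 * includes this CPU over to a CPU that stays online.
 */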
void fixup_irqs(void)
{
	int irq;
	struct irq_desc *desc;
	cpumask_t new_affinity;
	unsigned long flags;
	int do_set_affinity;
	int cpu;

	cpu = smp_processor_id();

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq < OCTEON_IRQ_LAST; irq++) {
		desc = irq_to_desc(irq);
		switch (irq) {
		case OCTEON_IRQ_MBOX0:
		case OCTEON_IRQ_MBOX1:
			/* The eoi function will disable them on this CPU. */
			desc->chip->eoi(irq);
			break;
		case OCTEON_IRQ_WDOG0:
		case OCTEON_IRQ_WDOG1:
		case OCTEON_IRQ_WDOG2:
		case OCTEON_IRQ_WDOG3:
		case OCTEON_IRQ_WDOG4:
		case OCTEON_IRQ_WDOG5:
		case OCTEON_IRQ_WDOG6:
		case OCTEON_IRQ_WDOG7:
		case OCTEON_IRQ_WDOG8:
		case OCTEON_IRQ_WDOG9:
		case OCTEON_IRQ_WDOG10:
		case OCTEON_IRQ_WDOG11:
		case OCTEON_IRQ_WDOG12:
		case OCTEON_IRQ_WDOG13:
		case OCTEON_IRQ_WDOG14:
		case OCTEON_IRQ_WDOG15:
			/*
			 * These have special per CPU semantics and
			 * are handled in the watchdog driver.
			 */
			break;
		default:
			raw_spin_lock_irqsave(&desc->lock, flags);
			/*
			 * If this irq has an action, it is in use and
			 * must be migrated if it has affinity to this
			 * cpu.
			 */
			if (desc->action && cpumask_test_cpu(cpu, desc->affinity)) {
				if (cpumask_weight(desc->affinity) > 1) {
					/*
					 * It has multi CPU affinity,
					 * just remove this CPU from
					 * the affinity set.
					 */
					cpumask_copy(&new_affinity, desc->affinity);
					cpumask_clear_cpu(cpu, &new_affinity);
				} else {
					/*
					 * Otherwise, put it on the
					 * lowest numbered online CPU.
					 */
					cpumask_clear(&new_affinity);
					cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
				}
				do_set_affinity = 1;
			} else {
				do_set_affinity = 0;
			}
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			if (do_set_affinity)
				irq_set_affinity(irq, &new_affinity);

			break;
		}
	}
}

#endif /* CONFIG_HOTPLUG_CPU */