/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>

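/*
 * A note on the register indexing used throughout this file: each
 * core has two CIU interrupt lines, IP2 and IP3. The IP2 enables
 * live in CVMX_CIU_INTX_EN0(coreid * 2) and the IP3 enables in
 * CVMX_CIU_INTX_EN1(coreid * 2 + 1), which is where the "* 2"
 * scaling on the register indexes below comes from.
 */
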
DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);

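/*
 * Map a Linux CPU number to the hardware core id used to index the
 * per-core CIU registers. On SMP kernels cpu_logical_map() holds the
 * mapping; on UP kernels the only running core is the current one.
 */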
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
        return cpu_logical_map(cpu);
#else
        return cvmx_get_core_num();
#endif
}

static void octeon_irq_core_ack(unsigned int irq)
{
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << bit);
        /* The two user interrupts must be cleared manually. */
        if (bit < 2)
                clear_c0_cause(0x100 << bit);
}

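/*
 * Background on the manual clear in octeon_irq_core_ack(): SW0 and
 * SW1 are the two MIPS software interrupts, latched in the CP0 Cause
 * register until software clears them. The hardware interrupt lines
 * (IP2 and up) deassert at their source instead.
 */
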
static void octeon_irq_core_eoi(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * If an IRQ is being processed while we are disabling it, the
         * handler will attempt to unmask the interrupt after it has
         * been disabled.
         */
        if (desc->status & IRQ_DISABLED)
                return;

        /* There is a race here. We should fix it. */

        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        set_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        clear_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
        on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
                    (void *) (long) irq, 1);
#else
        octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
        .name = "Core",
        .enable = octeon_irq_core_enable,
        .disable = octeon_irq_core_disable,
        .ack = octeon_irq_core_ack,
        .eoi = octeon_irq_core_eoi,
};


static void octeon_irq_ciu0_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them. This
         * way we can use a per core register and avoid any out of
         * core locking requirements. This has the side effect that
         * CIU interrupts can't be processed recursively.
         *
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << 2);
}

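/*
 * A sketch of how the ack/eoi pair above and below is used, assuming
 * the generic handle_percpu_irq() flow of this kernel:
 *
 *      chip->ack(irq);         mask IP2 so the CIU can't re-enter
 *      ... run the actions registered for the irq ...
 *      chip->eoi(irq);         unmask IP2 again
 */
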
static void octeon_irq_ciu0_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again. We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en0;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        /*
         * A read lock is used here to make sure only one core is ever
         * updating the CIU enable bits at a time. During an enable
         * the cores don't interfere with each other. During a disable
         * the write lock stops any enables that might cause a
         * problem.
         */
        read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        en0 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

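/*
 * Why a read lock is enough for an enable: each core only
 * read-modify-writes its own EN0 register, so concurrent enables on
 * different cores never touch the same register. A disable below
 * must walk every core's register, so it takes the write lock to
 * shut out any racing enable.
 */
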
static void octeon_irq_ciu0_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */
        unsigned long flags;
        uint64_t en0;
        int cpu;
        write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

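/*
 * For comparison, a sketch of what the single W1S write above
 * replaces on older chips (the locked read-modify-write done in
 * octeon_irq_ciu0_enable()):
 *
 *      en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(index));
 *      en0 |= mask;
 *      cvmx_write_csr(CVMX_CIU_INTX_EN0(index), en0);
 *
 * The W1S register performs the OR in hardware, so no lock is needed.
 */
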
/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        int index;
        int cpu;
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en0 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                if (cpumask_test_cpu(cpu, dest))
                        en0 |= 1ull << bit;
                else
                        en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
        return 0;
}
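
/*
 * Note that unlike octeon_irq_ciu0_set_affinity(), no lock is taken
 * above: each W1S/W1C write is a self-contained atomic update of a
 * single enable bit, so there is no read-modify-write window to
 * protect.
 */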
#endif

/*
 * Newer Octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_disable_v2,
        .eoi = octeon_irq_ciu0_enable_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_ack,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};


static void octeon_irq_ciu1_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them. This
         * way we can use a per core register and avoid any out of
         * core locking requirements. This has the side effect that
         * CIU interrupts can't be processed recursively. We don't
         * need to disable IRQs to make these atomic since they are
         * already disabled earlier in the low level interrupt code.
         */
        clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again. We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en1;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        /*
         * A read lock is used here to make sure only one core is ever
         * updating the CIU enable bits at a time. During an enable
         * the cores don't interfere with each other. During a disable
         * the write lock stops any enables that might cause a
         * problem.
         */
        read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        en1 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */
        unsigned long flags;
        uint64_t en1;
        int cpu;
        write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        int index;
        int cpu;
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en1 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                if (cpumask_test_cpu(cpu, dest))
                        en1 |= 1ull << bit;
                else
                        en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
        return 0;
}
#endif

/*
 * Newer Octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable_v2,
        .disable = octeon_irq_ciu1_disable_all_v2,
        .ack = octeon_irq_ciu1_disable_v2,
        .eoi = octeon_irq_ciu1_enable_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable,
        .disable = octeon_irq_ciu1_disable,
        .ack = octeon_irq_ciu1_ack,
        .eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI

static void octeon_irq_msi_ack(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* These chips have PCI */
                cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        } else {
                /*
                 * These chips have PCIe. Thankfully the ACK doesn't
                 * need any locking.
                 */
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        }
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
        /* Nothing needed */
}

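/*
 * The enable/disable pair below has two very different paths: on the
 * PCI-only chips there is no per-MSI mask at all, so both are no-ops
 * and delivery is governed purely by the ack; on the PCIe chips the
 * first 64 MSI enables share the one NPEI_MSI_ENB0 register,
 * serialized by octeon_irq_msi_lock.
 */
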
static void octeon_irq_msi_enable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /*
                 * Octeon PCI doesn't have the ability to mask/unmask
                 * MSI interrupts individually. Instead of
                 * masking/unmasking them in groups of 16, we simply
                 * assume MSI devices are well behaved. MSI
                 * interrupts are always enabled and the ACK is
                 * assumed to be enough.
                 */
        } else {
                /*
                 * These chips have PCIe. Note that we only support
                 * the first 64 MSI interrupts. Unfortunately all the
                 * MSI enables are in the same register. We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;
                spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static void octeon_irq_msi_disable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* See comment in enable */
        } else {
                /*
                 * These chips have PCIe. Note that we only support
                 * the first 64 MSI interrupts. Unfortunately all the
                 * MSI enables are in the same register. We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;
                spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static struct irq_chip octeon_irq_chip_msi = {
        .name = "MSI",
        .enable = octeon_irq_msi_enable,
        .disable = octeon_irq_msi_disable,
        .ack = octeon_irq_msi_ack,
        .eoi = octeon_irq_msi_eoi,
};
#endif

void __init arch_init_irq(void)
{
        int irq;
        struct irq_chip *chip0;
        struct irq_chip *chip1;

#ifdef CONFIG_SMP
        /* Set the default affinity to the boot cpu. */
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

        if (NR_IRQS < OCTEON_IRQ_LAST)
                pr_err("octeon_irq_init: NR_IRQS is set too low\n");

        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
                chip0 = &octeon_irq_chip_ciu0_v2;
                chip1 = &octeon_irq_chip_ciu1_v2;
        } else {
                chip0 = &octeon_irq_chip_ciu0;
                chip1 = &octeon_irq_chip_ciu1;
        }

        /* 0 - 15 reserved for i8259 master and slave controller. */

        /* 17 - 23 MIPS internal */
        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
                                         handle_percpu_irq);
        }

        /* 24 - 87 CIU_INT_SUM0 */
        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
        }

        /* 88 - 151 CIU_INT_SUM1 */
        for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
                set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
        }

#ifdef CONFIG_PCI_MSI
        /* 152 - 215 PCI/PCIe MSI interrupts */
        for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
                                         handle_percpu_irq);
        }
#endif
        set_c0_status(0x300 << 2);
}

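/*
 * The dispatcher below services IP2 (CIU_INT_SUM0 sources) ahead of
 * IP3 (CIU_INT_SUM1 sources) ahead of the other core interrupts,
 * picking the highest pending enabled bit of each sum with fls64(),
 * and loops until no enabled cause bits remain. A worked example of
 * the bit-to-irq mapping: if bit 0 of CIU_INT_SUM0 (the WORKQ0
 * source) is the highest bit set, fls64() returns 1 and do_IRQ() is
 * called with 1 + OCTEON_IRQ_WORKQ0 - 1 == OCTEON_IRQ_WORKQ0.
 */
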
asmlinkage void plat_irq_dispatch(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
        const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
        const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
        const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
        unsigned long cop0_cause;
        unsigned long cop0_status;
        uint64_t ciu_en;
        uint64_t ciu_sum;

        while (1) {
                cop0_cause = read_c0_cause();
                cop0_status = read_c0_status();
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;

                if (unlikely(cop0_cause & STATUSF_IP2)) {
                        ciu_sum = cvmx_read_csr(ciu_sum0_address);
                        ciu_en = cvmx_read_csr(ciu_en0_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
                        else
                                spurious_interrupt();
                } else if (unlikely(cop0_cause & STATUSF_IP3)) {
                        ciu_sum = cvmx_read_csr(ciu_sum1_address);
                        ciu_en = cvmx_read_csr(ciu_en1_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
                        else
                                spurious_interrupt();
                } else if (likely(cop0_cause)) {
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                } else {
                        break;
                }
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
        unsigned int isset;
        int coreid = octeon_coreid_for_cpu(cpu);
        int bit = (irq < OCTEON_IRQ_WDOG0) ?
                irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;
        if (irq < OCTEON_IRQ_WDOG0) {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
                        (1ull << bit)) >> bit;
        } else {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
                        (1ull << bit)) >> bit;
        }
        return isset;
}

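/*
 * Called on the path that takes a CPU offline: mask the per-core
 * interrupt sources locally and, for each CIU interrupt still
 * enabled on this core, disable it and respread its affinity across
 * the CPUs that remain online.
 */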
void fixup_irqs(void)
{
        int irq;

        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
                octeon_irq_core_disable_local(irq);

        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

#if 0
        for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
                octeon_irq_mailbox_mask(irq);
#endif
        for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

        for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu1.disable(irq);
                        octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
                }
        }
}

#endif /* CONFIG_HOTPLUG_CPU */