/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>

static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);

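/*
 * Convert a logical CPU number to the hardware core number.  On SMP
 * kernels this uses the boot-time logical-to-physical map; on UP
 * kernels only the running core exists.
 */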
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
        return cpu_logical_map(cpu);
#else
        return cvmx_get_core_num();
#endif
}

static void octeon_irq_core_ack(unsigned int irq)
{
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << bit);
        /* The two user interrupts must be cleared manually. */
        if (bit < 2)
                clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * If an IRQ is being processed while we are disabling it the
         * handler will attempt to unmask the interrupt after it has
         * been disabled.
         */
        if (unlikely(desc->status & IRQ_DISABLED))
                return;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        set_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        clear_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
        on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
                    (void *) (long) irq, 1);
#else
        octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
        .name = "Core",
        .enable = octeon_irq_core_enable,
        .disable = octeon_irq_core_disable,
        .ack = octeon_irq_core_ack,
        .eoi = octeon_irq_core_eoi,
};


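/*
 * CIU register indexing used throughout this file: each core has two
 * interrupt lines into the CIU, so the sum/enable CSRs are accessed as
 * CVMX_CIU_INTX_SUM0/EN0(core * 2) for the line wired to IP2 and
 * CVMX_CIU_INTX_EN1(core * 2 + 1) for the line wired to IP3 (see
 * plat_irq_dispatch() below).
 */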
static void octeon_irq_ciu0_ack(unsigned int irq)
{
        switch (irq) {
        case OCTEON_IRQ_GMX_DRP0:
        case OCTEON_IRQ_GMX_DRP1:
        case OCTEON_IRQ_IPD_DRP:
        case OCTEON_IRQ_KEY_ZERO:
        case OCTEON_IRQ_TIMER0:
        case OCTEON_IRQ_TIMER1:
        case OCTEON_IRQ_TIMER2:
        case OCTEON_IRQ_TIMER3:
        {
                int index = cvmx_get_core_num() * 2;
                u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
                /*
                 * CIU timer type interrupts must be acknowledged by
                 * writing a '1' to their sum0 bit.
                 */
                cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
                break;
        }
        default:
                break;
        }

        /*
         * To avoid any locking when accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them. This
         * way we can use a per-core register and avoid any out of
         * core locking requirements. This has the side effect that
         * CIU interrupts can't be processed recursively.
         *
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again. We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 2);
}

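/*
 * Pick the core that should receive the next delivery of this irq.
 * With more than one CPU in the affinity mask we round-robin through
 * the online CPUs in the mask; with exactly one we use it directly,
 * and with an empty mask we fall back to the current core.
 */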
static int next_coreid_for_irq(struct irq_desc *desc)
{

#ifdef CONFIG_SMP
        int coreid;
        int weight = cpumask_weight(desc->affinity);

        if (weight > 1) {
                int cpu = smp_processor_id();
                for (;;) {
                        cpu = cpumask_next(cpu, desc->affinity);
                        if (cpu >= nr_cpu_ids) {
                                cpu = -1;
                                continue;
                        } else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
                                break;
                        }
                }
                coreid = octeon_coreid_for_cpu(cpu);
        } else if (weight == 1) {
                coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity));
        } else {
                coreid = cvmx_get_core_num();
        }
        return coreid;
#else
        return cvmx_get_core_num();
#endif
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int coreid = next_coreid_for_irq(desc);
        unsigned long flags;
        uint64_t en0;
        int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        en0 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

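/*
 * Mailbox interrupts are per-CPU, so enable only on the local core
 * rather than going through the affinity-based round-robin above.
 */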
static void octeon_irq_ciu0_enable_mbox(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en0;
        int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        en0 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

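/*
 * Disable the irq on every online core by clearing its enable bit in
 * each core's EN0 register under the ciu0 lock.
 */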
static void octeon_irq_ciu0_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
        unsigned long flags;
        uint64_t en0;
        int cpu;
        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        struct irq_desc *desc = irq_to_desc(irq);

        if ((desc->status & IRQ_DISABLED) == 0) {
                index = next_coreid_for_irq(desc) * 2;
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
        }
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq)
{
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        index = cvmx_get_core_num() * 2;
        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Acknowledge the irq by disabling it on the current core for chips
 * that have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        switch (irq) {
        case OCTEON_IRQ_GMX_DRP0:
        case OCTEON_IRQ_GMX_DRP1:
        case OCTEON_IRQ_IPD_DRP:
        case OCTEON_IRQ_KEY_ZERO:
        case OCTEON_IRQ_TIMER0:
        case OCTEON_IRQ_TIMER1:
        case OCTEON_IRQ_TIMER2:
        case OCTEON_IRQ_TIMER3:
                /*
                 * CIU timer type interrupts must be acknowledged by
                 * writing a '1' to their sum0 bit.
                 */
                cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
                break;
        default:
                break;
        }

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        if (likely((desc->status & IRQ_DISABLED) == 0))
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        int index;
        int cpu;
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
        int cpu;
        struct irq_desc *desc = irq_to_desc(irq);
        int enable_one = (desc->status & IRQ_DISABLED) == 0;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

        /*
         * For non-v2 CIU, we will allow only single CPU affinity.
         * This removes the need to do locking in the .ack/.eoi
         * functions.
         */
        if (cpumask_weight(dest) != 1)
                return -EINVAL;

        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en0 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                if (cpumask_test_cpu(cpu, dest) && enable_one) {
                        enable_one = 0;
                        en0 |= 1ull << bit;
                } else {
                        en0 &= ~(1ull << bit);
                }
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        struct irq_desc *desc = irq_to_desc(irq);
        int enable_one = (desc->status & IRQ_DISABLED) == 0;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                if (cpumask_test_cpu(cpu, dest) && enable_one) {
                        enable_one = 0;
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                } else {
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
                }
        }
        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .eoi = octeon_irq_ciu0_enable_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = {
        .name = "CIU0-M",
        .enable = octeon_irq_ciu0_enable_mbox_v2,
        .disable = octeon_irq_ciu0_disable,
        .eoi = octeon_irq_ciu0_eoi_mbox_v2,
};

static struct irq_chip octeon_irq_chip_ciu0_mbox = {
        .name = "CIU0-M",
        .enable = octeon_irq_ciu0_enable_mbox,
        .disable = octeon_irq_ciu0_disable,
        .eoi = octeon_irq_ciu0_eoi,
};

static void octeon_irq_ciu1_ack(unsigned int irq)
{
        /*
         * To avoid any locking when accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them. This
         * way we can use a per-core register and avoid any out of
         * core locking requirements. This has the side effect that
         * CIU interrupts can't be processed recursively. We don't
         * need to disable IRQs to make these atomic since they are
         * already disabled earlier in the low level interrupt code.
         */
        clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again. We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int coreid = next_coreid_for_irq(desc);
        unsigned long flags;
        uint64_t en1;
        int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        en1 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable(unsigned int irq)
{
        unsigned long flags;
        uint64_t en1;
        int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
        int coreid = bit;

        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        en1 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

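/*
 * Disable the irq on every online core by clearing its enable bit in
 * each core's EN1 register under the ciu1 lock.
 */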
static void octeon_irq_ciu1_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
        unsigned long flags;
        uint64_t en1;
        int cpu;
        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        struct irq_desc *desc = irq_to_desc(irq);

        if ((desc->status & IRQ_DISABLED) == 0) {
                index = next_coreid_for_irq(desc) * 2 + 1;
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
        }
}

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq)
{
        int index;
        int coreid = irq - OCTEON_IRQ_WDOG0;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        struct irq_desc *desc = irq_to_desc(irq);

        if ((desc->status & IRQ_DISABLED) == 0) {
                index = coreid * 2 + 1;
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
        }
}

/*
 * Acknowledge the irq by disabling it on the current core for chips
 * that have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        int index;
        int cpu;
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        struct irq_desc *desc = irq_to_desc(irq);
        int enable_one = (desc->status & IRQ_DISABLED) == 0;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

        /*
         * For non-v2 CIU, we will allow only single CPU affinity.
         * This removes the need to do locking in the .ack/.eoi
         * functions.
         */
        if (cpumask_weight(dest) != 1)
                return -EINVAL;

        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en1 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                if (cpumask_test_cpu(cpu, dest) && enable_one) {
                        enable_one = 0;
                        en1 |= 1ull << bit;
                } else {
                        en1 &= ~(1ull << bit);
                }
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        struct irq_desc *desc = irq_to_desc(irq);
        int enable_one = (desc->status & IRQ_DISABLED) == 0;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                if (cpumask_test_cpu(cpu, dest) && enable_one) {
                        enable_one = 0;
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                } else {
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
                }
        }
        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable_v2,
        .disable = octeon_irq_ciu1_disable_all_v2,
        .eoi = octeon_irq_ciu1_enable_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable,
        .disable = octeon_irq_ciu1_disable,
        .eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = {
        .name = "CIU1-W",
        .enable = octeon_irq_ciu1_wd_enable_v2,
        .disable = octeon_irq_ciu1_disable_all_v2,
        .eoi = octeon_irq_ciu1_wd_enable_v2,
};

static struct irq_chip octeon_irq_chip_ciu1_wd = {
        .name = "CIU1-W",
        .enable = octeon_irq_ciu1_wd_enable,
        .disable = octeon_irq_ciu1_disable,
        .eoi = octeon_irq_ciu1_eoi,
};

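/*
 * Ack handlers selected at boot in arch_init_irq() (v2 or classic)
 * and called from plat_irq_dispatch() before do_IRQ().
 */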
static void (*octeon_ciu0_ack)(unsigned int);
static void (*octeon_ciu1_ack)(unsigned int);

void __init arch_init_irq(void)
{
        unsigned int irq;
        struct irq_chip *chip0;
        struct irq_chip *chip0_mbox;
        struct irq_chip *chip1;
        struct irq_chip *chip1_wd;

#ifdef CONFIG_SMP
        /* Set the default affinity to the boot cpu. */
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

        if (NR_IRQS < OCTEON_IRQ_LAST)
                pr_err("octeon_irq_init: NR_IRQS is set too low\n");

        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
                octeon_ciu0_ack = octeon_irq_ciu0_ack_v2;
                octeon_ciu1_ack = octeon_irq_ciu1_ack_v2;
                chip0 = &octeon_irq_chip_ciu0_v2;
                chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2;
                chip1 = &octeon_irq_chip_ciu1_v2;
                chip1_wd = &octeon_irq_chip_ciu1_wd_v2;
        } else {
                octeon_ciu0_ack = octeon_irq_ciu0_ack;
                octeon_ciu1_ack = octeon_irq_ciu1_ack;
                chip0 = &octeon_irq_chip_ciu0;
                chip0_mbox = &octeon_irq_chip_ciu0_mbox;
                chip1 = &octeon_irq_chip_ciu1;
                chip1_wd = &octeon_irq_chip_ciu1_wd;
        }

        /* 0 - 15 reserved for i8259 master and slave controller. */

        /* 17 - 23 Mips internal */
        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
                                         handle_percpu_irq);
        }

        /* 24 - 87 CIU_INT_SUM0 */
        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                switch (irq) {
                case OCTEON_IRQ_MBOX0:
                case OCTEON_IRQ_MBOX1:
                        set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq);
                        break;
                default:
                        set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq);
                        break;
                }
        }

        /* 88 - 151 CIU_INT_SUM1 */
        for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++)
                set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq);

        for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++)
                set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq);

        set_c0_status(0x300 << 2);
}

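/*
 * Main interrupt dispatch: loop until no enabled interrupt is pending.
 * An IP2 exception is decoded through CIU SUM0/EN0 and an IP3
 * exception through CIU SUM1/EN1, so a CIU0 source on bit n maps to
 * irq OCTEON_IRQ_WORKQ0 + n and a CIU1 source on bit n maps to
 * OCTEON_IRQ_WDOG0 + n; everything else is a core interrupt handled
 * via MIPS_CPU_IRQ_BASE.
 */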
asmlinkage void plat_irq_dispatch(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
        const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
        const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
        const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
        unsigned long cop0_cause;
        unsigned long cop0_status;
        uint64_t ciu_en;
        uint64_t ciu_sum;
        unsigned int irq;

        while (1) {
                cop0_cause = read_c0_cause();
                cop0_status = read_c0_status();
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;

                if (unlikely(cop0_cause & STATUSF_IP2)) {
                        ciu_sum = cvmx_read_csr(ciu_sum0_address);
                        ciu_en = cvmx_read_csr(ciu_en0_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum)) {
                                irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1;
                                octeon_ciu0_ack(irq);
                                do_IRQ(irq);
                        } else {
                                spurious_interrupt();
                        }
                } else if (unlikely(cop0_cause & STATUSF_IP3)) {
                        ciu_sum = cvmx_read_csr(ciu_sum1_address);
                        ciu_en = cvmx_read_csr(ciu_en1_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum)) {
                                irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1;
                                octeon_ciu1_ack(irq);
                                do_IRQ(irq);
                        } else {
                                spurious_interrupt();
                        }
                } else if (likely(cop0_cause)) {
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                } else {
                        break;
                }
        }
}

#ifdef CONFIG_HOTPLUG_CPU

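/*
 * Called on a CPU that is going offline: mask the per-core interrupt
 * lines locally and migrate any CIU interrupts whose affinity still
 * includes this CPU.
 */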
void fixup_irqs(void)
{
        int irq;
        struct irq_desc *desc;
        cpumask_t new_affinity;
        unsigned long flags;
        int do_set_affinity;
        int cpu;

        cpu = smp_processor_id();

        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
                octeon_irq_core_disable_local(irq);

        for (irq = OCTEON_IRQ_WORKQ0; irq < OCTEON_IRQ_LAST; irq++) {
                desc = irq_to_desc(irq);
                switch (irq) {
                case OCTEON_IRQ_MBOX0:
                case OCTEON_IRQ_MBOX1:
                        /* The eoi function will disable them on this CPU. */
                        desc->chip->eoi(irq);
                        break;
                case OCTEON_IRQ_WDOG0:
                case OCTEON_IRQ_WDOG1:
                case OCTEON_IRQ_WDOG2:
                case OCTEON_IRQ_WDOG3:
                case OCTEON_IRQ_WDOG4:
                case OCTEON_IRQ_WDOG5:
                case OCTEON_IRQ_WDOG6:
                case OCTEON_IRQ_WDOG7:
                case OCTEON_IRQ_WDOG8:
                case OCTEON_IRQ_WDOG9:
                case OCTEON_IRQ_WDOG10:
                case OCTEON_IRQ_WDOG11:
                case OCTEON_IRQ_WDOG12:
                case OCTEON_IRQ_WDOG13:
                case OCTEON_IRQ_WDOG14:
                case OCTEON_IRQ_WDOG15:
                        /*
                         * These have special per CPU semantics and
                         * are handled in the watchdog driver.
                         */
                        break;
                default:
                        raw_spin_lock_irqsave(&desc->lock, flags);
                        /*
                         * If this irq has an action, it is in use and
                         * must be migrated if it has affinity to this
                         * cpu.
                         */
                        if (desc->action && cpumask_test_cpu(cpu, desc->affinity)) {
                                if (cpumask_weight(desc->affinity) > 1) {
                                        /*
                                         * It has multi CPU affinity,
                                         * just remove this CPU from
                                         * the affinity set.
                                         */
                                        cpumask_copy(&new_affinity, desc->affinity);
                                        cpumask_clear_cpu(cpu, &new_affinity);
                                } else {
                                        /*
                                         * Otherwise, put it on the lowest
                                         * numbered online CPU.
                                         */
                                        cpumask_clear(&new_affinity);
                                        cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
                                }
                                do_set_affinity = 1;
                        } else {
                                do_set_affinity = 0;
                        }
                        raw_spin_unlock_irqrestore(&desc->lock, flags);

                        if (do_set_affinity)
                                irq_set_affinity(irq, &new_affinity);

                        break;
                }
        }
}

#endif /* CONFIG_HOTPLUG_CPU */