blob: a53305fbedb8fef7a4f7f15100018af1ce71dd88 [file] [log] [blame]
Maxime Bizone7300d02009-08-18 13:23:37 +01001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
7 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/module.h>
David Howellsca4d3e672010-10-07 14:08:54 +010014#include <linux/irq.h>
Jonas Gorski74b8ca32014-07-12 12:49:39 +020015#include <linux/spinlock.h>
Maxime Bizone7300d02009-08-18 13:23:37 +010016#include <asm/irq_cpu.h>
17#include <asm/mipsregs.h>
18#include <bcm63xx_cpu.h>
19#include <bcm63xx_regs.h>
20#include <bcm63xx_io.h>
21#include <bcm63xx_irq.h>
22
Jonas Gorski7a9fd142014-07-12 12:49:38 +020023
/* serialize accesses to the internal (ipic) and external (epic)
 * interrupt controller registers */
static DEFINE_SPINLOCK(ipic_lock);
static DEFINE_SPINLOCK(epic_lock);

/* per-CPU base addresses of the PERF irq status/mask registers;
 * entry [1] is 0 on chips without a second register set */
static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
/* register-width-specific helpers, selected in bcm63xx_init_irq() */
static void (*dispatch_internal)(int cpu);
/* nonzero when external irqs are routed through the internal PIC */
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
/* internal irq bit range occupied by the cascaded external irqs */
static unsigned int ext_irq_start, ext_irq_end;
/* PERF external irq config register(s); reg2 only used on 6368 */
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d);
Maxime Bizonf61cced2011-11-04 19:09:31 +010036
Maxime Bizonf61cced2011-11-04 19:09:31 +010037
Maxime Bizon62248922011-11-04 19:09:34 +010038static inline u32 get_ext_irq_perf_reg(int irq)
39{
40 if (irq < 4)
41 return ext_irq_cfg_reg1;
42 return ext_irq_cfg_reg2;
43}
44
Maxime Bizonf61cced2011-11-04 19:09:31 +010045static inline void handle_internal(int intbit)
46{
Maxime Bizon37c42a72011-11-04 19:09:32 +010047 if (is_ext_irq_cascaded &&
48 intbit >= ext_irq_start && intbit <= ext_irq_end)
49 do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
50 else
51 do_IRQ(intbit + IRQ_INTERNAL_BASE);
Maxime Bizonf61cced2011-11-04 19:09:31 +010052}
53
/*
 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
 * prioritize any interrupt relatively to another. the static counter
 * will resume the loop where it ended the last time we left this
 * function.
 *
 * The macro below generates the dispatch/mask/unmask helpers for a
 * given internal register width (instantiated for 32 and 64 below).
 * The status/mask words are stored into pending[] in reverse order;
 * the (irq / 32) ^ (width/32 - 1) expression in the mask/unmask
 * helpers applies the same word swap when converting an irq number
 * into a register offset.
 */

#define BUILD_IPIC_INTERNAL(width)					\
void __dispatch_internal_##width(int cpu)				\
{									\
	u32 pending[width / 32];					\
	unsigned int src, tgt;						\
	bool irqs_pending = false;					\
	/* per-CPU resume point for round-robin dispatch */		\
	static unsigned int i[2];					\
	unsigned int *next = &i[cpu];					\
	unsigned long flags;						\
									\
	/* read registers in reverse order */				\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
		u32 val;						\
									\
		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
		pending[--tgt] = val;					\
									\
		if (val)						\
			irqs_pending = true;				\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
									\
	if (!irqs_pending)						\
		return;							\
									\
	/* dispatch exactly one pending irq, resuming the scan right	\
	 * after the bit handled on the previous invocation */		\
	while (1) {							\
		unsigned int to_call = *next;				\
									\
		*next = (*next + 1) & (width - 1);			\
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
			handle_internal(to_call);			\
			break;						\
		}							\
	}								\
}									\
									\
/* clear the irq's enable bit in every present CPU's mask register */	\
static void __internal_irq_mask_##width(struct irq_data *d)		\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		/* a zero address marks the end of the register sets */	\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		val &= ~(1 << bit);					\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}									\
									\
/* set the irq's enable bit, but only on CPUs that are online */	\
static void __internal_irq_unmask_##width(struct irq_data *d)		\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		if (cpu_online(cpu))					\
			val |= (1 << bit);				\
		else							\
			val &= ~(1 << bit);				\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}

BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);
Maxime Bizon71a43922011-11-04 19:09:33 +0100146
/*
 * Top-level MIPS interrupt dispatch: poll the Cause register against
 * the Status interrupt mask and hand every pending line to its
 * handler, looping until no line is pending.  IP7 is checked first
 * and is therefore effectively highest priority.  IP2 cascades the
 * internal controller for CPU 0; IP3 either cascades the second
 * CPU's internal controller (cascaded chips) or is a directly-wired
 * external irq, like IP4-IP6.
 */
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP0)
			do_IRQ(0);
		if (cause & CAUSEF_IP1)
			do_IRQ(1);
		if (cause & CAUSEF_IP2)
			dispatch_internal(0);
		if (is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				dispatch_internal(1);
		} else {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}
180
181/*
182 * internal IRQs operations: only mask/unmask on PERF irq mask
183 * register.
184 */
/* mask an internal irq via the width-specific helper picked at init */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d);
}
189
/* unmask an internal irq via the width-specific helper picked at init */
static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d);
}
194
Maxime Bizone7300d02009-08-18 13:23:37 +0100195/*
196 * external IRQs operations: mask/unmask and clear on PERF external
197 * irq control register.
198 */
Thomas Gleixner93f29362011-03-23 21:08:47 +0000199static void bcm63xx_external_irq_mask(struct irq_data *d)
Maxime Bizone7300d02009-08-18 13:23:37 +0100200{
Maxime Bizon37c42a72011-11-04 19:09:32 +0100201 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
Maxime Bizon62248922011-11-04 19:09:34 +0100202 u32 reg, regaddr;
Jonas Gorski74b8ca32014-07-12 12:49:39 +0200203 unsigned long flags;
Maxime Bizone7300d02009-08-18 13:23:37 +0100204
Maxime Bizon62248922011-11-04 19:09:34 +0100205 regaddr = get_ext_irq_perf_reg(irq);
Jonas Gorski74b8ca32014-07-12 12:49:39 +0200206 spin_lock_irqsave(&epic_lock, flags);
Maxime Bizon62248922011-11-04 19:09:34 +0100207 reg = bcm_perf_readl(regaddr);
208
209 if (BCMCPU_IS_6348())
210 reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
211 else
212 reg &= ~EXTIRQ_CFG_MASK(irq % 4);
213
214 bcm_perf_writel(reg, regaddr);
Jonas Gorski74b8ca32014-07-12 12:49:39 +0200215 spin_unlock_irqrestore(&epic_lock, flags);
216
Maxime Bizon37c42a72011-11-04 19:09:32 +0100217 if (is_ext_irq_cascaded)
Jonas Gorski553e25b2014-07-12 12:49:41 +0200218 internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
Maxime Bizone7300d02009-08-18 13:23:37 +0100219}
220
Thomas Gleixner93f29362011-03-23 21:08:47 +0000221static void bcm63xx_external_irq_unmask(struct irq_data *d)
Maxime Bizone7300d02009-08-18 13:23:37 +0100222{
Maxime Bizon37c42a72011-11-04 19:09:32 +0100223 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
Maxime Bizon62248922011-11-04 19:09:34 +0100224 u32 reg, regaddr;
Jonas Gorski74b8ca32014-07-12 12:49:39 +0200225 unsigned long flags;
Maxime Bizone7300d02009-08-18 13:23:37 +0100226
Maxime Bizon62248922011-11-04 19:09:34 +0100227 regaddr = get_ext_irq_perf_reg(irq);
Jonas Gorski74b8ca32014-07-12 12:49:39 +0200228 spin_lock_irqsave(&epic_lock, flags);
Maxime Bizon62248922011-11-04 19:09:34 +0100229 reg = bcm_perf_readl(regaddr);
230
231 if (BCMCPU_IS_6348())
232 reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
233 else
234 reg |= EXTIRQ_CFG_MASK(irq % 4);
235
236 bcm_perf_writel(reg, regaddr);
Jonas Gorski74b8ca32014-07-12 12:49:39 +0200237 spin_unlock_irqrestore(&epic_lock, flags);
Maxime Bizon62248922011-11-04 19:09:34 +0100238
Maxime Bizon37c42a72011-11-04 19:09:32 +0100239 if (is_ext_irq_cascaded)
Jonas Gorski553e25b2014-07-12 12:49:41 +0200240 internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start));
Maxime Bizone7300d02009-08-18 13:23:37 +0100241}
242
Thomas Gleixner93f29362011-03-23 21:08:47 +0000243static void bcm63xx_external_irq_clear(struct irq_data *d)
Maxime Bizone7300d02009-08-18 13:23:37 +0100244{
Maxime Bizon37c42a72011-11-04 19:09:32 +0100245 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
Maxime Bizon62248922011-11-04 19:09:34 +0100246 u32 reg, regaddr;
Jonas Gorski74b8ca32014-07-12 12:49:39 +0200247 unsigned long flags;
Maxime Bizone7300d02009-08-18 13:23:37 +0100248
Maxime Bizon62248922011-11-04 19:09:34 +0100249 regaddr = get_ext_irq_perf_reg(irq);
Jonas Gorski74b8ca32014-07-12 12:49:39 +0200250 spin_lock_irqsave(&epic_lock, flags);
Maxime Bizon62248922011-11-04 19:09:34 +0100251 reg = bcm_perf_readl(regaddr);
252
253 if (BCMCPU_IS_6348())
254 reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
255 else
256 reg |= EXTIRQ_CFG_CLEAR(irq % 4);
257
258 bcm_perf_writel(reg, regaddr);
Jonas Gorski74b8ca32014-07-12 12:49:39 +0200259 spin_unlock_irqrestore(&epic_lock, flags);
Maxime Bizone7300d02009-08-18 13:23:37 +0100260}
261
/*
 * Configure the trigger type (level/edge, polarity) of an external
 * irq in the PERF config register, then switch the irq's flow handler
 * to level or edge handling accordingly.
 */
static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;
	unsigned long flags;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	/* default to active-low level trigger when unspecified */
	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	/* decode flow_type into the three hardware config bits;
	 * falling-edge is the all-zero configuration */
	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		printk(KERN_ERR "bogus flow type combination given !\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;	/* bit index within the selected config register */

	/* the 6348 uses a different bit layout than the other chips */
	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;
	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	irqd_set_trigger_type(d, flow_type);
	/* level-triggered irqs need the level flow handler, all other
	 * types use the edge flow handler */
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}
358
/* irq_chip for internal irqs: mask/unmask on the PERF irq mask register */
static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};
364
/* irq_chip for external irqs: ack writes the CLEAR bit in the PERF
 * config register, and the trigger type is configurable */
static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};
374
/* cascade action for CPU line IP2, which feeds the internal PIC */
static struct irqaction cpu_ip2_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip2",
	.flags		= IRQF_NO_THREAD,
};
380
#ifdef CONFIG_SMP
/* cascade action for CPU line IP3, dispatched to the second CPU's
 * internal PIC registers on cascaded chips */
static struct irqaction cpu_ip3_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip3",
	.flags		= IRQF_NO_THREAD,
};
#endif
388
/* cascade action for external irqs wired directly to CPU lines
 * (non-cascaded chips only) */
static struct irqaction cpu_ext_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_extirq",
	.flags		= IRQF_NO_THREAD,
};
394
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200395static void bcm63xx_init_irq(void)
396{
397 int irq_bits;
398
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200399 irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
400 irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200401 irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
402 irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200403
404 switch (bcm63xx_get_cpu_id()) {
405 case BCM3368_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200406 irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
407 irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200408 irq_stat_addr[1] = 0;
409 irq_stat_addr[1] = 0;
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200410 irq_bits = 32;
411 ext_irq_count = 4;
412 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
413 break;
414 case BCM6328_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200415 irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
416 irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200417 irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
418 irq_stat_addr[1] += PERF_IRQMASK_6328_REG(1);
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200419 irq_bits = 64;
420 ext_irq_count = 4;
421 is_ext_irq_cascaded = 1;
422 ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
423 ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
424 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
425 break;
426 case BCM6338_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200427 irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
428 irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200429 irq_stat_addr[1] = 0;
430 irq_mask_addr[1] = 0;
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200431 irq_bits = 32;
432 ext_irq_count = 4;
433 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
434 break;
435 case BCM6345_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200436 irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
437 irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200438 irq_stat_addr[1] = 0;
439 irq_mask_addr[1] = 0;
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200440 irq_bits = 32;
441 ext_irq_count = 4;
442 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
443 break;
444 case BCM6348_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200445 irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
446 irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200447 irq_stat_addr[1] = 0;
448 irq_mask_addr[1] = 0;
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200449 irq_bits = 32;
450 ext_irq_count = 4;
451 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
452 break;
453 case BCM6358_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200454 irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
455 irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200456 irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
457 irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200458 irq_bits = 32;
459 ext_irq_count = 4;
460 is_ext_irq_cascaded = 1;
461 ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
462 ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
463 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
464 break;
465 case BCM6362_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200466 irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
467 irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200468 irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
469 irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200470 irq_bits = 64;
471 ext_irq_count = 4;
472 is_ext_irq_cascaded = 1;
473 ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
474 ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
475 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
476 break;
477 case BCM6368_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200478 irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
479 irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200480 irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
481 irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200482 irq_bits = 64;
483 ext_irq_count = 6;
484 is_ext_irq_cascaded = 1;
485 ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
486 ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
487 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
488 ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
489 break;
490 default:
491 BUG();
492 }
493
494 if (irq_bits == 32) {
495 dispatch_internal = __dispatch_internal_32;
496 internal_irq_mask = __internal_irq_mask_32;
497 internal_irq_unmask = __internal_irq_unmask_32;
498 } else {
499 dispatch_internal = __dispatch_internal_64;
500 internal_irq_mask = __internal_irq_mask_64;
501 internal_irq_unmask = __internal_irq_unmask_64;
502 }
503}
504
/*
 * Board irq setup entry point: detect the chip layout, register the
 * MIPS CPU irq controller, install chip/handler pairs for all
 * internal and external irqs, and hook up the cascade lines.
 */
void __init arch_init_irq(void)
{
	int i;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	/* non-cascaded chips wire the external irqs straight to CPU
	 * lines IP3 and up */
	if (!is_ext_irq_cascaded) {
		for (i = 3; i < 3 + ext_irq_count; ++i)
			setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
	}

	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
#ifdef CONFIG_SMP
	/* on cascaded chips IP3 carries the second CPU's internal irqs */
	if (is_ext_irq_cascaded)
		setup_irq(MIPS_CPU_IRQ_BASE + 3, &cpu_ip3_cascade_action);
#endif
}