blob: 53be291c3d944be9ef2ace2618bc85643d344b8e [file] [log] [blame]
Maxime Bizone7300d02009-08-18 13:23:37 +01001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
7 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/module.h>
David Howellsca4d3e672010-10-07 14:08:54 +010014#include <linux/irq.h>
Maxime Bizone7300d02009-08-18 13:23:37 +010015#include <asm/irq_cpu.h>
16#include <asm/mipsregs.h>
17#include <bcm63xx_cpu.h>
18#include <bcm63xx_regs.h>
19#include <bcm63xx_io.h>
20#include <bcm63xx_irq.h>
21
Jonas Gorski7a9fd142014-07-12 12:49:38 +020022
/* per-CPU PERF IRQ status/mask register addresses ([0] = boot CPU) */
static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
/* SoC-specific internal dispatch helper, selected in bcm63xx_init_irq() */
static void (*dispatch_internal)(int cpu);
/* non-zero when external IRQs are routed through the internal controller */
static int is_ext_irq_cascaded;
/* number of external IRQ lines on this SoC (4 or 6) */
static unsigned int ext_irq_count;
/* internal-IRQ bit range occupied by cascaded external IRQs */
static unsigned int ext_irq_start, ext_irq_end;
/* PERF external IRQ config register offsets (reg2 only on 6368) */
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
/* SoC-specific internal mask/unmask helpers, selected in bcm63xx_init_irq() */
static void (*internal_irq_mask)(unsigned int irq);
static void (*internal_irq_unmask)(unsigned int irq);
Maxime Bizonf61cced2011-11-04 19:09:31 +010032
Maxime Bizonf61cced2011-11-04 19:09:31 +010033
Maxime Bizon62248922011-11-04 19:09:34 +010034static inline u32 get_ext_irq_perf_reg(int irq)
35{
36 if (irq < 4)
37 return ext_irq_cfg_reg1;
38 return ext_irq_cfg_reg2;
39}
40
Maxime Bizonf61cced2011-11-04 19:09:31 +010041static inline void handle_internal(int intbit)
42{
Maxime Bizon37c42a72011-11-04 19:09:32 +010043 if (is_ext_irq_cascaded &&
44 intbit >= ext_irq_start && intbit <= ext_irq_end)
45 do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
46 else
47 do_IRQ(intbit + IRQ_INTERNAL_BASE);
Maxime Bizonf61cced2011-11-04 19:09:31 +010048}
49
/*
 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
 * prioritize any interrupt relative to another. the static counter
 * will resume the loop where it ended the last time we left this
 * function.
 */
Maxime Bizone7300d02009-08-18 13:23:37 +010056
/*
 * Build the 32- and 64-bit flavours of the internal-controller helpers:
 *   __dispatch_internal_<width>()   - handle one pending internal IRQ
 *   __internal_irq_mask_<width>()   - clear a bit in the PERF irq mask
 *   __internal_irq_unmask_<width>() - set a bit in the PERF irq mask
 * The mask/unmask helpers only touch CPU0's mask register.
 */
#define BUILD_IPIC_INTERNAL(width)					\
void __dispatch_internal_##width(int cpu)				\
{									\
	u32 pending[width / 32];					\
	unsigned int src, tgt;						\
	bool irqs_pending = false;					\
	/* per-cpu resume point so no line is starved (see comment above) */ \
	static unsigned int i[2];					\
	unsigned int *next = &i[cpu];					\
									\
	/* read registers in reverse order */				\
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
		u32 val;						\
									\
		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
		pending[--tgt] = val;					\
									\
		if (val)						\
			irqs_pending = true;				\
	}								\
									\
	if (!irqs_pending)						\
		return;							\
									\
	/* scan from the resume point; handle exactly one pending bit */ \
	while (1) {							\
		unsigned int to_call = *next;				\
									\
		*next = (*next + 1) & (width - 1);			\
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
			handle_internal(to_call);			\
			break;						\
		}							\
	}								\
}									\
									\
static void __internal_irq_mask_##width(unsigned int irq)		\
{									\
	u32 val;							\
	/* registers were read in reverse order; XOR flips the index */ \
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
									\
	val = bcm_readl(irq_mask_addr[0] + reg * sizeof(u32));		\
	val &= ~(1 << bit);						\
	bcm_writel(val, irq_mask_addr[0] + reg * sizeof(u32));		\
}									\
									\
static void __internal_irq_unmask_##width(unsigned int irq)		\
{									\
	u32 val;							\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
									\
	val = bcm_readl(irq_mask_addr[0] + reg * sizeof(u32));		\
	val |= (1 << bit);						\
	bcm_writel(val, irq_mask_addr[0] + reg * sizeof(u32));		\
}

BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);
Maxime Bizon71a43922011-11-04 19:09:33 +0100116
/*
 * Top-level MIPS interrupt dispatch: service every cause bit that is
 * both pending and unmasked, looping until none remain.
 */
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		/* IP0/IP1 are the MIPS software interrupt bits */
		if (cause & CAUSEF_IP0)
			do_IRQ(0);
		if (cause & CAUSEF_IP1)
			do_IRQ(1);
		/* IP2 cascades the internal (PERF) interrupt controller */
		if (cause & CAUSEF_IP2)
			dispatch_internal(0);
		/* when not cascaded, external IRQs are wired to IP3..IP6 */
		if (!is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}
147
148/*
149 * internal IRQs operations: only mask/unmask on PERF irq mask
150 * register.
151 */
/* mask an internal IRQ: translate the Linux irq number to a controller bit */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d->irq - IRQ_INTERNAL_BASE);
}
156
/* unmask an internal IRQ: translate the Linux irq number to a controller bit */
static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d->irq - IRQ_INTERNAL_BASE);
}
161
Maxime Bizone7300d02009-08-18 13:23:37 +0100162/*
163 * external IRQs operations: mask/unmask and clear on PERF external
164 * irq control register.
165 */
Thomas Gleixner93f29362011-03-23 21:08:47 +0000166static void bcm63xx_external_irq_mask(struct irq_data *d)
Maxime Bizone7300d02009-08-18 13:23:37 +0100167{
Maxime Bizon37c42a72011-11-04 19:09:32 +0100168 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
Maxime Bizon62248922011-11-04 19:09:34 +0100169 u32 reg, regaddr;
Maxime Bizone7300d02009-08-18 13:23:37 +0100170
Maxime Bizon62248922011-11-04 19:09:34 +0100171 regaddr = get_ext_irq_perf_reg(irq);
172 reg = bcm_perf_readl(regaddr);
173
174 if (BCMCPU_IS_6348())
175 reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
176 else
177 reg &= ~EXTIRQ_CFG_MASK(irq % 4);
178
179 bcm_perf_writel(reg, regaddr);
Maxime Bizon37c42a72011-11-04 19:09:32 +0100180 if (is_ext_irq_cascaded)
181 internal_irq_mask(irq + ext_irq_start);
Maxime Bizone7300d02009-08-18 13:23:37 +0100182}
183
Thomas Gleixner93f29362011-03-23 21:08:47 +0000184static void bcm63xx_external_irq_unmask(struct irq_data *d)
Maxime Bizone7300d02009-08-18 13:23:37 +0100185{
Maxime Bizon37c42a72011-11-04 19:09:32 +0100186 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
Maxime Bizon62248922011-11-04 19:09:34 +0100187 u32 reg, regaddr;
Maxime Bizone7300d02009-08-18 13:23:37 +0100188
Maxime Bizon62248922011-11-04 19:09:34 +0100189 regaddr = get_ext_irq_perf_reg(irq);
190 reg = bcm_perf_readl(regaddr);
191
192 if (BCMCPU_IS_6348())
193 reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
194 else
195 reg |= EXTIRQ_CFG_MASK(irq % 4);
196
197 bcm_perf_writel(reg, regaddr);
198
Maxime Bizon37c42a72011-11-04 19:09:32 +0100199 if (is_ext_irq_cascaded)
200 internal_irq_unmask(irq + ext_irq_start);
Maxime Bizone7300d02009-08-18 13:23:37 +0100201}
202
Thomas Gleixner93f29362011-03-23 21:08:47 +0000203static void bcm63xx_external_irq_clear(struct irq_data *d)
Maxime Bizone7300d02009-08-18 13:23:37 +0100204{
Maxime Bizon37c42a72011-11-04 19:09:32 +0100205 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
Maxime Bizon62248922011-11-04 19:09:34 +0100206 u32 reg, regaddr;
Maxime Bizone7300d02009-08-18 13:23:37 +0100207
Maxime Bizon62248922011-11-04 19:09:34 +0100208 regaddr = get_ext_irq_perf_reg(irq);
209 reg = bcm_perf_readl(regaddr);
210
211 if (BCMCPU_IS_6348())
212 reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
213 else
214 reg |= EXTIRQ_CFG_CLEAR(irq % 4);
215
216 bcm_perf_writel(reg, regaddr);
Maxime Bizone7300d02009-08-18 13:23:37 +0100217}
218
Thomas Gleixner93f29362011-03-23 21:08:47 +0000219static int bcm63xx_external_irq_set_type(struct irq_data *d,
Maxime Bizone7300d02009-08-18 13:23:37 +0100220 unsigned int flow_type)
221{
Maxime Bizon37c42a72011-11-04 19:09:32 +0100222 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
Maxime Bizon62248922011-11-04 19:09:34 +0100223 u32 reg, regaddr;
224 int levelsense, sense, bothedge;
Maxime Bizone7300d02009-08-18 13:23:37 +0100225
226 flow_type &= IRQ_TYPE_SENSE_MASK;
227
228 if (flow_type == IRQ_TYPE_NONE)
229 flow_type = IRQ_TYPE_LEVEL_LOW;
230
Maxime Bizon62248922011-11-04 19:09:34 +0100231 levelsense = sense = bothedge = 0;
Maxime Bizone7300d02009-08-18 13:23:37 +0100232 switch (flow_type) {
233 case IRQ_TYPE_EDGE_BOTH:
Maxime Bizon62248922011-11-04 19:09:34 +0100234 bothedge = 1;
Maxime Bizone7300d02009-08-18 13:23:37 +0100235 break;
236
237 case IRQ_TYPE_EDGE_RISING:
Maxime Bizon62248922011-11-04 19:09:34 +0100238 sense = 1;
Maxime Bizone7300d02009-08-18 13:23:37 +0100239 break;
240
241 case IRQ_TYPE_EDGE_FALLING:
Maxime Bizone7300d02009-08-18 13:23:37 +0100242 break;
243
244 case IRQ_TYPE_LEVEL_HIGH:
Maxime Bizon62248922011-11-04 19:09:34 +0100245 levelsense = 1;
246 sense = 1;
Maxime Bizone7300d02009-08-18 13:23:37 +0100247 break;
248
249 case IRQ_TYPE_LEVEL_LOW:
Maxime Bizon62248922011-11-04 19:09:34 +0100250 levelsense = 1;
Maxime Bizone7300d02009-08-18 13:23:37 +0100251 break;
252
253 default:
254 printk(KERN_ERR "bogus flow type combination given !\n");
255 return -EINVAL;
256 }
Maxime Bizon62248922011-11-04 19:09:34 +0100257
258 regaddr = get_ext_irq_perf_reg(irq);
259 reg = bcm_perf_readl(regaddr);
260 irq %= 4;
261
Maxime Bizon58e380a2012-07-13 07:46:05 +0000262 switch (bcm63xx_get_cpu_id()) {
263 case BCM6348_CPU_ID:
Maxime Bizon62248922011-11-04 19:09:34 +0100264 if (levelsense)
265 reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
266 else
267 reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
268 if (sense)
269 reg |= EXTIRQ_CFG_SENSE_6348(irq);
270 else
271 reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
272 if (bothedge)
273 reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
274 else
275 reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
Maxime Bizon58e380a2012-07-13 07:46:05 +0000276 break;
Maxime Bizon62248922011-11-04 19:09:34 +0100277
Florian Fainelli7b933422013-06-18 16:55:40 +0000278 case BCM3368_CPU_ID:
Maxime Bizon58e380a2012-07-13 07:46:05 +0000279 case BCM6328_CPU_ID:
280 case BCM6338_CPU_ID:
281 case BCM6345_CPU_ID:
282 case BCM6358_CPU_ID:
Jonas Gorski2c8aaf72013-03-21 14:03:17 +0000283 case BCM6362_CPU_ID:
Maxime Bizon58e380a2012-07-13 07:46:05 +0000284 case BCM6368_CPU_ID:
Maxime Bizon62248922011-11-04 19:09:34 +0100285 if (levelsense)
286 reg |= EXTIRQ_CFG_LEVELSENSE(irq);
287 else
288 reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
289 if (sense)
290 reg |= EXTIRQ_CFG_SENSE(irq);
291 else
292 reg &= ~EXTIRQ_CFG_SENSE(irq);
293 if (bothedge)
294 reg |= EXTIRQ_CFG_BOTHEDGE(irq);
295 else
296 reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
Maxime Bizon58e380a2012-07-13 07:46:05 +0000297 break;
298 default:
299 BUG();
Maxime Bizon62248922011-11-04 19:09:34 +0100300 }
301
302 bcm_perf_writel(reg, regaddr);
Maxime Bizone7300d02009-08-18 13:23:37 +0100303
Thomas Gleixner93f29362011-03-23 21:08:47 +0000304 irqd_set_trigger_type(d, flow_type);
305 if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
306 __irq_set_handler_locked(d->irq, handle_level_irq);
307 else
308 __irq_set_handler_locked(d->irq, handle_edge_irq);
Maxime Bizone7300d02009-08-18 13:23:37 +0100309
Thomas Gleixner93f29362011-03-23 21:08:47 +0000310 return IRQ_SET_MASK_OK_NOCOPY;
Maxime Bizone7300d02009-08-18 13:23:37 +0100311}
312
/* irq_chip for internal (PERF) interrupts: mask/unmask only, no ack */
static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};
318
/* irq_chip for external interrupts: latched, so they need an ack too */
static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};
328
/* cascade action for the CPU IP2 line feeding the internal controller */
static struct irqaction cpu_ip2_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip2",
	.flags		= IRQF_NO_THREAD,
};
334
/* cascade action for CPU lines carrying non-cascaded external IRQs */
static struct irqaction cpu_ext_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_extirq",
	.flags		= IRQF_NO_THREAD,
};
340
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200341static void bcm63xx_init_irq(void)
342{
343 int irq_bits;
344
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200345 irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
346 irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200347 irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
348 irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200349
350 switch (bcm63xx_get_cpu_id()) {
351 case BCM3368_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200352 irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
353 irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200354 irq_stat_addr[1] = 0;
355 irq_stat_addr[1] = 0;
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200356 irq_bits = 32;
357 ext_irq_count = 4;
358 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
359 break;
360 case BCM6328_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200361 irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
362 irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200363 irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
364 irq_stat_addr[1] += PERF_IRQMASK_6328_REG(1);
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200365 irq_bits = 64;
366 ext_irq_count = 4;
367 is_ext_irq_cascaded = 1;
368 ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
369 ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
370 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
371 break;
372 case BCM6338_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200373 irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
374 irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200375 irq_stat_addr[1] = 0;
376 irq_mask_addr[1] = 0;
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200377 irq_bits = 32;
378 ext_irq_count = 4;
379 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
380 break;
381 case BCM6345_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200382 irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
383 irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200384 irq_stat_addr[1] = 0;
385 irq_mask_addr[1] = 0;
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200386 irq_bits = 32;
387 ext_irq_count = 4;
388 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
389 break;
390 case BCM6348_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200391 irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
392 irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200393 irq_stat_addr[1] = 0;
394 irq_mask_addr[1] = 0;
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200395 irq_bits = 32;
396 ext_irq_count = 4;
397 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
398 break;
399 case BCM6358_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200400 irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
401 irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200402 irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
403 irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200404 irq_bits = 32;
405 ext_irq_count = 4;
406 is_ext_irq_cascaded = 1;
407 ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
408 ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
409 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
410 break;
411 case BCM6362_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200412 irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
413 irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200414 irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
415 irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200416 irq_bits = 64;
417 ext_irq_count = 4;
418 is_ext_irq_cascaded = 1;
419 ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
420 ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
421 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
422 break;
423 case BCM6368_CPU_ID:
Jonas Gorskicc81d7f2014-07-12 12:49:36 +0200424 irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
425 irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
Jonas Gorski3534b5c2014-07-12 12:49:37 +0200426 irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
427 irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
Jonas Gorskia6dfde82014-07-12 12:49:34 +0200428 irq_bits = 64;
429 ext_irq_count = 6;
430 is_ext_irq_cascaded = 1;
431 ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
432 ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
433 ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
434 ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
435 break;
436 default:
437 BUG();
438 }
439
440 if (irq_bits == 32) {
441 dispatch_internal = __dispatch_internal_32;
442 internal_irq_mask = __internal_irq_mask_32;
443 internal_irq_unmask = __internal_irq_unmask_32;
444 } else {
445 dispatch_internal = __dispatch_internal_64;
446 internal_irq_mask = __internal_irq_mask_64;
447 internal_irq_unmask = __internal_irq_unmask_64;
448 }
449}
450
/*
 * Platform IRQ setup entry point: detect the SoC, register the MIPS
 * CPU irq_chip, attach the internal and external irq_chips to their
 * IRQ ranges, and install the cascade handlers.
 */
void __init arch_init_irq(void)
{
	int i;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	/* internal IRQs are level triggered */
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	/* external IRQs default to edge; set_type may switch the handler */
	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	if (!is_ext_irq_cascaded) {
		/* external IRQs hang directly off CPU lines IP3.. */
		for (i = 3; i < 3 + ext_irq_count; ++i)
			setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
	}

	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
}