blob: 91d1765561b5864b7e358bd5db03e87a15592b39 [file] [log] [blame]
Maxime Bizone7300d02009-08-18 13:23:37 +01001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
7 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/module.h>
David Howellsca4d3e672010-10-07 14:08:54 +010014#include <linux/irq.h>
Maxime Bizone7300d02009-08-18 13:23:37 +010015#include <asm/irq_cpu.h>
16#include <asm/mipsregs.h>
17#include <bcm63xx_cpu.h>
18#include <bcm63xx_regs.h>
19#include <bcm63xx_io.h>
20#include <bcm63xx_irq.h>
21
/* Base addresses of the PERF IRQ status/mask registers (slot 0 = this CPU;
 * the two-entry arrays look like preparation for a second set — TODO confirm
 * against later history). Filled in by bcm63xx_init_irq(). */
static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
/* Width-specific helpers selected at boot (32- or 64-bit variants). */
static void (*dispatch_internal)(void);
/* Non-zero when external irqs are routed through the internal controller. */
static int is_ext_irq_cascaded;
/* Number of external irq lines on this SoC (4 or 6). */
static unsigned int ext_irq_count;
/* Internal-irq bit range occupied by cascaded external irqs. */
static unsigned int ext_irq_start, ext_irq_end;
/* PERF external-irq config register offsets (reg2 only used when count > 4). */
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(unsigned int irq);
static void (*internal_irq_unmask)(unsigned int irq);
Maxime Bizonf61cced2011-11-04 19:09:31 +010031
Maxime Bizonf61cced2011-11-04 19:09:31 +010032
Maxime Bizon62248922011-11-04 19:09:34 +010033static inline u32 get_ext_irq_perf_reg(int irq)
34{
35 if (irq < 4)
36 return ext_irq_cfg_reg1;
37 return ext_irq_cfg_reg2;
38}
39
Maxime Bizonf61cced2011-11-04 19:09:31 +010040static inline void handle_internal(int intbit)
41{
Maxime Bizon37c42a72011-11-04 19:09:32 +010042 if (is_ext_irq_cascaded &&
43 intbit >= ext_irq_start && intbit <= ext_irq_end)
44 do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
45 else
46 do_IRQ(intbit + IRQ_INTERNAL_BASE);
Maxime Bizonf61cced2011-11-04 19:09:31 +010047}
48
Maxime Bizone7300d02009-08-18 13:23:37 +010049/*
50 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
51 * prioritize any interrupt relatively to another. the static counter
52 * will resume the loop where it ended the last time we left this
53 * function.
54 */
Maxime Bizone7300d02009-08-18 13:23:37 +010055
/*
 * Generate the dispatch/mask/unmask helpers for an internal interrupt
 * controller that is <width> (32 or 64) bits wide.  A 64-bit controller
 * spans two consecutive 32-bit registers laid out most-significant word
 * first, which is why word indices are flipped ("--tgt" below and
 * "^ (width/32 - 1)" in the mask helpers) when mapping an irq number to
 * a register offset.
 */
#define BUILD_IPIC_INTERNAL(width)					\
void __dispatch_internal_##width(void)					\
{									\
	u32 pending[width / 32];					\
	unsigned int src, tgt;						\
	bool irqs_pending = false;					\
	static unsigned int i; /* round-robin resume point */		\
									\
	/* read registers in reverse order */				\
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
		u32 val;						\
									\
		/* only bits both raised and unmasked are pending */	\
		val = bcm_readl(irq_stat_addr[0] + src * sizeof(u32));	\
		val &= bcm_readl(irq_mask_addr[0] + src * sizeof(u32));	\
		pending[--tgt] = val;					\
									\
		if (val)						\
			irqs_pending = true;				\
	}								\
									\
	if (!irqs_pending)						\
		return;							\
									\
	/* handle exactly one irq per call, resuming the scan where	\
	 * the previous call stopped so no source is starved */		\
	while (1) {							\
		unsigned int to_call = i;				\
									\
		i = (i + 1) & (width - 1);				\
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
			handle_internal(to_call);			\
			break;						\
		}							\
	}								\
}									\
									\
/* Clear the irq's bit in the PERF mask register (disables delivery). */\
static void __internal_irq_mask_##width(unsigned int irq)		\
{									\
	u32 val;							\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
									\
	val = bcm_readl(irq_mask_addr[0] + reg * sizeof(u32));		\
	val &= ~(1 << bit);						\
	bcm_writel(val, irq_mask_addr[0] + reg * sizeof(u32));		\
}									\
									\
/* Set the irq's bit in the PERF mask register (enables delivery). */	\
static void __internal_irq_unmask_##width(unsigned int irq)		\
{									\
	u32 val;							\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
									\
	val = bcm_readl(irq_mask_addr[0] + reg * sizeof(u32));		\
	val |= (1 << bit);						\
	bcm_writel(val, irq_mask_addr[0] + reg * sizeof(u32));		\
}
111
/* Instantiate both controller widths; bcm63xx_init_irq() picks one set. */
BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);
Maxime Bizon71a43922011-11-04 19:09:33 +0100114
/*
 * Top-level MIPS interrupt dispatch: loop until no enabled CP0 cause
 * bit remains pending, servicing IP7 first on each pass.
 */
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		/* consider only lines that are raised AND unmasked */
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7); /* presumably the CPU timer — standard MIPS wiring */
		if (cause & CAUSEF_IP0)
			do_IRQ(0); /* software interrupt 0 */
		if (cause & CAUSEF_IP1)
			do_IRQ(1); /* software interrupt 1 */
		if (cause & CAUSEF_IP2)
			dispatch_internal(); /* internal controller cascade */
		if (!is_ext_irq_cascaded) {
			/* external lines wired straight to IP3..IP6 */
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}
145
146/*
147 * internal IRQs operations: only mask/unmask on PERF irq mask
148 * register.
149 */
Maxime Bizon37c42a72011-11-04 19:09:32 +0100150static void bcm63xx_internal_irq_mask(struct irq_data *d)
151{
152 internal_irq_mask(d->irq - IRQ_INTERNAL_BASE);
153}
154
155static void bcm63xx_internal_irq_unmask(struct irq_data *d)
156{
157 internal_irq_unmask(d->irq - IRQ_INTERNAL_BASE);
158}
159
Maxime Bizone7300d02009-08-18 13:23:37 +0100160/*
161 * external IRQs operations: mask/unmask and clear on PERF external
162 * irq control register.
163 */
Thomas Gleixner93f29362011-03-23 21:08:47 +0000164static void bcm63xx_external_irq_mask(struct irq_data *d)
Maxime Bizone7300d02009-08-18 13:23:37 +0100165{
Maxime Bizon37c42a72011-11-04 19:09:32 +0100166 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
Maxime Bizon62248922011-11-04 19:09:34 +0100167 u32 reg, regaddr;
Maxime Bizone7300d02009-08-18 13:23:37 +0100168
Maxime Bizon62248922011-11-04 19:09:34 +0100169 regaddr = get_ext_irq_perf_reg(irq);
170 reg = bcm_perf_readl(regaddr);
171
172 if (BCMCPU_IS_6348())
173 reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
174 else
175 reg &= ~EXTIRQ_CFG_MASK(irq % 4);
176
177 bcm_perf_writel(reg, regaddr);
Maxime Bizon37c42a72011-11-04 19:09:32 +0100178 if (is_ext_irq_cascaded)
179 internal_irq_mask(irq + ext_irq_start);
Maxime Bizone7300d02009-08-18 13:23:37 +0100180}
181
Thomas Gleixner93f29362011-03-23 21:08:47 +0000182static void bcm63xx_external_irq_unmask(struct irq_data *d)
Maxime Bizone7300d02009-08-18 13:23:37 +0100183{
Maxime Bizon37c42a72011-11-04 19:09:32 +0100184 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
Maxime Bizon62248922011-11-04 19:09:34 +0100185 u32 reg, regaddr;
Maxime Bizone7300d02009-08-18 13:23:37 +0100186
Maxime Bizon62248922011-11-04 19:09:34 +0100187 regaddr = get_ext_irq_perf_reg(irq);
188 reg = bcm_perf_readl(regaddr);
189
190 if (BCMCPU_IS_6348())
191 reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
192 else
193 reg |= EXTIRQ_CFG_MASK(irq % 4);
194
195 bcm_perf_writel(reg, regaddr);
196
Maxime Bizon37c42a72011-11-04 19:09:32 +0100197 if (is_ext_irq_cascaded)
198 internal_irq_unmask(irq + ext_irq_start);
Maxime Bizone7300d02009-08-18 13:23:37 +0100199}
200
Thomas Gleixner93f29362011-03-23 21:08:47 +0000201static void bcm63xx_external_irq_clear(struct irq_data *d)
Maxime Bizone7300d02009-08-18 13:23:37 +0100202{
Maxime Bizon37c42a72011-11-04 19:09:32 +0100203 unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
Maxime Bizon62248922011-11-04 19:09:34 +0100204 u32 reg, regaddr;
Maxime Bizone7300d02009-08-18 13:23:37 +0100205
Maxime Bizon62248922011-11-04 19:09:34 +0100206 regaddr = get_ext_irq_perf_reg(irq);
207 reg = bcm_perf_readl(regaddr);
208
209 if (BCMCPU_IS_6348())
210 reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
211 else
212 reg |= EXTIRQ_CFG_CLEAR(irq % 4);
213
214 bcm_perf_writel(reg, regaddr);
Maxime Bizone7300d02009-08-18 13:23:37 +0100215}
216
/*
 * Program the trigger mode (level/edge, polarity, both-edge) of an
 * external irq in the PERF config register, then switch the flow
 * handler to match.  Returns 0 on success, -EINVAL for unsupported
 * trigger combinations.
 */
static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	/* decoded hardware knobs: level-vs-edge, polarity, both edges */
	int levelsense, sense, bothedge;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	/* hardware default when the caller expresses no preference */
	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		/* all knobs stay 0: falling edge is the base encoding */
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		printk(KERN_ERR "bogus flow type combination given !\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;	/* each config register covers four lines */

	/* the 6348 uses a distinct bit layout from the other SoCs */
	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;
	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);

	/* record the type and pick the matching generic flow handler */
	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	/* NOCOPY: trigger type already stored via irqd_set_trigger_type() */
	return IRQ_SET_MASK_OK_NOCOPY;
}
310
/* irq_chip for internal peripheral irqs: mask/unmask only, no ack or
 * trigger-type control (the PERF block has no such knobs for them). */
static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};
316
/* irq_chip for external irq pins: supports ack (clear), mask/unmask
 * and trigger-type configuration via the PERF config registers. */
static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};
326
/* Placeholder action on the CPU IP2 line: real dispatch happens in
 * plat_irq_dispatch(); this only claims the line.  IRQF_NO_THREAD
 * because a cascade must never be force-threaded. */
static struct irqaction cpu_ip2_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip2",
	.flags		= IRQF_NO_THREAD,
};

/* Same placeholder for the dedicated external-irq CPU lines (IP3..)
 * used when external irqs are NOT cascaded through the internal PIC. */
static struct irqaction cpu_ext_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_extirq",
	.flags		= IRQF_NO_THREAD,
};
338
/*
 * Per-SoC irq controller setup: resolve the PERF status/mask register
 * addresses, controller width, external-irq layout and cascade mode for
 * the running CPU, then select the matching 32/64-bit helper set.
 */
static void bcm63xx_init_irq(void)
{
	int irq_bits;	/* internal controller width: 32 or 64 */

	/* both registers live in the PERF block; per-SoC offsets added below */
	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
		irq_bits = 64;
		ext_irq_count = 4;
		/* external irqs arrive as bits of the internal controller */
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
		irq_bits = 64;
		/* only SoC here with 6 external lines; needs a second cfg reg */
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

	/* bind the helper set generated by BUILD_IPIC_INTERNAL() */
	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}
430
Maxime Bizone7300d02009-08-18 13:23:37 +0100431void __init arch_init_irq(void)
432{
433 int i;
434
Maxime Bizonf61cced2011-11-04 19:09:31 +0100435 bcm63xx_init_irq();
Maxime Bizone7300d02009-08-18 13:23:37 +0100436 mips_cpu_irq_init();
437 for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
Thomas Gleixnere4ec7982011-03-27 15:19:28 +0200438 irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
Maxime Bizone7300d02009-08-18 13:23:37 +0100439 handle_level_irq);
440
Maxime Bizon62248922011-11-04 19:09:34 +0100441 for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
Thomas Gleixnere4ec7982011-03-27 15:19:28 +0200442 irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
Maxime Bizone7300d02009-08-18 13:23:37 +0100443 handle_edge_irq);
444
Maxime Bizon37c42a72011-11-04 19:09:32 +0100445 if (!is_ext_irq_cascaded) {
Maxime Bizon62248922011-11-04 19:09:34 +0100446 for (i = 3; i < 3 + ext_irq_count; ++i)
Maxime Bizon37c42a72011-11-04 19:09:32 +0100447 setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
448 }
449
450 setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
Maxime Bizone7300d02009-08-18 13:23:37 +0100451}