/*
 * Support for C64x+ Megamodule Interrupt Controller
 *
 * Copyright (C) 2010, 2011 Texas Instruments Incorporated
 * Contributed by: Mark Salter <msalter@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/soc.h>
#include <asm/megamod-pic.h>

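/*
 * The megamodule exposes NR_COMBINERS event combiners, each collecting 32
 * event sources, and NR_MUX_OUTPUTS core interrupt priorities (4 - 15) onto
 * which individual events can be muxed.
 */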
#define NR_COMBINERS	4
#define NR_MUX_OUTPUTS	12

#define IRQ_UNMAPPED	0xffff

/*
 * Megamodule Interrupt Controller register layout
 */
struct megamod_regs {
	u32	evtflag[8];
	u32	evtset[8];
	u32	evtclr[8];
	u32	reserved0[8];
	u32	evtmask[8];
	u32	mevtflag[8];
	u32	expmask[8];
	u32	mexpflag[8];
	u32	intmux_unused;
	u32	intmux[7];
	u32	reserved1[8];
	u32	aegmux[2];
	u32	reserved2[14];
	u32	intxstat;
	u32	intxclr;
	u32	intdmask;
	u32	reserved3[13];
	u32	evtasrt;
};

struct megamod_pic {
	struct irq_domain *irqhost;
	struct megamod_regs __iomem *regs;
	raw_spinlock_t lock;

	/* hw mux mapping */
	unsigned int output_to_irq[NR_MUX_OUTPUTS];
};

static struct megamod_pic *mm_pic;

struct megamod_cascade_data {
	struct megamod_pic *pic;
	int index;
};

static struct megamod_cascade_data cascade_data[NR_COMBINERS];

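/*
 * Events are masked by setting the corresponding bit in EVTMASK and
 * unmasked by clearing it. The read-modify-write is done under pic->lock
 * since several sources share each 32-bit mask register.
 */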
static void mask_megamod(struct irq_data *data)
{
	struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
	irq_hw_number_t src = irqd_to_hwirq(data);
	u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];

	raw_spin_lock(&pic->lock);
	soc_writel(soc_readl(evtmask) | (1 << (src & 31)), evtmask);
	raw_spin_unlock(&pic->lock);
}

static void unmask_megamod(struct irq_data *data)
{
	struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
	irq_hw_number_t src = irqd_to_hwirq(data);
	u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];

	raw_spin_lock(&pic->lock);
	soc_writel(soc_readl(evtmask) & ~(1 << (src & 31)), evtmask);
	raw_spin_unlock(&pic->lock);
}

static struct irq_chip megamod_chip = {
	.name		= "megamod",
	.irq_mask	= mask_megamod,
	.irq_unmask	= unmask_megamod,
};

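/*
 * Chained handler for a combined interrupt: scan the masked event flags for
 * this combiner, clear each pending event and hand it to the virq mapped in
 * the megamod irq domain.
 */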
static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct megamod_cascade_data *cascade;
	struct megamod_pic *pic;
	u32 events;
	int n, idx;

	cascade = irq_desc_get_handler_data(desc);

	pic = cascade->pic;
	idx = cascade->index;

	while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) {
		n = __ffs(events);

		irq = irq_linear_revmap(pic->irqhost, idx * 32 + n);

		soc_writel(1 << n, &pic->regs->evtclr[idx]);

		generic_handle_irq(irq);
	}
}

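/*
 * irq_domain .map callback: refuse hwirqs that are muxed directly to a core
 * priority interrupt (those belong to the core controller) and set up the
 * chip and handler for everything routed through a combiner.
 */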
static int megamod_map(struct irq_domain *h, unsigned int virq,
		       irq_hw_number_t hw)
{
	struct megamod_pic *pic = h->host_data;
	int i;

	/* We shouldn't see a hwirq which is muxed to core controller */
	for (i = 0; i < NR_MUX_OUTPUTS; i++)
		if (pic->output_to_irq[i] == hw)
			return -1;

	irq_set_chip_data(virq, pic);
	irq_set_chip_and_handler(virq, &megamod_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static const struct irq_domain_ops megamod_domain_ops = {
	.map	= megamod_map,
	.xlate	= irq_domain_xlate_onecell,
};

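/*
 * Route megamodule event 'src' to core interrupt 'output'. Each INTMUX
 * register holds four 8-bit event numbers, so the field is selected with
 * output / 4 and (output & 3) * 8. Out-of-range sources mark the output
 * as unmapped.
 */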
static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
{
	int index, offset;
	u32 val;

	if (src < 0 || src >= (NR_COMBINERS * 32)) {
		pic->output_to_irq[output] = IRQ_UNMAPPED;
		return;
	}

	/* four mappings per mux register */
	index = output / 4;
	offset = (output & 3) * 8;

	val = soc_readl(&pic->regs->intmux[index]);
	val &= ~(0xff << offset);
	val |= src << offset;
	soc_writel(val, &pic->regs->intmux[index]);
}

/*
 * Parse the MUX mapping, if one exists.
 *
 * The MUX map is an array of up to 12 cells; one for each usable core priority
 * interrupt. The value of a given cell is the megamodule interrupt source
 * which is to be MUXed to the output corresponding to the cell position
 * within the array. The first cell in the array corresponds to priority
 * 4 and the last (12th) cell corresponds to priority 15. The allowed
 * values are 4 - ((NR_COMBINERS * 32) - 1). Note that the combined interrupt
 * sources (0 - 3) are not allowed to be mapped through this property. They
 * are handled through the "interrupts" property. This allows us to use a
 * value of zero as a "do not map" placeholder.
 */
static void __init parse_priority_map(struct megamod_pic *pic,
				      int *mapping, int size)
{
	struct device_node *np = pic->irqhost->of_node;
	const __be32 *map;
	int i, maplen;
	u32 val;

	map = of_get_property(np, "ti,c64x+megamod-pic-mux", &maplen);
	if (map) {
		maplen /= 4;
		if (maplen > size)
			maplen = size;

		for (i = 0; i < maplen; i++) {
			val = be32_to_cpup(map);
			if (val && val >= 4)
				mapping[i] = val;
			++map;
		}
	}
}
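
/*
 * A hypothetical device tree node using this binding might look like the
 * sketch below; the unit address, register size, parent interrupts and mux
 * values are illustrative placeholders only, not taken from any real board:
 *
 *	megamod_pic: interrupt-controller@1800000 {
 *		compatible = "ti,c64x+megamod-pic";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		reg = <0x1800000 0x1000>;
 *		interrupts = <12 13 14 15>;
 *		ti,c64x+megamod-pic-mux = <0 0 0 0  32 0 0 0  0 0 0 0>;
 *	};
 *
 * Here the four combined events cascade to core priorities 12 - 15 and
 * megamodule event 32 is muxed to core priority 8; a value of zero means
 * "do not map".
 */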

static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
{
	struct megamod_pic *pic;
	int i, irq;
	int mapping[NR_MUX_OUTPUTS];

	pr_info("Initializing C64x+ Megamodule PIC\n");

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
		pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
		return NULL;
	}

	pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
					     &megamod_domain_ops, pic);
	if (!pic->irqhost) {
		pr_err("%s: Could not alloc host.\n", np->full_name);
		goto error_free;
	}

	pic->irqhost->host_data = pic;

	raw_spin_lock_init(&pic->lock);

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
		pr_err("%s: Could not map registers.\n", np->full_name);
		goto error_free;
	}

	/* Initialize MUX map */
	for (i = 0; i < ARRAY_SIZE(mapping); i++)
		mapping[i] = IRQ_UNMAPPED;

	parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));

	/*
	 * We can have up to 12 interrupts cascading to the core controller.
	 * These cascades can be from the combined interrupt sources or for
	 * individual interrupt sources. The "interrupts" property only
	 * deals with the cascaded combined interrupts. The individual
	 * interrupts muxed to the core controller use the core controller
	 * as their interrupt parent.
	 */
	for (i = 0; i < NR_COMBINERS; i++) {

		irq = irq_of_parse_and_map(np, i);
		if (irq == NO_IRQ)
			continue;

		/*
		 * We count on the core priority interrupts (4 - 15) being
		 * direct mapped. Check that device tree provided something
		 * in that range.
		 */
		if (irq < 4 || irq >= NR_PRIORITY_IRQS) {
			pr_err("%s: combiner-%d virq %d out of range!\n",
			       np->full_name, i, irq);
			continue;
		}

		/* record the mapping */
		mapping[irq - 4] = i;

		pr_debug("%s: combiner-%d cascading to virq %d\n",
			 np->full_name, i, irq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

		/* mask and clear all events in combiner */
		soc_writel(~0, &pic->regs->evtmask[i]);
		soc_writel(~0, &pic->regs->evtclr[i]);

		irq_set_handler_data(irq, &cascade_data[i]);
		irq_set_chained_handler(irq, megamod_irq_cascade);
	}

	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
			pr_debug("%s: setting mux %d to priority %d\n",
				 np->full_name, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

	return pic;

error_free:
	kfree(pic);

	return NULL;
}

/*
 * Return next active event after ACK'ing it.
 * Return -1 if no events active.
 */
static int get_exception(void)
{
	int i, bit;
	u32 mask;

	for (i = 0; i < NR_COMBINERS; i++) {
		mask = soc_readl(&mm_pic->regs->mexpflag[i]);
		if (mask) {
			bit = __ffs(mask);
			soc_writel(1 << bit, &mm_pic->regs->evtclr[i]);
			return (i * 32) + bit;
		}
	}
	return -1;
}

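/* Software-assert a megamodule event by writing its number to EVTASRT. */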
static void assert_event(unsigned int val)
{
	soc_writel(val, &mm_pic->regs->evtasrt);
}

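/*
 * Boot-time entry point: find the megamodule PIC node, build the irq
 * domain state and hook the exception helpers into soc_ops.
 */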
void __init megamod_pic_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ti,c64x+megamod-pic");
	if (!np)
		return;

	mm_pic = init_megamod_pic(np);
	of_node_put(np);

	soc_ops.get_exception = get_exception;
	soc_ops.assert_event = assert_event;

	return;
}