blob: 80e1d2fd9d4c383522c2cb96d3d4b3f7d2b89a42 [file] [log] [blame]
viresh kumar4c18e772010-05-03 09:24:30 +01001/*
viresh kumar4c18e772010-05-03 09:24:30 +01002 * SPEAr platform shared irq layer source file
3 *
Viresh Kumardf1590d2012-11-12 22:56:03 +05304 * Copyright (C) 2009-2012 ST Microelectronics
Viresh Kumar10d89352012-06-20 12:53:02 -07005 * Viresh Kumar <viresh.linux@gmail.com>
viresh kumar4c18e772010-05-03 09:24:30 +01006 *
Viresh Kumardf1590d2012-11-12 22:56:03 +05307 * Copyright (C) 2012 ST Microelectronics
8 * Shiraz Hashim <shiraz.hashim@st.com>
9 *
viresh kumar4c18e772010-05-03 09:24:30 +010010 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 */
Shiraz Hashim80515a5a2012-08-03 15:33:10 +053014#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
viresh kumar4c18e772010-05-03 09:24:30 +010015
16#include <linux/err.h>
Shiraz Hashim80515a5a2012-08-03 15:33:10 +053017#include <linux/export.h>
18#include <linux/interrupt.h>
viresh kumar4c18e772010-05-03 09:24:30 +010019#include <linux/io.h>
20#include <linux/irq.h>
Shiraz Hashim80515a5a2012-08-03 15:33:10 +053021#include <linux/irqdomain.h>
Viresh Kumardf1590d2012-11-12 22:56:03 +053022#include <linux/irqchip/spear-shirq.h>
Shiraz Hashim80515a5a2012-08-03 15:33:10 +053023#include <linux/of.h>
24#include <linux/of_address.h>
25#include <linux/of_irq.h>
viresh kumar4c18e772010-05-03 09:24:30 +010026#include <linux/spinlock.h>
viresh kumar4c18e772010-05-03 09:24:30 +010027
/* Serializes read-modify-write of the shared enable registers */
static DEFINE_SPINLOCK(lock);
29
/* spear300 shared irq registers offsets and masks */
#define SPEAR300_INT_ENB_MASK_REG	0x54
#define SPEAR300_INT_STS_MASK_REG	0x58

/*
 * SPEAr300 RAS block: 9 shared interrupts starting at status bit 0.
 * Has enable and status registers; clear_reg == -1 means there is no
 * clear register, so interrupts are acked at the source device.
 */
static struct spear_shirq spear300_shirq_ras1 = {
	.irq_nr = 9,
	.irq_bit_off = 0,
	.regs = {
		.enb_reg = SPEAR300_INT_ENB_MASK_REG,
		.status_reg = SPEAR300_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

/* All shared-irq blocks of SPEAr300, in hwirq order */
static struct spear_shirq *spear300_shirq_blocks[] = {
	&spear300_shirq_ras1,
};
47
/* spear310 shared irq registers offsets and masks */
#define SPEAR310_INT_STS_MASK_REG	0x04

/*
 * SPEAr310 blocks share one status register and have no enable or
 * clear registers (enb_reg/clear_reg == -1): the interrupts can only
 * be demuxed, not masked here, and are cleared at the source.
 */

/* 8 interrupts at status bits 0..7 */
static struct spear_shirq spear310_shirq_ras1 = {
	.irq_nr = 8,
	.irq_bit_off = 0,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

/* 5 interrupts at status bits 8..12 */
static struct spear_shirq spear310_shirq_ras2 = {
	.irq_nr = 5,
	.irq_bit_off = 8,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

/* single interrupt at status bit 13 */
static struct spear_shirq spear310_shirq_ras3 = {
	.irq_nr = 1,
	.irq_bit_off = 13,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

/* 3 interrupts at status bits 14..16 */
static struct spear_shirq spear310_shirq_intrcomm_ras = {
	.irq_nr = 3,
	.irq_bit_off = 14,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

/* All shared-irq blocks of SPEAr310, in hwirq order */
static struct spear_shirq *spear310_shirq_blocks[] = {
	&spear310_shirq_ras1,
	&spear310_shirq_ras2,
	&spear310_shirq_ras3,
	&spear310_shirq_intrcomm_ras,
};
97
/* spear320 shared irq registers offsets and masks */
#define SPEAR320_INT_STS_MASK_REG	0x04
#define SPEAR320_INT_CLR_MASK_REG	0x04
#define SPEAR320_INT_ENB_MASK_REG	0x08

/*
 * SPEAr320 blocks have a clear register (same offset as status) with
 * reset_to_clear semantics: an interrupt is acknowledged by clearing
 * its bit rather than writing 1 to it.
 */

/* 3 interrupts at status bits 7..9; no per-irq enable */
static struct spear_shirq spear320_shirq_ras1 = {
	.irq_nr = 3,
	.irq_bit_off = 7,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

/* single interrupt at status bit 10; no per-irq enable */
static struct spear_shirq spear320_shirq_ras2 = {
	.irq_nr = 1,
	.irq_bit_off = 10,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

/*
 * 3 interrupts at status bits 0..2 with an enable register
 * (reset_to_enb: clearing a bit enables the interrupt).  Marked
 * invalid_irq so it is not registered by default; presumably only
 * valid on specific board variants — TODO confirm.
 */
static struct spear_shirq spear320_shirq_ras3 = {
	.irq_nr = 3,
	.irq_bit_off = 0,
	.invalid_irq = 1,
	.regs = {
		.enb_reg = SPEAR320_INT_ENB_MASK_REG,
		.reset_to_enb = 1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

/* 11 interrupts at status bits 11..21; no per-irq enable */
static struct spear_shirq spear320_shirq_intrcomm_ras = {
	.irq_nr = 11,
	.irq_bit_off = 11,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

/* All shared-irq blocks of SPEAr320, in hwirq order (ras3 is hwirq 0) */
static struct spear_shirq *spear320_shirq_blocks[] = {
	&spear320_shirq_ras3,
	&spear320_shirq_ras1,
	&spear320_shirq_ras2,
	&spear320_shirq_intrcomm_ras,
};
155
156static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
viresh kumar4c18e772010-05-03 09:24:30 +0100157{
Lennert Buytenhek0e60e112010-11-29 11:22:33 +0100158 struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530159 u32 val, offset = d->irq - shirq->irq_base;
viresh kumar4c18e772010-05-03 09:24:30 +0100160 unsigned long flags;
161
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530162 if (shirq->regs.enb_reg == -1)
viresh kumar4c18e772010-05-03 09:24:30 +0100163 return;
164
165 spin_lock_irqsave(&lock, flags);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530166 val = readl(shirq->base + shirq->regs.enb_reg);
167
168 if (mask ^ shirq->regs.reset_to_enb)
169 val &= ~(0x1 << shirq->irq_bit_off << offset);
viresh kumar4c18e772010-05-03 09:24:30 +0100170 else
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530171 val |= 0x1 << shirq->irq_bit_off << offset;
172
173 writel(val, shirq->base + shirq->regs.enb_reg);
viresh kumar4c18e772010-05-03 09:24:30 +0100174 spin_unlock_irqrestore(&lock, flags);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530175
176}
177
178static void shirq_irq_mask(struct irq_data *d)
179{
180 shirq_irq_mask_unmask(d, 1);
viresh kumar4c18e772010-05-03 09:24:30 +0100181}
182
Lennert Buytenhek0e60e112010-11-29 11:22:33 +0100183static void shirq_irq_unmask(struct irq_data *d)
viresh kumar4c18e772010-05-03 09:24:30 +0100184{
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530185 shirq_irq_mask_unmask(d, 0);
viresh kumar4c18e772010-05-03 09:24:30 +0100186}
187
/* irq_chip for the demuxed interrupts; ack is implemented as mask */
static struct irq_chip shirq_chip = {
	.name		= "spear-shirq",
	.irq_ack	= shirq_irq_mask,
	.irq_mask	= shirq_irq_mask,
	.irq_unmask	= shirq_irq_unmask,
};
194
195static void shirq_handler(unsigned irq, struct irq_desc *desc)
196{
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530197 u32 i, j, val, mask, tmp;
198 struct irq_chip *chip;
Thomas Gleixner6845664a2011-03-24 13:25:22 +0100199 struct spear_shirq *shirq = irq_get_handler_data(irq);
viresh kumar4c18e772010-05-03 09:24:30 +0100200
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530201 chip = irq_get_chip(irq);
202 chip->irq_ack(&desc->irq_data);
203
204 mask = ((0x1 << shirq->irq_nr) - 1) << shirq->irq_bit_off;
205 while ((val = readl(shirq->base + shirq->regs.status_reg) &
206 mask)) {
207
208 val >>= shirq->irq_bit_off;
209 for (i = 0, j = 1; i < shirq->irq_nr; i++, j <<= 1) {
210
211 if (!(j & val))
viresh kumar4c18e772010-05-03 09:24:30 +0100212 continue;
213
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530214 generic_handle_irq(shirq->irq_base + i);
viresh kumar4c18e772010-05-03 09:24:30 +0100215
216 /* clear interrupt */
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530217 if (shirq->regs.clear_reg == -1)
viresh kumar4c18e772010-05-03 09:24:30 +0100218 continue;
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530219
220 tmp = readl(shirq->base + shirq->regs.clear_reg);
viresh kumar4c18e772010-05-03 09:24:30 +0100221 if (shirq->regs.reset_to_clear)
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530222 tmp &= ~(j << shirq->irq_bit_off);
viresh kumar4c18e772010-05-03 09:24:30 +0100223 else
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530224 tmp |= (j << shirq->irq_bit_off);
225 writel(tmp, shirq->base + shirq->regs.clear_reg);
viresh kumar4c18e772010-05-03 09:24:30 +0100226 }
227 }
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530228 chip->irq_unmask(&desc->irq_data);
viresh kumar4c18e772010-05-03 09:24:30 +0100229}
230
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530231static void __init spear_shirq_register(struct spear_shirq *shirq)
viresh kumar4c18e772010-05-03 09:24:30 +0100232{
233 int i;
234
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530235 if (shirq->invalid_irq)
236 return;
viresh kumar4c18e772010-05-03 09:24:30 +0100237
Thomas Gleixner6845664a2011-03-24 13:25:22 +0100238 irq_set_chained_handler(shirq->irq, shirq_handler);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530239 for (i = 0; i < shirq->irq_nr; i++) {
240 irq_set_chip_and_handler(shirq->irq_base + i,
Thomas Gleixnerf38c02f2011-03-24 13:35:09 +0100241 &shirq_chip, handle_simple_irq);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530242 set_irq_flags(shirq->irq_base + i, IRQF_VALID);
243 irq_set_chip_data(shirq->irq_base + i, shirq);
viresh kumar4c18e772010-05-03 09:24:30 +0100244 }
245
Thomas Gleixner6845664a2011-03-24 13:25:22 +0100246 irq_set_handler_data(shirq->irq, shirq);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530247}
248
249static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
250 struct device_node *np)
251{
252 int i, irq_base, hwirq = 0, irq_nr = 0;
253 static struct irq_domain *shirq_domain;
254 void __iomem *base;
255
256 base = of_iomap(np, 0);
257 if (!base) {
258 pr_err("%s: failed to map shirq registers\n", __func__);
259 return -ENXIO;
260 }
261
262 for (i = 0; i < block_nr; i++)
263 irq_nr += shirq_blocks[i]->irq_nr;
264
265 irq_base = irq_alloc_descs(-1, 0, irq_nr, 0);
266 if (IS_ERR_VALUE(irq_base)) {
267 pr_err("%s: irq desc alloc failed\n", __func__);
268 goto err_unmap;
269 }
270
271 shirq_domain = irq_domain_add_legacy(np, irq_nr, irq_base, 0,
272 &irq_domain_simple_ops, NULL);
273 if (WARN_ON(!shirq_domain)) {
274 pr_warn("%s: irq domain init failed\n", __func__);
275 goto err_free_desc;
276 }
277
278 for (i = 0; i < block_nr; i++) {
279 shirq_blocks[i]->base = base;
280 shirq_blocks[i]->irq_base = irq_find_mapping(shirq_domain,
281 hwirq);
282 shirq_blocks[i]->irq = irq_of_parse_and_map(np, i);
283
284 spear_shirq_register(shirq_blocks[i]);
285 hwirq += shirq_blocks[i]->irq_nr;
286 }
287
viresh kumar4c18e772010-05-03 09:24:30 +0100288 return 0;
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530289
290err_free_desc:
291 irq_free_descs(irq_base, irq_nr);
292err_unmap:
293 iounmap(base);
294 return -ENXIO;
295}
296
297int __init spear300_shirq_of_init(struct device_node *np,
298 struct device_node *parent)
299{
300 return shirq_init(spear300_shirq_blocks,
301 ARRAY_SIZE(spear300_shirq_blocks), np);
302}
303
304int __init spear310_shirq_of_init(struct device_node *np,
305 struct device_node *parent)
306{
307 return shirq_init(spear310_shirq_blocks,
308 ARRAY_SIZE(spear310_shirq_blocks), np);
309}
310
311int __init spear320_shirq_of_init(struct device_node *np,
312 struct device_node *parent)
313{
314 return shirq_init(spear320_shirq_blocks,
315 ARRAY_SIZE(spear320_shirq_blocks), np);
viresh kumar4c18e772010-05-03 09:24:30 +0100316}