blob: 8521a7295b0209c79d8e1ff6ed8d737f84462cd1 [file] [log] [blame]
/*
 * SPEAr platform shared irq layer source file
 *
 * Copyright (C) 2009-2012 ST Microelectronics
 * Viresh Kumar <viresh.linux@gmail.com>
 *
 * Copyright (C) 2012 ST Microelectronics
 * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
Shiraz Hashim80515a5a2012-08-03 15:33:10 +053014#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
viresh kumar4c18e772010-05-03 09:24:30 +010015
16#include <linux/err.h>
Shiraz Hashim80515a5a2012-08-03 15:33:10 +053017#include <linux/export.h>
18#include <linux/interrupt.h>
viresh kumar4c18e772010-05-03 09:24:30 +010019#include <linux/io.h>
20#include <linux/irq.h>
Shiraz Hashim80515a5a2012-08-03 15:33:10 +053021#include <linux/irqdomain.h>
22#include <linux/of.h>
23#include <linux/of_address.h>
24#include <linux/of_irq.h>
viresh kumar4c18e772010-05-03 09:24:30 +010025#include <linux/spinlock.h>
viresh kumar4c18e772010-05-03 09:24:30 +010026
Rob Herringe9c51552013-01-02 09:37:56 -060027#include "irqchip.h"
28
/*
 * struct shirq_regs: shared irq register configuration
 *
 * enb_reg: enable register offset; -1 means the block has no enable
 *	    register and its interrupts cannot be masked individually
 * reset_to_enb: val 1 indicates, we need to clear bit for enabling interrupt
 * status_reg: status register offset
 */
struct shirq_regs {
	u32 enb_reg;
	u32 reset_to_enb;
	u32 status_reg;
};
42
/*
 * struct spear_shirq: shared irq structure
 *
 * base: Base register address
 * regs: Register configuration for shared irq block
 * mask: Mask to apply to the status register (covers nr_irqs bits
 *	 starting at 'offset')
 * virq_base: Base virtual interrupt number
 * nr_irqs: Number of interrupts handled by this block
 * offset: Bit offset of the first interrupt
 * disabled: Group is disabled, but accounted
 */
struct spear_shirq {
	void __iomem *base;
	struct shirq_regs regs;
	u32 mask;
	u32 virq_base;
	u32 nr_irqs;
	u32 offset;
	bool disabled;
};
63
/* Serializes read-modify-write updates of the shared enable registers */
static DEFINE_SPINLOCK(lock);
65
/* spear300 shared irq registers offsets and masks */
#define SPEAR300_INT_ENB_MASK_REG	0x54
#define SPEAR300_INT_STS_MASK_REG	0x58

/* RAS1: 9 interrupts at status bits 0..8, individually maskable */
static struct spear_shirq spear300_shirq_ras1 = {
	.offset		= 0,
	.nr_irqs	= 9,
	.mask		= ((0x1 << 9) - 1) << 0,
	.regs		= {
		.enb_reg = SPEAR300_INT_ENB_MASK_REG,
		.status_reg = SPEAR300_INT_STS_MASK_REG,
	},
};

static struct spear_shirq *spear300_shirq_blocks[] = {
	&spear300_shirq_ras1,
};
83
/* spear310 shared irq registers offsets and masks */
#define SPEAR310_INT_STS_MASK_REG	0x04

/*
 * All spear310 blocks share one status register and have no enable
 * register (enb_reg == -1), so their interrupts cannot be masked.
 */
static struct spear_shirq spear310_shirq_ras1 = {
	.offset		= 0,
	.nr_irqs	= 8,
	.mask		= ((0x1 << 8) - 1) << 0,
	.regs		= {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
	},
};

static struct spear_shirq spear310_shirq_ras2 = {
	.offset		= 8,
	.nr_irqs	= 5,
	.mask		= ((0x1 << 5) - 1) << 8,
	.regs		= {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
	},
};

static struct spear_shirq spear310_shirq_ras3 = {
	.offset		= 13,
	.nr_irqs	= 1,
	.mask		= ((0x1 << 1) - 1) << 13,
	.regs		= {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
	},
};

static struct spear_shirq spear310_shirq_intrcomm_ras = {
	.offset		= 14,
	.nr_irqs	= 3,
	.mask		= ((0x1 << 3) - 1) << 14,
	.regs		= {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
	},
};

static struct spear_shirq *spear310_shirq_blocks[] = {
	&spear310_shirq_ras1,
	&spear310_shirq_ras2,
	&spear310_shirq_ras3,
	&spear310_shirq_intrcomm_ras,
};
133
/* spear320 shared irq registers offsets and masks */
#define SPEAR320_INT_STS_MASK_REG		0x04
#define SPEAR320_INT_CLR_MASK_REG		0x04
#define SPEAR320_INT_ENB_MASK_REG		0x08

/* RAS3 is accounted for in the hwirq numbering but not registered */
static struct spear_shirq spear320_shirq_ras3 = {
	.offset		= 0,
	.nr_irqs	= 7,
	.mask		= ((0x1 << 7) - 1) << 0,
	.disabled	= 1,
};

static struct spear_shirq spear320_shirq_ras1 = {
	.offset		= 7,
	.nr_irqs	= 3,
	.mask		= ((0x1 << 3) - 1) << 7,
	.regs		= {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
	},
};

static struct spear_shirq spear320_shirq_ras2 = {
	.offset		= 10,
	.nr_irqs	= 1,
	.mask		= ((0x1 << 1) - 1) << 10,
	.regs		= {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
	},
};

static struct spear_shirq spear320_shirq_intrcomm_ras = {
	.offset		= 11,
	.nr_irqs	= 11,
	.mask		= ((0x1 << 11) - 1) << 11,
	.regs		= {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
	},
};

static struct spear_shirq *spear320_shirq_blocks[] = {
	&spear320_shirq_ras3,
	&spear320_shirq_ras1,
	&spear320_shirq_ras2,
	&spear320_shirq_intrcomm_ras,
};
182
183static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
viresh kumar4c18e772010-05-03 09:24:30 +0100184{
Lennert Buytenhek0e60e112010-11-29 11:22:33 +0100185 struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000186 u32 val, offset = d->irq - shirq->virq_base;
viresh kumar4c18e772010-05-03 09:24:30 +0100187 unsigned long flags;
188
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530189 if (shirq->regs.enb_reg == -1)
viresh kumar4c18e772010-05-03 09:24:30 +0100190 return;
191
192 spin_lock_irqsave(&lock, flags);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530193 val = readl(shirq->base + shirq->regs.enb_reg);
194
195 if (mask ^ shirq->regs.reset_to_enb)
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000196 val &= ~(0x1 << shirq->offset << offset);
viresh kumar4c18e772010-05-03 09:24:30 +0100197 else
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000198 val |= 0x1 << shirq->offset << offset;
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530199
200 writel(val, shirq->base + shirq->regs.enb_reg);
viresh kumar4c18e772010-05-03 09:24:30 +0100201 spin_unlock_irqrestore(&lock, flags);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530202
203}
204
/* irq_chip .irq_mask/.irq_ack callback: disable a shared interrupt */
static void shirq_irq_mask(struct irq_data *d)
{
	shirq_irq_mask_unmask(d, 1);
}

/* irq_chip .irq_unmask callback: re-enable a shared interrupt */
static void shirq_irq_unmask(struct irq_data *d)
{
	shirq_irq_mask_unmask(d, 0);
}

static struct irq_chip shirq_chip = {
	.name		= "spear-shirq",
	.irq_ack	= shirq_irq_mask,
	.irq_mask	= shirq_irq_mask,
	.irq_unmask	= shirq_irq_unmask,
};
221
222static void shirq_handler(unsigned irq, struct irq_desc *desc)
223{
Thomas Gleixner6845664a2011-03-24 13:25:22 +0100224 struct spear_shirq *shirq = irq_get_handler_data(irq);
Thomas Gleixnere3c871a2014-06-19 21:34:40 +0000225 struct irq_data *idata = irq_desc_get_irq_data(desc);
226 struct irq_chip *chip = irq_data_get_irq_chip(idata);
Thomas Gleixner25dc49e2014-06-19 21:34:42 +0000227 u32 pend;
viresh kumar4c18e772010-05-03 09:24:30 +0100228
Thomas Gleixnere3c871a2014-06-19 21:34:40 +0000229 chip->irq_ack(idata);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530230
Thomas Gleixner25dc49e2014-06-19 21:34:42 +0000231 pend = readl(shirq->base + shirq->regs.status_reg) & shirq->mask;
232 pend >>= shirq->offset;
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530233
Thomas Gleixner25dc49e2014-06-19 21:34:42 +0000234 while (pend) {
235 int irq = __ffs(pend);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530236
Thomas Gleixner25dc49e2014-06-19 21:34:42 +0000237 pend &= ~(0x1 << irq);
238 generic_handle_irq(shirq->virq_base + irq);
viresh kumar4c18e772010-05-03 09:24:30 +0100239 }
Thomas Gleixner25dc49e2014-06-19 21:34:42 +0000240
Thomas Gleixnere3c871a2014-06-19 21:34:40 +0000241 chip->irq_unmask(idata);
viresh kumar4c18e772010-05-03 09:24:30 +0100242}
243
Thomas Gleixnerf37ecbc2014-06-19 21:34:39 +0000244static void __init spear_shirq_register(struct spear_shirq *shirq,
245 int parent_irq)
viresh kumar4c18e772010-05-03 09:24:30 +0100246{
247 int i;
248
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000249 if (shirq->disabled)
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530250 return;
viresh kumar4c18e772010-05-03 09:24:30 +0100251
Thomas Gleixnerf37ecbc2014-06-19 21:34:39 +0000252 irq_set_chained_handler(parent_irq, shirq_handler);
253 irq_set_handler_data(parent_irq, shirq);
254
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000255 for (i = 0; i < shirq->nr_irqs; i++) {
256 irq_set_chip_and_handler(shirq->virq_base + i,
Thomas Gleixnerf38c02f2011-03-24 13:35:09 +0100257 &shirq_chip, handle_simple_irq);
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000258 set_irq_flags(shirq->virq_base + i, IRQF_VALID);
259 irq_set_chip_data(shirq->virq_base + i, shirq);
viresh kumar4c18e772010-05-03 09:24:30 +0100260 }
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530261}
262
263static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
264 struct device_node *np)
265{
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000266 int i, parent_irq, virq_base, hwirq = 0, nr_irqs = 0;
Thomas Gleixnera26c06f2014-06-19 21:34:37 +0000267 struct irq_domain *shirq_domain;
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530268 void __iomem *base;
269
270 base = of_iomap(np, 0);
271 if (!base) {
272 pr_err("%s: failed to map shirq registers\n", __func__);
273 return -ENXIO;
274 }
275
276 for (i = 0; i < block_nr; i++)
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000277 nr_irqs += shirq_blocks[i]->nr_irqs;
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530278
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000279 virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
280 if (IS_ERR_VALUE(virq_base)) {
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530281 pr_err("%s: irq desc alloc failed\n", __func__);
282 goto err_unmap;
283 }
284
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000285 shirq_domain = irq_domain_add_legacy(np, nr_irqs, virq_base, 0,
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530286 &irq_domain_simple_ops, NULL);
287 if (WARN_ON(!shirq_domain)) {
288 pr_warn("%s: irq domain init failed\n", __func__);
289 goto err_free_desc;
290 }
291
292 for (i = 0; i < block_nr; i++) {
293 shirq_blocks[i]->base = base;
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000294 shirq_blocks[i]->virq_base = irq_find_mapping(shirq_domain,
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530295 hwirq);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530296
Thomas Gleixnerf37ecbc2014-06-19 21:34:39 +0000297 parent_irq = irq_of_parse_and_map(np, i);
298 spear_shirq_register(shirq_blocks[i], parent_irq);
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000299 hwirq += shirq_blocks[i]->nr_irqs;
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530300 }
301
viresh kumar4c18e772010-05-03 09:24:30 +0100302 return 0;
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530303
304err_free_desc:
Thomas Gleixnerc5d1d852014-06-19 21:34:39 +0000305 irq_free_descs(virq_base, nr_irqs);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530306err_unmap:
307 iounmap(base);
308 return -ENXIO;
309}
310
/* Device-tree init hook for the spear300 shared irq block */
static int __init spear300_shirq_of_init(struct device_node *np,
		struct device_node *parent)
{
	return shirq_init(spear300_shirq_blocks,
			ARRAY_SIZE(spear300_shirq_blocks), np);
}
IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530318
/* Device-tree init hook for the spear310 shared irq blocks */
static int __init spear310_shirq_of_init(struct device_node *np,
		struct device_node *parent)
{
	return shirq_init(spear310_shirq_blocks,
			ARRAY_SIZE(spear310_shirq_blocks), np);
}
IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init);
Shiraz Hashim80515a5a2012-08-03 15:33:10 +0530326
/* Device-tree init hook for the spear320 shared irq blocks */
static int __init spear320_shirq_of_init(struct device_node *np,
		struct device_node *parent)
{
	return shirq_init(spear320_shirq_blocks,
			ARRAY_SIZE(spear320_shirq_blocks), np);
}
IRQCHIP_DECLARE(spear320_shirq, "st,spear320-shirq", spear320_shirq_of_init);