blob: be4c5a8c96593ac6c218f05283811e5cfee1a158 [file] [log] [blame]
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +02001/*
2 * Marvell Orion SoCs IRQ chip driver.
3 *
4 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11#include <linux/io.h>
12#include <linux/irq.h>
Joel Porquet41a83e02015-07-07 17:11:46 -040013#include <linux/irqchip.h>
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +020014#include <linux/of.h>
15#include <linux/of_address.h>
16#include <linux/of_irq.h>
17#include <asm/exception.h>
18#include <asm/mach/irq.h>
19
/*
 * Orion SoC main interrupt controller
 *
 * Each register bank ("chip") controls 32 interrupt lines.  Offsets
 * below are relative to the per-chip reg base taken from the DT node.
 */
#define ORION_IRQS_PER_CHIP		32

#define ORION_IRQ_CAUSE			0x00	/* pending-interrupt status */
#define ORION_IRQ_MASK			0x04	/* IRQ enable bits (1 = enabled) */
#define ORION_IRQ_FIQ_MASK		0x08	/* FIQ routing mask (unused here) */
#define ORION_IRQ_ENDP_MASK		0x0c	/* endpoint mask (unused here) */

/* Linear domain covering all chips; set up once in orion_irq_init() */
static struct irq_domain *orion_irq_domain;
31
Stephen Boyd8783dd32014-03-04 16:40:30 -080032static void
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +020033__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
34{
35 struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
36 int n, base = 0;
37
38 for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) {
39 struct irq_chip_generic *gc =
40 irq_get_domain_generic_chip(orion_irq_domain, base);
41 u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
42 gc->mask_cache;
43 while (stat) {
Sebastian Hesselbarthbffbc6e2014-04-28 23:12:08 +020044 u32 hwirq = __fls(stat);
Marc Zyngierf4bc9282014-08-26 11:03:25 +010045 handle_domain_irq(orion_irq_domain,
46 gc->irq_base + hwirq, regs);
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +020047 stat &= ~(1 << hwirq);
48 }
49 }
50}
51
/*
 * Probe the Orion main interrupt controller from its DT node.
 *
 * One generic chip (32 IRQs) is created per "reg" entry of the node.
 * Failures here panic(): without the main controller the machine
 * cannot take interrupts at all, so there is nothing to fall back to.
 *
 * Returns 0 on success (never returns on failure).
 */
static int __init orion_irq_init(struct device_node *np,
				 struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int n, ret, base, num_chips = 0;
	struct resource r;

	/* count number of irq chips by valid reg addresses */
	while (of_address_to_resource(np, num_chips, &r) == 0)
		num_chips++;

	/* One linear domain spanning all chips, 32 hwirqs per chip */
	orion_irq_domain = irq_domain_add_linear(np,
				num_chips * ORION_IRQS_PER_CHIP,
				&irq_generic_chip_ops, NULL);
	if (!orion_irq_domain)
		panic("%s: unable to add irq domain\n", np->name);

	ret = irq_alloc_domain_generic_chips(orion_irq_domain,
				ORION_IRQS_PER_CHIP, 1, np->name,
				handle_level_irq, clr, 0,
				IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		panic("%s: unable to alloc irq domain gc\n", np->name);

	for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);

		/* Cannot fail: the same index succeeded in the count loop */
		of_address_to_resource(np, n, &r);

		if (!request_mem_region(r.start, resource_size(&r), np->name))
			panic("%s: unable to request mem region %d",
			      np->name, n);

		gc->reg_base = ioremap(r.start, resource_size(&r));
		if (!gc->reg_base)
			panic("%s: unable to map resource %d", np->name, n);

		gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

		/* mask all interrupts */
		writel(0, gc->reg_base + ORION_IRQ_MASK);
	}

	/* All chips masked and mapped; safe to install the entry handler */
	set_handle_irq(orion_handle_irq);
	return 0;
}
IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
102
/*
 * Orion SoC bridge interrupt controller
 *
 * Secondary controller chained behind one parent IRQ of the main
 * controller.  Offsets are relative to the bridge's reg base.
 */
#define ORION_BRIDGE_IRQ_CAUSE	0x00	/* pending status; write 0-bit to ack */
#define ORION_BRIDGE_IRQ_MASK	0x04	/* enable bits (1 = enabled) */
108
Thomas Gleixnerbd0b9ac2015-09-14 10:42:37 +0200109static void orion_bridge_irq_handler(struct irq_desc *desc)
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +0200110{
Jiang Liu5b292642015-06-04 12:13:20 +0800111 struct irq_domain *d = irq_desc_get_handler_data(desc);
Andrew Lunnd86e9af62014-02-07 00:41:58 +0100112
113 struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +0200114 u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
115 gc->mask_cache;
116
117 while (stat) {
Sebastian Hesselbarthbffbc6e2014-04-28 23:12:08 +0200118 u32 hwirq = __fls(stat);
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +0200119
120 generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq));
121 stat &= ~(1 << hwirq);
122 }
123}
124
/*
 * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
 * To avoid interrupt events on stale irqs, we clear them before unmask.
 */
static unsigned int orion_bridge_irq_startup(struct irq_data *d)
{
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	/* Order matters: ack any stale cause bit, only then unmask */
	ct->chip.irq_ack(d);
	ct->chip.irq_unmask(d);
	return 0;
}
137
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +0200138static int __init orion_bridge_irq_init(struct device_node *np,
139 struct device_node *parent)
140{
141 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
142 struct resource r;
143 struct irq_domain *domain;
144 struct irq_chip_generic *gc;
145 int ret, irq, nrirqs = 32;
146
147 /* get optional number of interrupts provided */
148 of_property_read_u32(np, "marvell,#interrupts", &nrirqs);
149
150 domain = irq_domain_add_linear(np, nrirqs,
151 &irq_generic_chip_ops, NULL);
152 if (!domain) {
153 pr_err("%s: unable to add irq domain\n", np->name);
154 return -ENOMEM;
155 }
156
157 ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
Sebastian Hesselbarth5f400672014-01-23 23:38:05 +0100158 handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +0200159 if (ret) {
160 pr_err("%s: unable to alloc irq domain gc\n", np->name);
161 return ret;
162 }
163
164 ret = of_address_to_resource(np, 0, &r);
165 if (ret) {
166 pr_err("%s: unable to get resource\n", np->name);
167 return ret;
168 }
169
170 if (!request_mem_region(r.start, resource_size(&r), np->name)) {
171 pr_err("%s: unable to request mem region\n", np->name);
172 return -ENOMEM;
173 }
174
175 /* Map the parent interrupt for the chained handler */
176 irq = irq_of_parse_and_map(np, 0);
177 if (irq <= 0) {
178 pr_err("%s: unable to parse irq\n", np->name);
179 return -EINVAL;
180 }
181
182 gc = irq_get_domain_generic_chip(domain, 0);
183 gc->reg_base = ioremap(r.start, resource_size(&r));
184 if (!gc->reg_base) {
185 pr_err("%s: unable to map resource\n", np->name);
186 return -ENOMEM;
187 }
188
189 gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
190 gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
Sebastian Hesselbarthe0318ec2014-01-24 00:10:32 +0100191 gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +0200192 gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
193 gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
194 gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
195
Sebastian Hesselbarth7b119fd2014-01-23 23:38:04 +0100196 /* mask and clear all interrupts */
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +0200197 writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
Sebastian Hesselbarth7b119fd2014-01-23 23:38:04 +0100198 writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +0200199
Thomas Gleixner07d22c22015-06-21 21:10:57 +0200200 irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler,
201 domain);
Sebastian Hesselbarth9dbd90f2013-06-06 18:27:09 +0200202
203 return 0;
204}
205IRQCHIP_DECLARE(orion_bridge_intc,
206 "marvell,orion-bridge-intc", orion_bridge_irq_init);