/*
 * Synopsys DW APB ICTL irqchip driver.
 *
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 *
 * based on GPL'ed 2.6 kernel sources
 *  (c) Marvell International Ltd.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "irqchip.h"

#define APB_INT_ENABLE_L	0x00
#define APB_INT_ENABLE_H	0x04
#define APB_INT_MASK_L		0x08
#define APB_INT_MASK_H		0x0c
#define APB_INT_FINALSTATUS_L	0x30
#define APB_INT_FINALSTATUS_H	0x34
#define APB_INT_BASE_OFFSET	0x04
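/*
 * Note: each _H register sits 4 bytes above its _L counterpart, so the
 * generic chip covering the upper 32 interrupts can reuse the _L register
 * offsets with reg_base shifted by APB_INT_BASE_OFFSET (see the per-bank
 * setup in dw_apb_ictl_init() below). The chained handler walks one such
 * 32-interrupt bank per generic chip.
 */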
static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_domain *d = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int n;

	chained_irq_enter(chip, desc);

	for (n = 0; n < d->revmap_size; n += 32) {
		struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n);
		u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L);

		while (stat) {
			u32 hwirq = ffs(stat) - 1;
			u32 virq = irq_find_mapping(d, gc->irq_base + hwirq);

			generic_handle_irq(virq);
			stat &= ~(1 << hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

#ifdef CONFIG_PM
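/*
 * The controller may have lost its register state across suspend, so on
 * resume re-enable all interrupt sources and restore the cached mask for
 * this chip.
 */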
static void dw_apb_ictl_resume(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	irq_gc_lock(gc);
	writel_relaxed(~0, gc->reg_base + ct->regs.enable);
	writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask);
	irq_gc_unlock(gc);
}
#else
#define dw_apb_ictl_resume	NULL
#endif /* CONFIG_PM */

static int __init dw_apb_ictl_init(struct device_node *np,
				   struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	void __iomem *iobase;
	int ret, nrirqs, irq, i;
	u32 reg;

	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", np->full_name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%s: unable to get resource\n", np->full_name);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
		pr_err("%s: unable to request mem region\n", np->full_name);
		return -ENOMEM;
	}

	iobase = ioremap(r.start, resource_size(&r));
	if (!iobase) {
		pr_err("%s: unable to map resource\n", np->full_name);
		ret = -ENOMEM;
		goto err_release;
	}

	/*
	 * The DW IP can be configured to allow 2-64 irqs. We can determine
	 * the number of irqs supported by writing into the enable register
	 * and looking for bits not set, as the corresponding flip-flops
	 * will have been removed by the synthesis tool.
	 */
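	/*
	 * Worked example (illustrative only): an instance synthesized with
	 * 37 interrupt lines reads back 0x1f from APB_INT_ENABLE_H after
	 * the writes below, giving nrirqs = 32 + fls(0x1f) = 37; one with
	 * only 20 lines reads back 0 there and 0xfffff from
	 * APB_INT_ENABLE_L, so nrirqs = fls(0xfffff) = 20.
	 */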

	/* mask and enable all interrupts */
	writel_relaxed(~0, iobase + APB_INT_MASK_L);
	writel_relaxed(~0, iobase + APB_INT_MASK_H);
	writel_relaxed(~0, iobase + APB_INT_ENABLE_L);
	writel_relaxed(~0, iobase + APB_INT_ENABLE_H);

	reg = readl_relaxed(iobase + APB_INT_ENABLE_H);
	if (reg)
		nrirqs = 32 + fls(reg);
	else
		nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L));

	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", np->full_name);
		ret = -ENOMEM;
		goto err_unmap;
	}

	ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name,
					     handle_level_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
		goto err_unmap;
	}

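	/*
	 * One generic chip per 32-interrupt bank. The controller masks a
	 * source by setting its bit in APB_INT_MASK, hence the set/clr-bit
	 * mask helpers below.
	 */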
	for (i = 0; i < DIV_ROUND_UP(nrirqs, 32); i++) {
		gc = irq_get_domain_generic_chip(domain, i * 32);
		gc->reg_base = iobase + i * APB_INT_BASE_OFFSET;
		gc->chip_types[0].regs.mask = APB_INT_MASK_L;
		gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
	}

	irq_set_chained_handler_and_data(irq, dw_apb_ictl_handler, domain);

	return 0;

err_unmap:
	iounmap(iobase);
err_release:
	release_mem_region(r.start, resource_size(&r));
	return ret;
}
IRQCHIP_DECLARE(dw_apb_ictl, "snps,dw-apb-ictl", dw_apb_ictl_init);