/*
 * Allwinner A20/A31 SoCs NMI IRQ chip driver.
 *
 * Carlo Caione <carlo.caione@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
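
/*
 * Illustrative device tree node for this controller. The reg range and
 * parent interrupt specifier below are placeholders; the real values come
 * from the SoC reference manual and the DT binding documentation.
 *
 *	nmi_intc: interrupt-controller@01c00030 {
 *		compatible = "allwinner,sun7i-a20-sc-nmi";
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		reg = <0x01c00030 0x0c>;
 *		interrupt-parent = <&gic>;
 *		interrupts = <0 0 4>;
 *	};
 */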

#define DRV_NAME	"sunxi-nmi"
#define pr_fmt(fmt)	DRV_NAME ": " fmt

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>

#define SUNXI_NMI_SRC_TYPE_MASK	0x00000003

enum {
	SUNXI_SRC_TYPE_LEVEL_LOW = 0,
	SUNXI_SRC_TYPE_EDGE_FALLING,
	SUNXI_SRC_TYPE_LEVEL_HIGH,
	SUNXI_SRC_TYPE_EDGE_RISING,
};

struct sunxi_sc_nmi_reg_offs {
	u32 ctrl;
	u32 pend;
	u32 enable;
};

static struct sunxi_sc_nmi_reg_offs sun7i_reg_offs = {
	.ctrl	= 0x00,
	.pend	= 0x04,
	.enable	= 0x08,
};

static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = {
	.ctrl	= 0x00,
	.pend	= 0x04,
	.enable	= 0x34,
};

static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
				      u32 val)
{
	irq_reg_writel(gc, val, off);
}

static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
{
	return irq_reg_readl(gc, off);
}

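/*
 * Chained handler for the parent interrupt: the NMI controller exposes a
 * single interrupt (hwirq 0 in the linear domain), so look up its virq and
 * forward it with the usual chained-IRQ entry/exit bookkeeping.
 */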
static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virq = irq_find_mapping(domain, 0);

	chained_irq_enter(chip, desc);
	generic_handle_irq(virq);
	chained_irq_exit(chip, desc);
}

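/*
 * Program the requested trigger mode into the ctrl register and switch
 * between the two irq_chip_type flavours (level/fasteoi vs. edge)
 * registered in sunxi_sc_nmi_irq_init().
 */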
static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	struct irq_chip_type *ct = gc->chip_types;
	u32 src_type_reg;
	u32 ctrl_off = ct->regs.type;
	unsigned int src_type;
	unsigned int i;

	irq_gc_lock(gc);

	switch (flow_type & IRQF_TRIGGER_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_RISING:
		src_type = SUNXI_SRC_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
		break;
	case IRQ_TYPE_NONE:
	case IRQ_TYPE_LEVEL_LOW:
		src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
		break;
	default:
		irq_gc_unlock(gc);
		pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
			data->irq);
		return -EBADR;
	}

	irqd_set_trigger_type(data, flow_type);
	irq_setup_alt_chip(data, flow_type);

	for (i = 0; i < gc->num_ct; i++, ct++)
		if (ct->type & flow_type)
			ctrl_off = ct->regs.type;

	src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
	src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
	src_type_reg |= src_type;
	sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);

	irq_gc_unlock(gc);

	return IRQ_SET_MASK_OK;
}

static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
					struct sunxi_sc_nmi_reg_offs *reg_offs)
{
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	unsigned int irq;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int ret;


	domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("Could not register interrupt domain.\n");
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
					     handle_fasteoi_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("Could not allocate generic interrupt chip.\n");
		goto fail_irqd_remove;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		pr_err("unable to parse irq\n");
		ret = -EINVAL;
		goto fail_irqd_remove;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = of_iomap(node, 0);
	if (!gc->reg_base) {
		pr_err("unable to map resource\n");
		ret = -ENOMEM;
		goto fail_irqd_remove;
	}

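	/*
	 * Two irq_chip_type entries share the one generic chip: slot 0
	 * covers the level trigger types with the fasteoi flow, slot 1
	 * covers the edge trigger types with the edge flow.  Both use the
	 * same pend/enable/ctrl register layout passed in via reg_offs.
	 */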
	gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
	gc->chip_types[0].regs.ack		= reg_offs->pend;
	gc->chip_types[0].regs.mask		= reg_offs->enable;
	gc->chip_types[0].regs.type		= reg_offs->ctrl;

	gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[1].chip.name		= gc->chip_types[0].chip.name;
	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[1].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[1].regs.ack		= reg_offs->pend;
	gc->chip_types[1].regs.mask		= reg_offs->enable;
	gc->chip_types[1].regs.type		= reg_offs->ctrl;
	gc->chip_types[1].handler		= handle_edge_irq;

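	/* Start from a known state: NMI masked and the pending flag cleared. */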
	sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
	sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);

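	/* Deliver the parent interrupt through the chained handler above. */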
	irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);

	return 0;

fail_irqd_remove:
	irq_domain_remove(domain);

	return ret;
}

static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);

static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);