/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

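/*
 * Each combiner group multiplexes IRQ_IN_COMBINER (8) interrupt lines onto
 * one parent interrupt. Four groups share a register frame, one byte of the
 * 32-bit enable/status registers per group, and frames are laid out at a
 * 0x10 stride (see combiner_init_one() and combiner_init() below).
 */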
#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

#define IRQ_IN_COMBINER		8

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
	unsigned int hwirq_offset;
	unsigned int irq_mask;
	void __iomem *base;
	unsigned int parent_irq;
#ifdef CONFIG_PM
	u32 pm_save;
#endif
};

static struct combiner_chip_data *combiner_data;
static struct irq_domain *combiner_irq_domain;
static unsigned int max_nr = 20;

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

static void combiner_handle_cascade_irq(struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = chip_data->hwirq_offset + __ffs(status);
	cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);

	if (unlikely(!cascade_irq))
		handle_bad_irq(desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

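/*
 * The combiner has no affinity control of its own, so setting the affinity
 * of a combined interrupt is forwarded to the parent interrupt (typically a
 * GIC SPI on Exynos SoCs).
 */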
#ifdef CONFIG_SMP
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

	if (chip && chip->irq_set_affinity)
		return chip->irq_set_affinity(data, mask_val, force);
	else
		return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	.irq_mask		= combiner_mask_irq,
	.irq_unmask		= combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= combiner_set_affinity,
#endif
};

static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
					 combiner_data);
}

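/*
 * Set up one combiner group: record which byte of the shared 32-bit
 * enable/status registers belongs to this group, remember the parent
 * interrupt, and mask all of the group's interrupts until they are
 * explicitly unmasked.
 */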
static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
				     unsigned int combiner_nr,
				     void __iomem *base, unsigned int irq)
{
	combiner_data->base = base;
	combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
	combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
	combiner_data->parent_irq = irq;

	/* Disable all interrupts */
	writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}

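/*
 * Translate a two-cell devicetree interrupt specifier (combiner group,
 * interrupt number within the group) into a linear hwirq number.
 */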
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (irq_domain_get_of_node(d) != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}

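/*
 * Associate a newly mapped interrupt with the combiner irq_chip and with
 * the per-group data of its combiner group (eight hwirqs per group, hence
 * the hw >> 3 index into the host_data array).
 */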
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *combiner_data = d->host_data;

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	irq_set_probe(irq);

	return 0;
}

static const struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};

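/*
 * Allocate the per-group data, register a linear irq domain covering all
 * groups, and hook each group's parent interrupt (one "interrupts" entry
 * per group in the devicetree node) up to the chained handler.
 */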
static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq;
	unsigned int nr_irq;

	nr_irq = max_nr * IRQ_IN_COMBINER;

	combiner_data = kcalloc(max_nr, sizeof(*combiner_data), GFP_KERNEL);
	if (!combiner_data) {
		pr_warn("%s: could not allocate combiner data\n", __func__);
		return;
	}

	combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
				&combiner_irq_domain_ops, combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		irq = irq_of_parse_and_map(np, i);

		combiner_init_one(&combiner_data[i], i,
				  combiner_base + (i >> 2) * 0x10, irq);
		combiner_cascade_irq(&combiner_data[i], irq);
	}
}

#ifdef CONFIG_PM

/**
 * combiner_suspend - save interrupt combiner state before suspend
 *
 * Save the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters a sleep state.
 */
static int combiner_suspend(void)
{
	int i;

	for (i = 0; i < max_nr; i++)
		combiner_data[i].pm_save =
			readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);

	return 0;
}

/**
 * combiner_resume - restore interrupt combiner state after resume
 *
 * Restore the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters a sleep state on suspend.
 */
static void combiner_resume(void)
{
	int i;

	for (i = 0; i < max_nr; i++) {
		writel_relaxed(combiner_data[i].irq_mask,
			       combiner_data[i].base + COMBINER_ENABLE_CLEAR);
		writel_relaxed(combiner_data[i].pm_save,
			       combiner_data[i].base + COMBINER_ENABLE_SET);
	}
}

#else
#define combiner_suspend	NULL
#define combiner_resume		NULL
#endif

static struct syscore_ops combiner_syscore_ops = {
	.suspend	= combiner_suspend,
	.resume		= combiner_resume,
};

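/*
 * Devicetree entry point for "samsung,exynos4210-combiner": map the register
 * block, honour the optional "samsung,combiner-nr" property (default of 20
 * groups), and register syscore ops so the enable bits survive suspend.
 */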
static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
		pr_info("%s: number of combiners not specified, setting default as %d.\n",
			__func__, max_nr);
	}

	combiner_init(combiner_base, np);

	register_syscore_ops(&combiner_syscore_ops);

	return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
		combiner_of_init);