/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)	"GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
	bool			single_redist;
};

struct gic_chip_data {
	struct fwnode_handle	*fwnode;
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	unsigned int		irq_nr;
	struct partition_desc	*ppi_descs[16];
};

static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;

static struct gic_kvm_info gic_v3_kvm_info;

#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline int gic_irq_in_rdist(struct irq_data *d)
{
	return gic_irq(d) < 32;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	if (d->hwirq <= 1023)		/* SPI -> dist_base */
		return gic_data.dist_base;

	return NULL;
}

static void gic_do_wait_for_rwp(void __iomem *base)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	};
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}

#ifdef CONFIG_ARM64
static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);

static u64 __maybe_unused gic_read_iar(void)
{
	if (static_branch_unlikely(&is_cavium_thunderx))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
#endif

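/*
 * Wake up (or put to sleep) the redistributor attached to this CPU by
 * clearing (or setting) GICR_WAKER.ProcessorSleep, then poll
 * GICR_WAKER.ChildrenAsleep until the redistributor reflects the
 * requested state.
 */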
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	};
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void __iomem *base;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void (*rwp_wait)(void);
	void __iomem *base;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
	rwp_wait();
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	return gic_configure_irq(irq, type, base, rwp_wait);
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

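/*
 * Convert an MPIDR value to the affinity routing format used by
 * GICD_IROUTER: Aff3 in bits [39:32], Aff2 in [23:16], Aff1 in [15:8]
 * and Aff0 in [7:0].
 */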
static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
	u64 aff;

	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}

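/*
 * Low-level interrupt entry point: acknowledge the interrupt via
 * ICC_IAR1_EL1, dispatch SPIs and LPIs through the irq domain and SGIs
 * to handle_IPI(), with EOI/deactivation handling that depends on
 * whether split EOI/Deactivate mode is in use.
 */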
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;

	do {
		irqnr = gic_read_iar();

		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
			int err;

			if (static_key_true(&supports_deactivate))
				gic_write_eoir(irqnr);

			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected interrupt received!\n");
				if (static_key_true(&supports_deactivate)) {
					if (irqnr < 8192)
						gic_write_dir(irqnr);
				} else {
					gic_write_eoir(irqnr);
				}
			}
			continue;
		}
		if (irqnr < 16) {
			gic_write_eoir(irqnr);
			if (static_key_true(&supports_deactivate))
				gic_write_dir(irqnr);
#ifdef CONFIG_SMP
			/*
			 * Unlike GICv2, we don't need an smp_rmb() here.
			 * The control dependency from gic_read_iar to
			 * the ISB in gic_write_eoir is enough to ensure
			 * that any shared data read by handle_IPI will
			 * be read after the ACK.
			 */
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}

static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < gic_data.irq_nr; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
		       base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
}

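/*
 * Find the redistributor attached to this CPU by matching the affinity
 * value encoded in GICR_TYPER bits [63:32] against our MPIDR, scanning
 * every redistributor region we know about.
 */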
static int gic_populate_rdist(void)
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int i;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				u64 offset = ptr - gic_data.redist_regions[i].redist_base;
				gic_data_rdist_rd_base() = ptr;
				gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
				pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
					smp_processor_id(), mpidr, i,
					&gic_data_rdist()->phys_base);
				return 0;
			}

			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(), mpidr);
	return -ENODEV;
}

static void gic_cpu_sys_reg_init(void)
{
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);

	if (static_key_true(&supports_deactivate)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}

static int gic_dist_supports_lpis(void)
{
	return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
}

static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	rbase = gic_data_rdist_sgi_base();

	/* Configure SGIs/PPIs as non-secure Group-1 */
	writel_relaxed(~0, rbase + GICR_IGROUPR0);

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* Give LPIs a spin */
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_cpu_init();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init();
	return 0;
}

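/*
 * Build the SGI target list for one cluster: set one bit per Aff0 value
 * for every CPU in @mask that belongs to @cluster_id, and advance
 * *base_cpu to the last CPU handled in that cluster.
 */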
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int next_cpu, cpu = *base_cpu;
	unsigned long mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		if (WARN_ON((mpidr & 0xff) >= 16))
			goto out;

		tlist |= 1 << (mpidr & 0xf);

		next_cpu = cpumask_next(cpu, mask);
		if (next_cpu >= nr_cpu_ids)
			goto out;
		cpu = next_cpu;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != (mpidr & ~0xffUL)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}

static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	wmb();

	for_each_cpu(cpu, mask) {
		unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, irq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

static void gic_smp_init(void)
{
	set_smp_cross_call(gic_raise_softirq);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
				  "AP_IRQ_GICV3_STARTING", gic_starting_cpu,
				  NULL);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	void __iomem *reg;
	int enabled;
	u64 val;

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity	NULL
#define gic_smp_init()		do { } while(0)
#endif

#ifdef CONFIG_CPU_PM
/* Check whether it's single security state view */
static bool gic_dist_security_disabled(void)
{
	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		if (gic_dist_security_disabled())
			gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};

#define GIC_ID_NR		(1U << gic_data.rdists.id_bits)

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;

	if (static_key_true(&supports_deactivate))
		chip = &gic_eoimode1_chip;

	/* SGIs are private to the core kernel */
	if (hw < 16)
		return -EPERM;
	/* Nothing here */
	if (hw >= gic_data.irq_nr && hw < 8192)
		return -EPERM;
	/* Off limits */
	if (hw >= GIC_ID_NR)
		return -EPERM;

	/* PPIs */
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		irq_set_status_flags(irq, IRQ_NOAUTOEN);
	}
	/* SPIs */
	if (hw >= 32 && hw < gic_data.irq_nr) {
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
	}
	/* LPIs */
	if (hw >= 8192 && hw < GIC_ID_NR) {
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
	}

	return 0;
}

static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		case GIC_IRQ_TYPE_LPI:	/* LPI */
			*hwirq = fwspec->param[1];
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];
		return 0;
	}

	return -EINVAL;
}

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		gic_irq_domain_map(domain, virq + i, hwirq + i);

	return 0;
}

static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static int gic_irq_domain_select(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 enum irq_domain_bus_token bus_token)
{
	/* Not for us */
	if (fwspec->fwnode != d->fwnode)
		return 0;

	/* If this is not DT, then we have a single domain */
	if (!is_of_node(fwspec->fwnode))
		return 1;

	/*
	 * If this is a PPI and we have a 4th (non-null) parameter,
	 * then we need to match the partition domain.
	 */
	if (fwspec->param_count >= 4 &&
	    fwspec->param[0] == 1 && fwspec->param[3] != 0)
		return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);

	return d == gic_data.domain;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.select = gic_irq_domain_select,
};

static int partition_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	struct device_node *np;
	int ret;

	np = of_find_node_by_phandle(fwspec->param[3]);
	if (WARN_ON(!np))
		return -EINVAL;

	ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
				     of_node_to_fwnode(np));
	if (ret < 0)
		return ret;

	*hwirq = ret;
	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static const struct irq_domain_ops partition_domain_ops = {
	.translate = partition_domain_translate,
	.select = gic_irq_domain_select,
};

static void gicv3_enable_quirks(void)
{
#ifdef CONFIG_ARM64
	if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
		static_branch_enable(&is_cavium_thunderx);
#endif
}

static int __init gic_init_bases(void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	u32 typer;
	int gic_irqs;
	int err;

	if (!is_hyp_mode_available())
		static_key_slow_dec(&supports_deactivate);

	if (static_key_true(&supports_deactivate))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	gicv3_enable_quirks();

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
	gic_irqs = GICD_TYPER_IRQS(typer);
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);

	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_init(handle, &gic_data.rdists, gic_data.domain);

	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();
	gic_cpu_pm_init();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}

static int __init gic_validate_dist_version(void __iomem *dist_base)
{
	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
		return -ENODEV;

	return 0;
}

static int get_cpu_number(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;
	int i;

	cell = of_get_property(dn, "reg", NULL);
	if (!cell)
		return -1;

	hwid = of_read_number(cell, of_n_addr_cells(dn));

	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK)
		return -1;

	for (i = 0; i < num_possible_cpus(); i++)
		if (cpu_logical_map(i) == hwid)
			return i;

	return -1;
}

/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
	struct device_node *parts_node, *child_part;
	int part_idx = 0, i;
	int nr_parts;
	struct partition_affinity *parts;

	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
	if (!parts_node)
		return;

	nr_parts = of_get_child_count(parts_node);

	if (!nr_parts)
		goto out_put_node;

	parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
	if (WARN_ON(!parts))
		goto out_put_node;

	for_each_child_of_node(parts_node, child_part) {
		struct partition_affinity *part;
		int n;

		part = &parts[part_idx];

		part->partition_id = of_node_to_fwnode(child_part);

		pr_info("GIC: PPI partition %s[%d] { ",
			child_part->name, part_idx);

		n = of_property_count_elems_of_size(child_part, "affinity",
						    sizeof(u32));
		WARN_ON(n <= 0);

		for (i = 0; i < n; i++) {
			int err, cpu;
			u32 cpu_phandle;
			struct device_node *cpu_node;

			err = of_property_read_u32_index(child_part, "affinity",
							 i, &cpu_phandle);
			if (WARN_ON(err))
				continue;

			cpu_node = of_find_node_by_phandle(cpu_phandle);
			if (WARN_ON(!cpu_node))
				continue;

			cpu = get_cpu_number(cpu_node);
			if (WARN_ON(cpu == -1))
				continue;

			pr_cont("%s[%d] ", cpu_node->full_name, cpu);

			cpumask_set_cpu(cpu, &part->mask);
		}

		pr_cont("}\n");
		part_idx++;
	}

	for (i = 0; i < 16; i++) {
		unsigned int irq;
		struct partition_desc *desc;
		struct irq_fwspec ppi_fwspec = {
			.fwnode		= gic_data.fwnode,
			.param_count	= 3,
			.param		= {
				[0]	= 1,
				[1]	= i,
				[2]	= IRQ_TYPE_NONE,
			},
		};

		irq = irq_create_fwspec_mapping(&ppi_fwspec);
		if (WARN_ON(!irq))
			continue;
		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
					     irq, &partition_domain_ops);
		if (WARN_ON(!desc))
			continue;

		gic_data.ppi_descs[i] = desc;
	}

out_put_node:
	of_node_put(parts_node);
}

static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource r;
	u32 gicv_idx;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	if (of_property_read_u32(node, "#redistributor-regions",
				 &gicv_idx))
		gicv_idx = 1;

	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, gicv_idx, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_set_kvm_info(&gic_v3_kvm_info);
}

static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%s: unable to map gic dist registers\n",
			node->full_name);
		return -ENXIO;
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%s: no distributor detected, giving up\n",
			node->full_name);
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
			     redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);
	gic_of_setup_kvm_info(node);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);

#ifdef CONFIG_ACPI
static struct
{
	void __iomem *dist_base;
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vcpu_base;
} acpi_data __initdata;

static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
	static int count = 0;

	acpi_data.redist_regs[count].phys_base = phys_base;
	acpi_data.redist_regs[count].redist_base = redist_base;
	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
	count++;
}

static int __init
gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
			   const unsigned long end)
{
	struct acpi_madt_generic_redistributor *redist =
			(struct acpi_madt_generic_redistributor *)header;
	void __iomem *redist_base;

	redist_base = ioremap(redist->base_address, redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}

	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}

static int __init
gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;
	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	/* A GICC entry with !ACPI_MADT_ENABLED is unusable, so skip it */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}

static int __init gic_acpi_collect_gicr_base(void)
{
	acpi_tbl_entry_handler redist_parser;
	enum acpi_madt_type type;

	if (acpi_data.single_redist) {
		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
		redist_parser = gic_acpi_parse_madt_gicc;
	} else {
		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
		redist_parser = gic_acpi_parse_madt_redist;
	}

	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}

static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
				  const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}

static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If GICC is enabled and has valid gicr base address, then it means
	 * GICR base is presented via GICC
	 */
	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
		return 0;

	/*
	 * It is perfectly valid for firmware to pass a disabled GICC entry;
	 * don't treat it as an error, skip the entry instead of failing the
	 * probe.
	 */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	return -ENODEV;
}

static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. It is not allowed
	 * to mix redistributor description, GICR and GICC subtables have to be
	 * mutually exclusive.
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0)
		acpi_data.single_redist = true;

	return count;
}

static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}

static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header,
						const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	int maint_irq_mode;
	static int first_madt = true;

	/* Skip unusable CPUs */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}

	/*
	 * The maintenance interrupt and GICV should be the same for every CPU
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}

static bool __init gic_acpi_collect_virt_info(void)
{
	int count;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_virt_madt_gicc, 0);

	return (count > 0);
}

#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_set_kvm_info(&gic_v3_kvm_info);
}

static int __init
gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct fwnode_handle *domain_handle;
	size_t size;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up",
			acpi_data.dist_base);
		goto out_dist_unmap;
	}

	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
	if (!domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
			     acpi_data.nr_redist_regions, 0, domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
	gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif