blob: 19215cef0df256843558a905f7c78c22c49d5f4a [file] [log] [blame]
Marc Zyngier021f6532014-06-30 16:01:31 +01001/*
2 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
Julien Grall68628bb2016-04-11 16:32:55 +010018#define pr_fmt(fmt) "GICv3: " fmt
19
Tomasz Nowickiffa7d612016-01-19 14:11:15 +010020#include <linux/acpi.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010021#include <linux/cpu.h>
Sudeep Holla3708d522014-08-26 16:03:35 +010022#include <linux/cpu_pm.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010023#include <linux/delay.h>
24#include <linux/interrupt.h>
Tomasz Nowickiffa7d612016-01-19 14:11:15 +010025#include <linux/irqdomain.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010026#include <linux/of.h>
27#include <linux/of_address.h>
28#include <linux/of_irq.h>
29#include <linux/percpu.h>
30#include <linux/slab.h>
Channagoud Kadabidf164542016-09-19 20:24:21 -070031#include <linux/msm_rtb.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010032
Joel Porquet41a83e02015-07-07 17:11:46 -040033#include <linux/irqchip.h>
Julien Grall1839e572016-04-11 16:32:57 +010034#include <linux/irqchip/arm-gic-common.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010035#include <linux/irqchip/arm-gic-v3.h>
Marc Zyngiere3825ba2016-04-11 09:57:54 +010036#include <linux/irqchip/irq-partition-percpu.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010037
38#include <asm/cputype.h>
39#include <asm/exception.h>
40#include <asm/smp_plat.h>
Marc Zyngier0b6a3da2015-08-26 17:00:42 +010041#include <asm/virt.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010042
Runmin Wangcdf2b972016-06-23 11:13:24 -070043#include <linux/syscore_ops.h>
44
Marc Zyngier021f6532014-06-30 16:01:31 +010045#include "irq-gic-common.h"
Marc Zyngier021f6532014-06-30 16:01:31 +010046
#define MAX_IRQ			1020U	/* Max number of SGI+PPI+SPI */
#define SPI_START_IRQ		32	/* SPI start irq number */
#define GICD_ICFGR_BITS		2	/* 2 bits per irq in GICD_ICFGR */
#define GICD_ISENABLER_BITS	1	/* 1 bit per irq in GICD_ISENABLER */
#define GICD_IPRIORITYR_BITS	8	/* 8 bits per irq in GICD_IPRIORITYR */

/* 32 bit mask with lower n bits set */
#define UMASK_LOW(n)		(~0U >> (32 - (n)))

/* Number of 32-bit words required to store all irqs, for
 * registers where each word stores configuration for each irq
 * in bits_per_irq bits.
 */
#define NUM_IRQ_WORDS(bits_per_irq)	(DIV_ROUND_UP(MAX_IRQ, \
						      32 / (bits_per_irq)))
#define MAX_IRQS_IGNORE		10

/* Clamp an irq count to the architectural maximum MAX_IRQ */
#define IRQ_NR_BOUND(nr)	min((nr), MAX_IRQ)

/* Bitmap of irqs whose distributor state must be restored
 * by gic_v3_dist_restore(); filled by the _check helpers.
 */
static DECLARE_BITMAP(irqs_restore, MAX_IRQ);

/* Bitmap to irqs, for which restore is ignored.
 * Presently, only GICD_IROUTER mismatches are
 * ignored.
 */
static DECLARE_BITMAP(irqs_ignore_restore, MAX_IRQ);
74
/* One contiguous MMIO region holding one or more redistributors */
struct redist_region {
	void __iomem		*redist_base;	/* mapped virtual base */
	phys_addr_t		phys_base;	/* physical base of the region */
	bool			single_redist;	/* region holds exactly one RD */
};

/* Driver-wide state; a single instance (gic_data) exists */
struct gic_chip_data {
	struct fwnode_handle	*fwnode;
	void __iomem		*dist_base;	/* GICD register frame */
	struct redist_region	*redist_regions;
	struct rdists		rdists;		/* per-CPU redistributor info */
	struct irq_domain	*domain;
	u64			redist_stride;	/* 0 = derive stride from TYPER */
	u32			nr_redist_regions;
	unsigned int		irq_nr;		/* number of usable irqs */
	struct partition_desc	*ppi_descs[16];

	/* SPI state snapshot taken by gic_v3_dist_save() */
	u64			saved_spi_router[MAX_IRQ];
	u32			saved_spi_enable[NUM_IRQ_WORDS(GICD_ISENABLER_BITS)];
	u32			saved_spi_cfg[NUM_IRQ_WORDS(GICD_ICFGR_BITS)];
	u32			saved_spi_priority[NUM_IRQ_WORDS(GICD_IPRIORITYR_BITS)];

	/* XOR of current vs saved state, computed by the _check helpers */
	u64			changed_spi_router[MAX_IRQ];
	u32			changed_spi_enable[NUM_IRQ_WORDS(GICD_ISENABLER_BITS)];
	u32			changed_spi_cfg[NUM_IRQ_WORDS(GICD_ICFGR_BITS)];
	u32			changed_spi_priority[NUM_IRQ_WORDS(GICD_IPRIORITYR_BITS)];
};
102
static struct gic_chip_data gic_data __read_mostly;
/* When set, EOI only drops priority and a separate DIR write deactivates */
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;

static struct gic_kvm_info gic_v3_kvm_info;

/* Index into the save/restore tables below, one entry per GICD register */
enum gicd_save_restore_reg {
	SAVED_ICFGR,
	SAVED_IS_ENABLER,
	SAVED_IPRIORITYR,
	NUM_SAVED_GICD_REGS,
};

/* Stores start address of spi config for saved gicd regs */
static u32 *saved_spi_regs_start[NUM_SAVED_GICD_REGS] = {
	[SAVED_ICFGR] = gic_data.saved_spi_cfg,
	[SAVED_IS_ENABLER] = gic_data.saved_spi_enable,
	[SAVED_IPRIORITYR] = gic_data.saved_spi_priority,
};

/* Stores start address of spi config for changed gicd regs */
static u32 *changed_spi_regs_start[NUM_SAVED_GICD_REGS] = {
	[SAVED_ICFGR] = gic_data.changed_spi_cfg,
	[SAVED_IS_ENABLER] = gic_data.changed_spi_enable,
	[SAVED_IPRIORITYR] = gic_data.changed_spi_priority,
};

/* GICD offset for saved registers */
static u32 gicd_offset[NUM_SAVED_GICD_REGS] = {
	[SAVED_ICFGR] = GICD_ICFGR,
	[SAVED_IS_ENABLER] = GICD_ISENABLER,
	[SAVED_IPRIORITYR] = GICD_IPRIORITYR,
};

/* Bits per irq word, for gicd saved registers */
static u32 gicd_reg_bits_per_irq[NUM_SAVED_GICD_REGS] = {
	[SAVED_ICFGR] = GICD_ICFGR_BITS,
	[SAVED_IS_ENABLER] = GICD_ISENABLER_BITS,
	[SAVED_IPRIORITYR] = GICD_IPRIORITYR_BITS,
};

/* Iterate i over every 32-bit SPI word of register @reg, up to irq_nr */
#define for_each_spi_irq_word(i, reg) \
	for (i = 0; \
	     i < DIV_ROUND_UP(IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ, \
			      32 / gicd_reg_bits_per_irq[reg]); \
	     i++)

/* Read the i-th SPI word of @reg; word 0 starts at irq SPI_START_IRQ */
#define read_spi_word_offset(base, reg, i) \
	readl_relaxed_no_log( \
			base + gicd_offset[reg] + i * 4 + \
			SPI_START_IRQ * gicd_reg_bits_per_irq[reg] / 8)

/* Write back the i-th saved SPI word of @reg (same offset as above) */
#define restore_spi_word_offset(base, reg, i) \
	writel_relaxed_no_log( \
			saved_spi_regs_start[reg][i],\
			base + gicd_offset[reg] + i * 4 + \
			SPI_START_IRQ * gicd_reg_bits_per_irq[reg] / 8)

#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0
166
167static inline unsigned int gic_irq(struct irq_data *d)
168{
169 return d->hwirq;
170}
171
/* SGIs and PPIs (hwirq 0..31) are managed through the redistributor */
static inline int gic_irq_in_rdist(struct irq_data *irqd)
{
	return gic_irq(irqd) < 32;
}
176
177static inline void __iomem *gic_dist_base(struct irq_data *d)
178{
179 if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */
180 return gic_data_rdist_sgi_base();
181
182 if (d->hwirq <= 1023) /* SPI -> dist_base */
183 return gic_data.dist_base;
184
Marc Zyngier021f6532014-06-30 16:01:31 +0100185 return NULL;
186}
187
188static void gic_do_wait_for_rwp(void __iomem *base)
189{
190 u32 count = 1000000; /* 1s! */
191
Runmin Wang62c17dc2016-09-09 17:33:20 -0700192 while (readl_relaxed_no_log(base + GICD_CTLR) & GICD_CTLR_RWP) {
Marc Zyngier021f6532014-06-30 16:01:31 +0100193 count--;
194 if (!count) {
195 pr_err_ratelimited("RWP timeout, gone fishing\n");
196 return;
197 }
198 cpu_relax();
199 udelay(1);
200 };
201}
202
203/* Wait for completion of a distributor change */
204static void gic_dist_wait_for_rwp(void)
205{
206 gic_do_wait_for_rwp(gic_data.dist_base);
207}
208
209/* Wait for completion of a redistributor change */
210static void gic_redist_wait_for_rwp(void)
211{
212 gic_do_wait_for_rwp(gic_data_rdist_rd_base());
213}
214
#ifdef CONFIG_ARM64

/*
 * Read ICC_IAR1_EL1, using the Cavium ThunderX variant when the
 * erratum-23154 CPU capability is present.
 */
static u64 __maybe_unused gic_read_iar(void)
{
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
#endif
Marc Zyngier021f6532014-06-30 16:01:31 +0100225
/*
 * Snapshot GICD SPI state (ICFGR/ISENABLER/IPRIORITYR words and the
 * per-irq GICD_IROUTER) so gic_v3_dist_restore() can later detect and
 * undo any changes. Also resets the restore bookkeeping.
 */
void gic_v3_dist_save(void)
{
	void __iomem *base = gic_data.dist_base;
	int reg, i;

	if (!base)
		return;

	bitmap_zero(irqs_restore, MAX_IRQ);

	for (reg = SAVED_ICFGR; reg < NUM_SAVED_GICD_REGS; reg++) {
		for_each_spi_irq_word(i, reg) {
			saved_spi_regs_start[reg][i] =
				read_spi_word_offset(base, reg, i);
			changed_spi_regs_start[reg][i] = 0;
		}
	}

	/* IROUTER is one 64-bit register per SPI, saved individually */
	for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++) {
		gic_data.saved_spi_router[i] =
			gic_read_irouter(base + GICD_IROUTER + i * 8);
		gic_data.changed_spi_router[i] = 0;
	}
}
250
/*
 * Compare the current contents of @reg against the snapshot taken by
 * gic_v3_dist_save(). For every irq whose field differs, set its bit
 * in irqs_restore and record the XOR delta in the changed_* array.
 */
static void _gicd_check_reg(enum gicd_save_restore_reg reg)
{
	void __iomem *base = gic_data.dist_base;
	u32 *saved_spi_cfg = saved_spi_regs_start[reg];
	u32 *changed_spi_cfg = changed_spi_regs_start[reg];
	u32 bits_per_irq = gicd_reg_bits_per_irq[reg];
	u32 current_cfg = 0;
	int i, j = SPI_START_IRQ, l;	/* j: first irq of current word */
	u32 k;

	for_each_spi_irq_word(i, reg) {
		current_cfg = read_spi_word_offset(base, reg, i);
		if (current_cfg != saved_spi_cfg[i]) {
			/* Walk the XOR field-by-field to find changed irqs */
			for (k = current_cfg ^ saved_spi_cfg[i],
			     l = 0; k ; k >>= bits_per_irq, l++) {
				if (k & UMASK_LOW(bits_per_irq))
					set_bit(j+l, irqs_restore);
			}
			changed_spi_cfg[i] = current_cfg ^ saved_spi_cfg[i];
		}
		j += 32 / bits_per_irq;
	}
}

#define _gic_v3_dist_check_icfgr() \
	_gicd_check_reg(SAVED_ICFGR)
#define _gic_v3_dist_check_ipriorityr() \
	_gicd_check_reg(SAVED_IPRIORITYR)
#define _gic_v3_dist_check_isenabler() \
	_gicd_check_reg(SAVED_IS_ENABLER)
281
282static void _gic_v3_dist_check_irouter(void)
283{
284 void __iomem *base = gic_data.dist_base;
285 u64 current_irouter_cfg = 0;
286 int i;
287
288 for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++) {
289 if (test_bit(i, irqs_ignore_restore))
290 continue;
291 current_irouter_cfg = gic_read_irouter(
292 base + GICD_IROUTER + i * 8);
293 if (current_irouter_cfg != gic_data.saved_spi_router[i]) {
294 set_bit(i, irqs_restore);
295 gic_data.changed_spi_router[i] =
296 current_irouter_cfg ^ gic_data.saved_spi_router[i];
297 }
298 }
299}
300
/*
 * Write back the saved snapshot of @reg for every SPI word the check
 * phase flagged as changed (non-zero XOR delta).
 */
static void _gic_v3_dist_restore_reg(enum gicd_save_restore_reg reg)
{
	void __iomem *base = gic_data.dist_base;
	int i;

	for_each_spi_irq_word(i, reg) {
		if (changed_spi_regs_start[reg][i])
			restore_spi_word_offset(base, reg, i);
	}

	/* Commit all restored configurations before subsequent writes */
	wmb();
}

#define _gic_v3_dist_restore_icfgr()	_gic_v3_dist_restore_reg(SAVED_ICFGR)
#define _gic_v3_dist_restore_ipriorityr() \
	_gic_v3_dist_restore_reg(SAVED_IPRIORITYR)
318
/*
 * For every irq flagged in irqs_restore, set its bit in the
 * write-one-to-set register bank at @offset (e.g. GICD_ISPENDR).
 * The "+ 4" skips word 0 (SGIs/PPIs): word handling starts at
 * SPI_START_IRQ, i.e. register word index 1.
 */
static void _gic_v3_dist_restore_set_reg(u32 offset)
{
	void __iomem *base = gic_data.dist_base;
	int i, j = SPI_START_IRQ, l;
	int irq_nr = IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ;

	for (i = 0; i < DIV_ROUND_UP(irq_nr, 32); i++, j += 32) {
		u32 reg_val = readl_relaxed_no_log(base + offset + i * 4 + 4);
		bool irqs_restore_updated = 0;

		for (l = 0; l < 32; l++) {
			if (test_bit(j+l, irqs_restore)) {
				reg_val |= BIT(l);
				irqs_restore_updated = 1;
			}
		}

		/* Only touch the hardware when this word needs an update */
		if (irqs_restore_updated) {
			writel_relaxed_no_log(
				reg_val, base + offset + i * 4 + 4);
		}
	}

	/* Commit restored configuration updates before subsequent writes */
	wmb();
}

#define _gic_v3_dist_restore_isenabler() \
	_gic_v3_dist_restore_reg(SAVED_IS_ENABLER)

#define _gic_v3_dist_restore_ispending() \
	_gic_v3_dist_restore_set_reg(GICD_ISPENDR)
351
352static void _gic_v3_dist_restore_irouter(void)
353{
354 void __iomem *base = gic_data.dist_base;
355 int i;
356
357 for (i = 32; i < IRQ_NR_BOUND(gic_data.irq_nr); i++) {
358 if (test_bit(i, irqs_ignore_restore))
359 continue;
360 if (gic_data.changed_spi_router[i]) {
361 gic_write_irouter(gic_data.saved_spi_router[i],
362 base + GICD_IROUTER + i * 8);
363 }
364 }
365
366 /* Commit GICD_IROUTER writes before subsequent writes */
367 wmb();
368}
369
/*
 * For every irq flagged in irqs_restore, write its bit to the
 * write-one-to-clear register bank at @offset (GICD_ICENABLER,
 * GICD_ICPENDR or GICD_ICACTIVER). As in the set variant, "+ 4"
 * skips the SGI/PPI word so word 0 here maps to irq SPI_START_IRQ.
 */
static void _gic_v3_dist_clear_reg(u32 offset)
{
	void __iomem *base = gic_data.dist_base;
	int i, j = SPI_START_IRQ, l;
	int irq_nr = IRQ_NR_BOUND(gic_data.irq_nr) - SPI_START_IRQ;

	for (i = 0; i < DIV_ROUND_UP(irq_nr, 32); i++, j += 32) {
		u32 clear = 0;
		bool irqs_restore_updated = 0;

		for (l = 0; l < 32; l++) {
			if (test_bit(j+l, irqs_restore)) {
				clear |= BIT(l);
				irqs_restore_updated = 1;
			}
		}

		/* Only touch the hardware when this word needs an update */
		if (irqs_restore_updated) {
			writel_relaxed_no_log(
				clear, base + offset + i * 4 + 4);
		}
	}

	/* Commit clearing of irq config before subsequent writes */
	wmb();
}

#define _gic_v3_dist_set_icenabler() \
	_gic_v3_dist_clear_reg(GICD_ICENABLER)

#define _gic_v3_dist_set_icpending() \
	_gic_v3_dist_clear_reg(GICD_ICPENDR)

#define _gic_v3_dist_set_icactive() \
	_gic_v3_dist_clear_reg(GICD_ICACTIVER)
405
/* Restore GICD state for SPIs. SPI configuration is restored
 * for GICD_ICFGR, GICD_ISENABLER, GICD_IPRIORITYR, GICD_IROUTER
 * registers. Following is the sequence for restore:
 *
 * 1. For SPIs, check whether any of GICD_ICFGR, GICD_ISENABLER,
 *    GICD_IPRIORITYR, GICD_IROUTER, current configuration is
 *    different from saved configuration.
 *
 * For all irqs, with mismatched configurations,
 *
 * 2. Set GICD_ICENABLER and wait for its completion.
 *
 * 3. Restore any changed GICD_ICFGR, GICD_IPRIORITYR, GICD_IROUTER
 *    configurations.
 *
 * 4. Set GICD_ICACTIVER.
 *
 * 5. Set pending for the interrupt.
 *
 * 6. Restore Enable bit of interrupt and wait for its completion.
 *
 */
void gic_v3_dist_restore(void)
{
	/* Nothing to do if the distributor was never mapped/saved */
	if (!gic_data.dist_base)
		return;

	_gic_v3_dist_check_icfgr();
	_gic_v3_dist_check_ipriorityr();
	_gic_v3_dist_check_isenabler();
	_gic_v3_dist_check_irouter();

	/* Fast path: nothing diverged from the saved snapshot */
	if (bitmap_empty(irqs_restore, IRQ_NR_BOUND(gic_data.irq_nr)))
		return;

	_gic_v3_dist_set_icenabler();
	gic_dist_wait_for_rwp();

	_gic_v3_dist_restore_icfgr();
	_gic_v3_dist_restore_ipriorityr();
	_gic_v3_dist_restore_irouter();

	_gic_v3_dist_set_icactive();

	_gic_v3_dist_set_icpending();
	_gic_v3_dist_restore_ispending();

	_gic_v3_dist_restore_isenabler();
	gic_dist_wait_for_rwp();

	/* Commit all writes before proceeding */
	wmb();
}
459
Lingutla Chandrasekhard3b73782017-08-31 16:31:05 +0530460/*
461 * gic_show_pending_irq - Shows the pending interrupts
462 * Note: Interrupts should be disabled on the cpu from which
463 * this is called to get accurate list of pending interrupts.
464 */
465void gic_show_pending_irqs(void)
466{
467 void __iomem *base;
468 u32 pending[32], enabled;
469 unsigned int j;
470
471 base = gic_data.dist_base;
472 for (j = 0; j * 32 < gic_data.irq_nr; j++) {
473 enabled = readl_relaxed(base +
474 GICD_ISENABLER + j * 4);
475 pending[j] = readl_relaxed(base +
476 GICD_ISPENDR + j * 4);
477 pending[j] &= enabled;
478 pr_err("Pending irqs[%d] %x\n", j, pending[j]);
479 }
480}
481
482/*
483 * get_gic_highpri_irq - Returns next high priority interrupt on current CPU
484 */
485unsigned int get_gic_highpri_irq(void)
486{
487 unsigned long flags;
488 unsigned int val = 0;
489
490 local_irq_save(flags);
491 val = read_gicreg(ICC_HPPIR1_EL1);
492 local_irq_restore(flags);
493
494 if (val >= 1020)
495 return 0;
496 return val;
497}
498
/*
 * Wake (@enable) or put to sleep (!@enable) this CPU's redistributor
 * via GICR_WAKER, then poll ChildrenAsleep for up to ~1s until the
 * transition completes.
 */
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	/* Wait until ChildrenAsleep reflects the requested state */
	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	};
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}
532
533/*
534 * Routines to disable, enable, EOI and route interrupts
535 */
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000536static int gic_peek_irq(struct irq_data *d, u32 offset)
537{
538 u32 mask = 1 << (gic_irq(d) % 32);
539 void __iomem *base;
540
541 if (gic_irq_in_rdist(d))
542 base = gic_data_rdist_sgi_base();
543 else
544 base = gic_data.dist_base;
545
Runmin Wang62c17dc2016-09-09 17:33:20 -0700546 return !!(readl_relaxed_no_log(base + offset + (gic_irq(d) / 32) * 4) & mask);
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000547}
548
Marc Zyngier021f6532014-06-30 16:01:31 +0100549static void gic_poke_irq(struct irq_data *d, u32 offset)
550{
551 u32 mask = 1 << (gic_irq(d) % 32);
552 void (*rwp_wait)(void);
553 void __iomem *base;
554
555 if (gic_irq_in_rdist(d)) {
556 base = gic_data_rdist_sgi_base();
557 rwp_wait = gic_redist_wait_for_rwp;
558 } else {
559 base = gic_data.dist_base;
560 rwp_wait = gic_dist_wait_for_rwp;
561 }
562
563 writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
564 rwp_wait();
565}
566
/* Mask @d by writing its bit to GICD/GICR ICENABLER */
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}
571
/* Mask @d; in EOImode 1, also deactivate it if forwarded to a vcpu */
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}
586
/* Unmask @d by writing its bit to GICD/GICR ISENABLER */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}
591
/*
 * irq_chip .irq_set_irqchip_state callback: force the pending, active
 * or masked state of @d via the matching set/clear register.
 * Returns -EINVAL for LPIs (hwirq >= irq_nr) or unknown states.
 */
static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= gic_data.irq_nr)	/* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		/* Note the inversion: masked == NOT enabled */
		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}
620
/*
 * irq_chip .irq_get_irqchip_state callback: report the pending, active
 * or masked state of @d into *val. Returns -EINVAL for LPIs or
 * unknown states.
 */
static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= gic_data.irq_nr)	/* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		/* Masked means the enable bit is clear */
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
646
/* End-of-interrupt for @d via ICC_EOIR1_EL1 */
static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}
651
/* EOImode 1 deactivate: write ICC_DIR_EL1 unless the irq is exempt */
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}
662
/*
 * irq_chip .irq_set_type callback: program edge/level trigger for @d.
 * SGIs are fixed; SPIs only accept level-high or edge-rising.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	return gic_configure_irq(irq, type, base, rwp_wait);
}
688
/* Track whether @d is forwarded to a guest vcpu (non-NULL @vcpu) */
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}
697
#ifdef CONFIG_PM

/* Nothing to save on suspend; resume logging is the useful part */
static int gic_suspend(void)
{
	return 0;
}

/*
 * Log which enabled interrupts are pending at resume time (likely
 * wakeup sources). Gated by the module-external msm_show_resume_irq_mask.
 */
static void gic_show_resume_irq(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 enabled;
	u32 pending[32];
	void __iomem *base = gic_data.dist_base;

	if (!msm_show_resume_irq_mask)
		return;

	for (i = 0; i * 32 < gic->irq_nr; i++) {
		enabled = readl_relaxed(base + GICD_ICENABLER + i * 4);
		pending[i] = readl_relaxed(base + GICD_ISPENDR + i * 4);
		pending[i] &= enabled;
	}

	/* Walk the set bits and print each pending irq's action name */
	for (i = find_first_bit((unsigned long *)pending, gic->irq_nr);
	     i < gic->irq_nr;
	     i = find_next_bit((unsigned long *)pending, gic->irq_nr, i+1)) {
		unsigned int irq = irq_find_mapping(gic->domain, i);
		struct irq_desc *desc = irq_to_desc(irq);
		const char *name = "null";

		if (desc == NULL)
			name = "stray irq";
		else if (desc->action && desc->action->name)
			name = desc->action->name;

		pr_warn("%s: %d triggered %s\n", __func__, irq, name);
	}
}

static void gic_resume_one(struct gic_chip_data *gic)
{
	gic_show_resume_irq(gic);
}

static void gic_resume(void)
{
	gic_resume_one(&gic_data);
}

static struct syscore_ops gic_syscore_ops = {
	.suspend = gic_suspend,
	.resume = gic_resume,
};

/* Hook suspend/resume notifications early in boot */
static int __init gic_init_sys(void)
{
	register_syscore_ops(&gic_syscore_ops);
	return 0;
}
arch_initcall(gic_init_sys);

#endif
760
Jean-Philippe Bruckerf6c86a42015-10-01 13:47:15 +0100761static u64 gic_mpidr_to_affinity(unsigned long mpidr)
Marc Zyngier021f6532014-06-30 16:01:31 +0100762{
763 u64 aff;
764
Jean-Philippe Bruckerf6c86a42015-10-01 13:47:15 +0100765 aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
Marc Zyngier021f6532014-06-30 16:01:31 +0100766 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
767 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
768 MPIDR_AFFINITY_LEVEL(mpidr, 0));
769
770 return aff;
771}
772
/*
 * Low-level IRQ entry point: acknowledge interrupts from ICC_IAR1_EL1
 * and dispatch until the spurious ID is returned. SPIs/PPIs/LPIs go
 * through the irq domain; SGIs (0..15) are handled as IPIs.
 */
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;

	do {
		irqnr = gic_read_iar();

		/* PPI/SPI (16..1019) or LPI (>= 8192) */
		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
			int err;

			uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
			/* EOImode 1: drop priority now, deactivate later */
			if (static_key_true(&supports_deactivate))
				gic_write_eoir(irqnr);

			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected interrupt received!\n");
				/* Unmapped irq: complete it so it can't wedge */
				if (static_key_true(&supports_deactivate)) {
					if (irqnr < 8192)
						gic_write_dir(irqnr);
				} else {
					gic_write_eoir(irqnr);
				}
			}
			continue;
		}
		if (irqnr < 16) {	/* SGI / IPI */
			uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
			gic_write_eoir(irqnr);
			if (static_key_true(&supports_deactivate))
				gic_write_dir(irqnr);
#ifdef CONFIG_SMP
			/*
			 * Unlike GICv2, we don't need an smp_rmb() here.
			 * The control dependency from gic_read_iar to
			 * the ISB in gic_write_eoir is enough to ensure
			 * that any shared data read by handle_IPI will
			 * be read after the ACK.
			 */
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}
820
/*
 * One-time distributor bring-up: disable it, configure SPI grouping
 * and defaults, re-enable with affinity routing, and route all SPIs
 * to the boot CPU.
 */
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < gic_data.irq_nr; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
		       base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
}
854
/*
 * Locate this CPU's redistributor by matching its MPIDR affinity
 * against GICR_TYPER[63:32] in every redistributor region, and cache
 * its base addresses in the per-CPU rdist data.
 * Returns 0 on success, -ENODEV if no match was found.
 */
static int gic_populate_rdist(void)
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int i;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u32 reg;

		/* Sanity-check that this really is a v3/v4 redistributor */
		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		/* Walk the redistributors in this region until LAST */
		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				u64 offset = ptr - gic_data.redist_regions[i].redist_base;
				gic_data_rdist_rd_base() = ptr;
				gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
				return 0;
			}

			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(), mpidr);
	return -ENODEV;
}
909
/*
 * Program the per-CPU GIC system registers: SRE, priority mask, binary
 * point, EOI mode, and finally enable Group-1 interrupt delivery.
 */
static void gic_cpu_sys_reg_init(void)
{
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);

	if (static_key_true(&supports_deactivate)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}
944
Marc Zyngierda33f312014-11-24 14:35:18 +0000945static int gic_dist_supports_lpis(void)
946{
947 return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
948}
949
/*
 * Bring up the GIC on the calling CPU: locate its redistributor,
 * configure SGIs/PPIs, optionally init the ITS, then program the
 * CPU interface system registers. Order matters throughout.
 */
static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	rbase = gic_data_rdist_sgi_base();

	/* Configure SGIs/PPIs as non-secure Group-1 */
	writel_relaxed(~0, rbase + GICR_IGROUPR0);

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* Give LPIs a spin (skipped when the ACL variant is in use) */
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
	    !IS_ENABLED(CONFIG_ARM_GIC_V3_ACL))
		its_cpu_init();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}
975
976#ifdef CONFIG_SMP
Marc Zyngier021f6532014-06-30 16:01:31 +0100977
/*
 * CPU hotplug "starting" callback: runs on the incoming CPU with
 * interrupts disabled; performs the per-CPU GIC initialisation.
 */
static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init();
	return 0;
}
Marc Zyngier021f6532014-06-30 16:01:31 +0100983
984static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
Jean-Philippe Bruckerf6c86a42015-10-01 13:47:15 +0100985 unsigned long cluster_id)
Marc Zyngier021f6532014-06-30 16:01:31 +0100986{
James Morse727653d2016-09-19 18:29:15 +0100987 int next_cpu, cpu = *base_cpu;
Jean-Philippe Bruckerf6c86a42015-10-01 13:47:15 +0100988 unsigned long mpidr = cpu_logical_map(cpu);
Marc Zyngier021f6532014-06-30 16:01:31 +0100989 u16 tlist = 0;
990
991 while (cpu < nr_cpu_ids) {
992 /*
993 * If we ever get a cluster of more than 16 CPUs, just
994 * scream and skip that CPU.
995 */
996 if (WARN_ON((mpidr & 0xff) >= 16))
997 goto out;
998
999 tlist |= 1 << (mpidr & 0xf);
1000
James Morse727653d2016-09-19 18:29:15 +01001001 next_cpu = cpumask_next(cpu, mask);
1002 if (next_cpu >= nr_cpu_ids)
Marc Zyngier021f6532014-06-30 16:01:31 +01001003 goto out;
James Morse727653d2016-09-19 18:29:15 +01001004 cpu = next_cpu;
Marc Zyngier021f6532014-06-30 16:01:31 +01001005
1006 mpidr = cpu_logical_map(cpu);
1007
1008 if (cluster_id != (mpidr & ~0xffUL)) {
1009 cpu--;
1010 goto out;
1011 }
1012 }
1013out:
1014 *base_cpu = cpu;
1015 return tlist;
1016}
1017
/*
 * Extract affinity field <level> from cluster_id and shift it into the
 * position ICC_SGI1R_EL1 expects for that level.
 */
#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)

/*
 * Fire SGI <irq> at the CPUs of one cluster: Aff3.Aff2.Aff1 select the
 * cluster, @tlist is a bitmap of Aff0 targets within it.
 */
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
	       irq << ICC_SGI1R_SGI_ID_SHIFT |
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}
1035
/*
 * Send SGI <irq> (0..15) to every CPU in @mask, one ICC_SGI1R_EL1 write
 * per cluster. gic_compute_target_list() advances @cpu past the CPUs it
 * has already covered, so the loop visits each cluster exactly once.
 */
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	wmb();

	for_each_cpu(cpu, mask) {
		unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, irq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}
1060
/*
 * Hook up IPIs and register the hotplug callback that initialises the
 * GIC on each CPU as it comes online (nocalls: boot CPU is done already).
 */
static void gic_smp_init(void)
{
	set_smp_cross_call(gic_raise_softirq);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
				  "AP_IRQ_GICV3_STARTING", gic_starting_cpu,
				  NULL);
}
1068
1069static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1070 bool force)
1071{
1072 unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
1073 void __iomem *reg;
1074 int enabled;
1075 u64 val;
1076
Suzuki K Poulose59613f82017-06-30 10:58:28 +01001077 if (cpu >= nr_cpu_ids)
1078 return -EINVAL;
1079
Marc Zyngier021f6532014-06-30 16:01:31 +01001080 if (gic_irq_in_rdist(d))
1081 return -EINVAL;
1082
1083 /* If interrupt was enabled, disable it first */
1084 enabled = gic_peek_irq(d, GICD_ISENABLER);
1085 if (enabled)
1086 gic_mask_irq(d);
1087
1088 reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
1089 val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
1090
Jean-Philippe Brucker72c97122015-10-01 13:47:16 +01001091 gic_write_irouter(val, reg);
Marc Zyngier021f6532014-06-30 16:01:31 +01001092
1093 /*
1094 * If the interrupt was enabled, enabled it again. Otherwise,
1095 * just wait for the distributor to have digested our changes.
1096 */
1097 if (enabled)
1098 gic_unmask_irq(d);
1099 else
1100 gic_dist_wait_for_rwp();
1101
Antoine Tenart0fc6fa22016-02-19 16:22:43 +01001102 return IRQ_SET_MASK_OK_DONE;
Marc Zyngier021f6532014-06-30 16:01:31 +01001103}
1104#else
1105#define gic_set_affinity NULL
1106#define gic_smp_init() do { } while(0)
1107#endif
1108
Sudeep Holla3708d522014-08-26 16:03:35 +01001109#ifdef CONFIG_CPU_PM
Sudeep Hollaccd94322016-08-17 13:49:19 +01001110/* Check whether it's single security state view */
1111static bool gic_dist_security_disabled(void)
1112{
1113 return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
1114}
1115
Sudeep Holla3708d522014-08-26 16:03:35 +01001116static int gic_cpu_pm_notifier(struct notifier_block *self,
1117 unsigned long cmd, void *v)
1118{
Murali Nalajala00aaa932015-05-19 10:26:11 -07001119 if (from_suspend)
1120 return NOTIFY_OK;
1121
Sudeep Holla3708d522014-08-26 16:03:35 +01001122 if (cmd == CPU_PM_EXIT) {
Sudeep Hollaccd94322016-08-17 13:49:19 +01001123 if (gic_dist_security_disabled())
1124 gic_enable_redist(true);
Sudeep Holla3708d522014-08-26 16:03:35 +01001125 gic_cpu_sys_reg_init();
Sudeep Hollaccd94322016-08-17 13:49:19 +01001126 } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
Sudeep Holla3708d522014-08-26 16:03:35 +01001127 gic_write_grpen1(0);
1128 gic_enable_redist(false);
1129 }
1130 return NOTIFY_OK;
1131}
1132
/* Notifier wiring for the CPU PM hooks above. */
static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
1145
/* irq_chip used when EOI both drops priority and deactivates (mode 0). */
static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};
1157
/*
 * irq_chip for split EOI/Deactivate (EOImode 1), used when a hypervisor
 * is present; also provides vCPU affinity for interrupt forwarding.
 */
static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};
1170
Marc Zyngierda33f312014-11-24 14:35:18 +00001171#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
1172
Marc Zyngier021f6532014-06-30 16:01:31 +01001173static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
1174 irq_hw_number_t hw)
1175{
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001176 struct irq_chip *chip = &gic_chip;
1177
1178 if (static_key_true(&supports_deactivate))
1179 chip = &gic_eoimode1_chip;
1180
Marc Zyngier021f6532014-06-30 16:01:31 +01001181 /* SGIs are private to the core kernel */
1182 if (hw < 16)
1183 return -EPERM;
Marc Zyngierda33f312014-11-24 14:35:18 +00001184 /* Nothing here */
1185 if (hw >= gic_data.irq_nr && hw < 8192)
1186 return -EPERM;
1187 /* Off limits */
1188 if (hw >= GIC_ID_NR)
1189 return -EPERM;
1190
Marc Zyngier021f6532014-06-30 16:01:31 +01001191 /* PPIs */
1192 if (hw < 32) {
1193 irq_set_percpu_devid(irq);
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001194 irq_domain_set_info(d, irq, hw, chip, d->host_data,
Marc Zyngier443acc42014-11-24 14:35:09 +00001195 handle_percpu_devid_irq, NULL, NULL);
Rob Herringd17cab42015-08-29 18:01:22 -05001196 irq_set_status_flags(irq, IRQ_NOAUTOEN);
Marc Zyngier021f6532014-06-30 16:01:31 +01001197 }
1198 /* SPIs */
1199 if (hw >= 32 && hw < gic_data.irq_nr) {
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001200 irq_domain_set_info(d, irq, hw, chip, d->host_data,
Marc Zyngier443acc42014-11-24 14:35:09 +00001201 handle_fasteoi_irq, NULL, NULL);
Rob Herringd17cab42015-08-29 18:01:22 -05001202 irq_set_probe(irq);
Marc Zyngier021f6532014-06-30 16:01:31 +01001203 }
Marc Zyngierda33f312014-11-24 14:35:18 +00001204 /* LPIs */
1205 if (hw >= 8192 && hw < GIC_ID_NR) {
1206 if (!gic_dist_supports_lpis())
1207 return -EPERM;
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001208 irq_domain_set_info(d, irq, hw, chip, d->host_data,
Marc Zyngierda33f312014-11-24 14:35:18 +00001209 handle_fasteoi_irq, NULL, NULL);
Marc Zyngierda33f312014-11-24 14:35:18 +00001210 }
1211
Marc Zyngier021f6532014-06-30 16:01:31 +01001212 return 0;
1213}
1214
Marc Zyngierf833f572015-10-13 12:51:33 +01001215static int gic_irq_domain_translate(struct irq_domain *d,
1216 struct irq_fwspec *fwspec,
1217 unsigned long *hwirq,
1218 unsigned int *type)
Marc Zyngier021f6532014-06-30 16:01:31 +01001219{
Marc Zyngierf833f572015-10-13 12:51:33 +01001220 if (is_of_node(fwspec->fwnode)) {
1221 if (fwspec->param_count < 3)
1222 return -EINVAL;
Marc Zyngier021f6532014-06-30 16:01:31 +01001223
Marc Zyngierdb8c70e2015-10-14 12:27:16 +01001224 switch (fwspec->param[0]) {
1225 case 0: /* SPI */
1226 *hwirq = fwspec->param[1] + 32;
1227 break;
1228 case 1: /* PPI */
1229 *hwirq = fwspec->param[1] + 16;
1230 break;
1231 case GIC_IRQ_TYPE_LPI: /* LPI */
1232 *hwirq = fwspec->param[1];
1233 break;
1234 default:
1235 return -EINVAL;
1236 }
Marc Zyngierf833f572015-10-13 12:51:33 +01001237
1238 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1239 return 0;
Marc Zyngier021f6532014-06-30 16:01:31 +01001240 }
1241
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001242 if (is_fwnode_irqchip(fwspec->fwnode)) {
1243 if(fwspec->param_count != 2)
1244 return -EINVAL;
1245
1246 *hwirq = fwspec->param[0];
1247 *type = fwspec->param[1];
1248 return 0;
1249 }
1250
Marc Zyngierf833f572015-10-13 12:51:33 +01001251 return -EINVAL;
Marc Zyngier021f6532014-06-30 16:01:31 +01001252}
1253
Marc Zyngier443acc42014-11-24 14:35:09 +00001254static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1255 unsigned int nr_irqs, void *arg)
1256{
1257 int i, ret;
1258 irq_hw_number_t hwirq;
1259 unsigned int type = IRQ_TYPE_NONE;
Marc Zyngierf833f572015-10-13 12:51:33 +01001260 struct irq_fwspec *fwspec = arg;
Marc Zyngier443acc42014-11-24 14:35:09 +00001261
Marc Zyngierf833f572015-10-13 12:51:33 +01001262 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
Marc Zyngier443acc42014-11-24 14:35:09 +00001263 if (ret)
1264 return ret;
1265
1266 for (i = 0; i < nr_irqs; i++)
1267 gic_irq_domain_map(domain, virq + i, hwirq + i);
1268
1269 return 0;
1270}
1271
1272static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1273 unsigned int nr_irqs)
1274{
1275 int i;
1276
1277 for (i = 0; i < nr_irqs; i++) {
1278 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1279 irq_set_handler(virq + i, NULL);
1280 irq_domain_reset_irq_data(d);
1281 }
1282}
1283
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001284static int gic_irq_domain_select(struct irq_domain *d,
1285 struct irq_fwspec *fwspec,
1286 enum irq_domain_bus_token bus_token)
1287{
1288 /* Not for us */
1289 if (fwspec->fwnode != d->fwnode)
1290 return 0;
1291
1292 /* If this is not DT, then we have a single domain */
1293 if (!is_of_node(fwspec->fwnode))
1294 return 1;
1295
1296 /*
1297 * If this is a PPI and we have a 4th (non-null) parameter,
1298 * then we need to match the partition domain.
1299 */
1300 if (fwspec->param_count >= 4 &&
1301 fwspec->param[0] == 1 && fwspec->param[3] != 0)
1302 return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
1303
1304 return d == gic_data.domain;
1305}
1306
/* Main GICv3 irq_domain: SPIs, PPIs and (via the ITS) LPIs. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.select = gic_irq_domain_select,
};
1313
1314static int partition_domain_translate(struct irq_domain *d,
1315 struct irq_fwspec *fwspec,
1316 unsigned long *hwirq,
1317 unsigned int *type)
1318{
1319 struct device_node *np;
1320 int ret;
1321
1322 np = of_find_node_by_phandle(fwspec->param[3]);
1323 if (WARN_ON(!np))
1324 return -EINVAL;
1325
1326 ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
1327 of_node_to_fwnode(np));
1328 if (ret < 0)
1329 return ret;
1330
1331 *hwirq = ret;
1332 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1333
1334 return 0;
1335}
1336
/* Domain ops for PPI partitions; allocation is done by the parent domain. */
static const struct irq_domain_ops partition_domain_ops = {
	.translate = partition_domain_translate,
	.select = gic_irq_domain_select,
};
1341
/*
 * Common (DT and ACPI) GICv3 bring-up: probe the distributor, create
 * the irq_domain and per-CPU rdist bookkeeping, then initialise the
 * distributor, the boot CPU and the PM hooks — in that order.
 *
 * Returns 0 on success or a negative errno; on failure the domain and
 * per-CPU data are released.
 */
static int __init gic_init_bases(void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	u32 typer;
	int gic_irqs;
	int err;

	/* Without EL2 there is no vGIC, so no split EOI/Deactivate */
	if (!is_hyp_mode_available())
		static_key_slow_dec(&supports_deactivate);

	if (static_key_true(&supports_deactivate))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
	gic_irqs = GICD_TYPER_IRQS(typer);
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);

	/* ITS/LPI support, unless the ACL variant has claimed it */
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
	    !IS_ENABLED(CONFIG_ARM_GIC_V3_ACL))
		its_init(handle, &gic_data.rdists, gic_data.domain);

	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();
	gic_cpu_pm_init();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}
1403
1404static int __init gic_validate_dist_version(void __iomem *dist_base)
1405{
1406 u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
1407
1408 if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
1409 return -ENODEV;
1410
1411 return 0;
1412}
1413
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001414static int get_cpu_number(struct device_node *dn)
1415{
1416 const __be32 *cell;
1417 u64 hwid;
1418 int i;
1419
1420 cell = of_get_property(dn, "reg", NULL);
1421 if (!cell)
1422 return -1;
1423
1424 hwid = of_read_number(cell, of_n_addr_cells(dn));
1425
1426 /*
1427 * Non affinity bits must be set to 0 in the DT
1428 */
1429 if (hwid & ~MPIDR_HWID_BITMASK)
1430 return -1;
1431
1432 for (i = 0; i < num_possible_cpus(); i++)
1433 if (cpu_logical_map(i) == hwid)
1434 return i;
1435
1436 return -1;
1437}
1438
1439/* Create all possible partitions at boot time */
Linus Torvalds7beaa242016-05-19 11:27:09 -07001440static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001441{
1442 struct device_node *parts_node, *child_part;
1443 int part_idx = 0, i;
1444 int nr_parts;
1445 struct partition_affinity *parts;
1446
Johan Hovold828064b2017-11-11 17:51:25 +01001447 parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001448 if (!parts_node)
1449 return;
1450
1451 nr_parts = of_get_child_count(parts_node);
1452
1453 if (!nr_parts)
Johan Hovold828064b2017-11-11 17:51:25 +01001454 goto out_put_node;
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001455
1456 parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
1457 if (WARN_ON(!parts))
Johan Hovold828064b2017-11-11 17:51:25 +01001458 goto out_put_node;
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001459
1460 for_each_child_of_node(parts_node, child_part) {
1461 struct partition_affinity *part;
1462 int n;
1463
1464 part = &parts[part_idx];
1465
1466 part->partition_id = of_node_to_fwnode(child_part);
1467
1468 pr_info("GIC: PPI partition %s[%d] { ",
1469 child_part->name, part_idx);
1470
1471 n = of_property_count_elems_of_size(child_part, "affinity",
1472 sizeof(u32));
1473 WARN_ON(n <= 0);
1474
1475 for (i = 0; i < n; i++) {
1476 int err, cpu;
1477 u32 cpu_phandle;
1478 struct device_node *cpu_node;
1479
1480 err = of_property_read_u32_index(child_part, "affinity",
1481 i, &cpu_phandle);
1482 if (WARN_ON(err))
1483 continue;
1484
1485 cpu_node = of_find_node_by_phandle(cpu_phandle);
1486 if (WARN_ON(!cpu_node))
1487 continue;
1488
1489 cpu = get_cpu_number(cpu_node);
1490 if (WARN_ON(cpu == -1))
1491 continue;
1492
1493 pr_cont("%s[%d] ", cpu_node->full_name, cpu);
1494
1495 cpumask_set_cpu(cpu, &part->mask);
1496 }
1497
1498 pr_cont("}\n");
1499 part_idx++;
1500 }
1501
1502 for (i = 0; i < 16; i++) {
1503 unsigned int irq;
1504 struct partition_desc *desc;
1505 struct irq_fwspec ppi_fwspec = {
1506 .fwnode = gic_data.fwnode,
1507 .param_count = 3,
1508 .param = {
1509 [0] = 1,
1510 [1] = i,
1511 [2] = IRQ_TYPE_NONE,
1512 },
1513 };
1514
1515 irq = irq_create_fwspec_mapping(&ppi_fwspec);
1516 if (WARN_ON(!irq))
1517 continue;
1518 desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
1519 irq, &partition_domain_ops);
1520 if (WARN_ON(!desc))
1521 continue;
1522
1523 gic_data.ppi_descs[i] = desc;
1524 }
Johan Hovold828064b2017-11-11 17:51:25 +01001525
1526out_put_node:
1527 of_node_put(parts_node);
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001528}
1529
/*
 * Collect the DT resources KVM's vGIC needs: the maintenance interrupt
 * and (optionally, for GICv2 compatibility) the GICV region, which sits
 * after GICD, GICC, GICH and the redistributor regions in "reg".
 */
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource r;
	u32 gicv_idx;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	if (of_property_read_u32(node, "#redistributor-regions",
				 &gicv_idx))
		gicv_idx = 1;

	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, gicv_idx, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_set_kvm_info(&gic_v3_kvm_info);
}
1553
/*
 * DT probe path: map the distributor and every redistributor region,
 * hand them to gic_init_bases(), then set up PPI partitions, KVM info
 * and the list of IRQs excluded from save/restore. Unwinds all mappings
 * on failure via the goto ladder.
 */
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i, ignore_irqs_len;
	u32 ignore_restore_irqs[MAX_IRQS_IGNORE] = {0};

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%s: unable to map gic dist registers\n",
		       node->full_name);
		return -ENXIO;
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%s: no distributor detected, giving up\n",
		       node->full_name);
		goto out_unmap_dist;
	}

	/* One redistributor region unless the DT says otherwise */
	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
			     redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);
	gic_of_setup_kvm_info(node);

	/* IRQs the platform wants excluded from suspend save/restore */
	ignore_irqs_len = of_property_read_variable_u32_array(node,
					"ignored-save-restore-irqs",
					ignore_restore_irqs,
					0, MAX_IRQS_IGNORE);
	for (i = 0; i < ignore_irqs_len; i++)
		set_bit(ignore_restore_irqs[i], irqs_ignore_restore);

	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}
1630
1631IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001632
1633#ifdef CONFIG_ACPI
/* State accumulated while walking the ACPI MADT, init-time only. */
static struct
{
	void __iomem *dist_base;		/* mapped GICD */
	struct redist_region *redist_regs;	/* discovered GICR regions */
	u32 nr_redist_regions;
	bool single_redist;		/* GICR described per-CPU via GICC */
	int enabled_rdists;		/* enabled GICC entries with a GICR base */
	u32 maint_irq;			/* vGIC maintenance interrupt */
	int maint_irq_mode;		/* edge or level */
	phys_addr_t vcpu_base;		/* GICV base for KVM */
} acpi_data __initdata;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001645
1646static void __init
1647gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
1648{
1649 static int count = 0;
1650
Julien Grall611f0392016-04-11 16:32:56 +01001651 acpi_data.redist_regs[count].phys_base = phys_base;
1652 acpi_data.redist_regs[count].redist_base = redist_base;
1653 acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001654 count++;
1655}
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001656
1657static int __init
1658gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
1659 const unsigned long end)
1660{
1661 struct acpi_madt_generic_redistributor *redist =
1662 (struct acpi_madt_generic_redistributor *)header;
1663 void __iomem *redist_base;
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001664
1665 redist_base = ioremap(redist->base_address, redist->length);
1666 if (!redist_base) {
1667 pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
1668 return -ENOMEM;
1669 }
1670
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001671 gic_acpi_register_redist(redist->base_address, redist_base);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001672 return 0;
1673}
1674
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001675static int __init
1676gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
1677 const unsigned long end)
1678{
1679 struct acpi_madt_generic_interrupt *gicc =
1680 (struct acpi_madt_generic_interrupt *)header;
Julien Grall611f0392016-04-11 16:32:56 +01001681 u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001682 u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
1683 void __iomem *redist_base;
1684
Shanker Donthinenic9790242017-12-05 13:16:21 -06001685 /* GICC entry which has !ACPI_MADT_ENABLED is not unusable so skip */
1686 if (!(gicc->flags & ACPI_MADT_ENABLED))
1687 return 0;
1688
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001689 redist_base = ioremap(gicc->gicr_base_address, size);
1690 if (!redist_base)
1691 return -ENOMEM;
1692
1693 gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
1694 return 0;
1695}
1696
1697static int __init gic_acpi_collect_gicr_base(void)
1698{
1699 acpi_tbl_entry_handler redist_parser;
1700 enum acpi_madt_type type;
1701
Julien Grall611f0392016-04-11 16:32:56 +01001702 if (acpi_data.single_redist) {
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001703 type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
1704 redist_parser = gic_acpi_parse_madt_gicc;
1705 } else {
1706 type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
1707 redist_parser = gic_acpi_parse_madt_redist;
1708 }
1709
1710 /* Collect redistributor base addresses in GICR entries */
1711 if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
1712 return 0;
1713
1714 pr_info("No valid GICR entries exist\n");
1715 return -ENODEV;
1716}
1717
/*
 * Counting pass for GICR subtables: a match contributes one region,
 * so simply accepting the entry is enough.
 */
static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
				  const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}
1724
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001725static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
1726 const unsigned long end)
1727{
1728 struct acpi_madt_generic_interrupt *gicc =
1729 (struct acpi_madt_generic_interrupt *)header;
1730
1731 /*
1732 * If GICC is enabled and has valid gicr base address, then it means
1733 * GICR base is presented via GICC
1734 */
Marc Zyngier9dc447d22019-12-16 11:24:57 +00001735 if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
1736 acpi_data.enabled_rdists++;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001737 return 0;
Marc Zyngier9dc447d22019-12-16 11:24:57 +00001738 }
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001739
Shanker Donthinenic9790242017-12-05 13:16:21 -06001740 /*
1741 * It's perfectly valid firmware can pass disabled GICC entry, driver
1742 * should not treat as errors, skip the entry instead of probe fail.
1743 */
1744 if (!(gicc->flags & ACPI_MADT_ENABLED))
1745 return 0;
1746
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001747 return -ENODEV;
1748}
1749
1750static int __init gic_acpi_count_gicr_regions(void)
1751{
1752 int count;
1753
1754 /*
1755 * Count how many redistributor regions we have. It is not allowed
1756 * to mix redistributor description, GICR and GICC subtables have to be
1757 * mutually exclusive.
1758 */
1759 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
1760 gic_acpi_match_gicr, 0);
1761 if (count > 0) {
Julien Grall611f0392016-04-11 16:32:56 +01001762 acpi_data.single_redist = false;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001763 return count;
1764 }
1765
1766 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
1767 gic_acpi_match_gicc, 0);
Marc Zyngier9dc447d22019-12-16 11:24:57 +00001768 if (count > 0) {
Julien Grall611f0392016-04-11 16:32:56 +01001769 acpi_data.single_redist = true;
Marc Zyngier9dc447d22019-12-16 11:24:57 +00001770 count = acpi_data.enabled_rdists;
1771 }
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001772
1773 return count;
1774}
1775
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001776static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
1777 struct acpi_probe_entry *ape)
1778{
1779 struct acpi_madt_generic_distributor *dist;
1780 int count;
1781
1782 dist = (struct acpi_madt_generic_distributor *)header;
1783 if (dist->version != ape->driver_data)
1784 return false;
1785
1786 /* We need to do that exercise anyway, the sooner the better */
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001787 count = gic_acpi_count_gicr_regions();
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001788 if (count <= 0)
1789 return false;
1790
Julien Grall611f0392016-04-11 16:32:56 +01001791 acpi_data.nr_redist_regions = count;
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001792 return true;
1793}
1794
Julien Grall1839e572016-04-11 16:32:57 +01001795static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header,
1796 const unsigned long end)
1797{
1798 struct acpi_madt_generic_interrupt *gicc =
1799 (struct acpi_madt_generic_interrupt *)header;
1800 int maint_irq_mode;
1801 static int first_madt = true;
1802
1803 /* Skip unusable CPUs */
1804 if (!(gicc->flags & ACPI_MADT_ENABLED))
1805 return 0;
1806
1807 maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
1808 ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
1809
1810 if (first_madt) {
1811 first_madt = false;
1812
1813 acpi_data.maint_irq = gicc->vgic_interrupt;
1814 acpi_data.maint_irq_mode = maint_irq_mode;
1815 acpi_data.vcpu_base = gicc->gicv_base_address;
1816
1817 return 0;
1818 }
1819
1820 /*
1821 * The maintenance interrupt and GICV should be the same for every CPU
1822 */
1823 if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
1824 (acpi_data.maint_irq_mode != maint_irq_mode) ||
1825 (acpi_data.vcpu_base != gicc->gicv_base_address))
1826 return -EINVAL;
1827
1828 return 0;
1829}
1830
1831static bool __init gic_acpi_collect_virt_info(void)
1832{
1833 int count;
1834
1835 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
1836 gic_acpi_parse_virt_madt_gicc, 0);
1837
1838 return (count > 0);
1839}
1840
/* Fixed mapping sizes for the ACPI-described GIC register frames */
#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)
1844
1845static void __init gic_acpi_setup_kvm_info(void)
1846{
1847 int irq;
1848
1849 if (!gic_acpi_collect_virt_info()) {
1850 pr_warn("Unable to get hardware information used for virtualization\n");
1851 return;
1852 }
1853
1854 gic_v3_kvm_info.type = GIC_V3;
1855
1856 irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
1857 acpi_data.maint_irq_mode,
1858 ACPI_ACTIVE_HIGH);
1859 if (irq <= 0)
1860 return;
1861
1862 gic_v3_kvm_info.maint_irq = irq;
1863
1864 if (acpi_data.vcpu_base) {
1865 struct resource *vcpu = &gic_v3_kvm_info.vcpu;
1866
1867 vcpu->flags = IORESOURCE_MEM;
1868 vcpu->start = acpi_data.vcpu_base;
1869 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
1870 }
1871
1872 gic_set_kvm_info(&gic_v3_kvm_info);
1873}
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001874
1875static int __init
1876gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
1877{
1878 struct acpi_madt_generic_distributor *dist;
1879 struct fwnode_handle *domain_handle;
Julien Grall611f0392016-04-11 16:32:56 +01001880 size_t size;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001881 int i, err;
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001882
1883 /* Get distributor base address */
1884 dist = (struct acpi_madt_generic_distributor *)header;
Julien Grall611f0392016-04-11 16:32:56 +01001885 acpi_data.dist_base = ioremap(dist->base_address,
1886 ACPI_GICV3_DIST_MEM_SIZE);
1887 if (!acpi_data.dist_base) {
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001888 pr_err("Unable to map GICD registers\n");
1889 return -ENOMEM;
1890 }
1891
Julien Grall611f0392016-04-11 16:32:56 +01001892 err = gic_validate_dist_version(acpi_data.dist_base);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001893 if (err) {
Julien Grall611f0392016-04-11 16:32:56 +01001894 pr_err("No distributor detected at @%p, giving up",
1895 acpi_data.dist_base);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001896 goto out_dist_unmap;
1897 }
1898
Julien Grall611f0392016-04-11 16:32:56 +01001899 size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
1900 acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
1901 if (!acpi_data.redist_regs) {
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001902 err = -ENOMEM;
1903 goto out_dist_unmap;
1904 }
1905
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001906 err = gic_acpi_collect_gicr_base();
1907 if (err)
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001908 goto out_redist_unmap;
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001909
Julien Grall611f0392016-04-11 16:32:56 +01001910 domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001911 if (!domain_handle) {
1912 err = -ENOMEM;
1913 goto out_redist_unmap;
1914 }
1915
Julien Grall611f0392016-04-11 16:32:56 +01001916 err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
1917 acpi_data.nr_redist_regions, 0, domain_handle);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001918 if (err)
1919 goto out_fwhandle_free;
1920
1921 acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
Julien Grall1839e572016-04-11 16:32:57 +01001922 gic_acpi_setup_kvm_info();
1923
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001924 return 0;
1925
1926out_fwhandle_free:
1927 irq_domain_free_fwnode(domain_handle);
1928out_redist_unmap:
Julien Grall611f0392016-04-11 16:32:56 +01001929 for (i = 0; i < acpi_data.nr_redist_regions; i++)
1930 if (acpi_data.redist_regs[i].redist_base)
1931 iounmap(acpi_data.redist_regs[i].redist_base);
1932 kfree(acpi_data.redist_regs);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001933out_dist_unmap:
Julien Grall611f0392016-04-11 16:32:56 +01001934 iounmap(acpi_data.dist_base);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001935 return err;
1936}
/* Match MADT distributor entries for GICv3, GICv4 or an unspecified version */
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
1946#endif