/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
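
/*
 * For example, with the 16bit maximum used by this driver
 * (ITS_MAX_LPI_NRBITS below): LPI_PROPBASE_SZ = ALIGN(64K, 64K) = 64KB,
 * i.e. one configuration byte per LPI, and LPI_PENDBASE_SZ =
 * ALIGN(64K / 8, 64K) = 64KB, the 64kB alignment leaving room for the
 * extra 8192 SPI/PPI/SGI bits mentioned above.
 */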

#define LPI_PROP_DEFAULT_PRIO	0xa0

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			cbaser_save;
	u32			ctlr_save;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	u32			ite_size;
	u32			device_ids;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	bool			is_v4;
	int			vlpi_redist_offset;
};

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS	(16)
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	struct mutex		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
};

static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
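/*
 * Note: the SZ_128K offset assumes the GICv4 redistributor layout,
 * where the VLPI control page is expected to sit two 64KB frames
 * above RD_base (after the RD and SGI frames).
 */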

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	u64	raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
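/*
 * For example: a command block is 4 x 64bit = 32 bytes, so the 64KB
 * queue above holds 64K / 32 = 2048 entries.
 */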

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}
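
/*
 * For example, its_encode_devid() below boils down to
 * its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32): clear bits [63:32]
 * of the first command double-word, then OR the device ID into that
 * field.
 */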

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return desc->its_vinvall_cmd.vpe;
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr;
	u64 target;

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	its_fixup_cmd(cmd);

	return desc->its_vmapp_cmd.vpe;
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return desc->its_vmapti_cmd.vpe;
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return desc->its_vmovi_cmd.vpe;
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	its_fixup_cmd(cmd);

	return desc->its_vmovp_cmd.vpe;
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static int its_wait_for_range_completion(struct its_node *its,
					 struct its_cmd_block *from,
					 struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/* Direct case */
		if (from_idx < to_idx && rd_idx >= to_idx)
			break;

		/* Wrapped case */
		if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
					   from_idx, to_idx, rd_idx);
			return -1;
		}
		cpu_relax();
		udelay(1);
	}

	return 0;
}

/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, cmd, next_cmd))		\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}

static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc;
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;
	desc.its_vmovp_cmd.its_list = (u16)its_list_map;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.seq_num = 0;
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		if (!vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	irq_hw_number_t hwirq;
	struct page *prop_page;
	u8 *cfg;

	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);
		struct its_vlpi_map *map;

		prop_page = its_dev->event_map.vm->vprop_page;
		map = &its_dev->event_map.vlpi_maps[event];
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		prop_page = gic_rdists->prop_page;
		hwirq = d->hwirq;
	}

	cfg = page_address(prop_page) + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	its_send_inv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
		return;

	its_dev->event_map.vlpi_maps[event].db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* lpi cannot be routed to a redistributor that is on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* don't set the affinity when the target cpu is the same as the current one */
	if (cpu != its_dev->event_map.col_map[id]) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	return IRQ_SET_MASK_OK_DONE;
}

static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	return its->phys_base + GITS_TRANSLATER;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->get_msi_base(its_dev);

	msg->address_lo		= lower_32_bits(addr);
	msg->address_hi		= upper_32_bits(addr);
	msg->data		= its_get_event_id(d);

	iommu_dma_map_msi_msg(d->irq, msg);
}

static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (state)
		its_send_int(its_dev, event);
	else
		its_send_clear(its_dev, event);

	return 0;
}

static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	/*
	 * If the VM wasn't mapped yet, iterate over the vpes and get
	 * them mapped now.
	 */
	vm->vlpi_count[its->list_nr]++;

	if (vm->vlpi_count[its->list_nr] == 1) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];
			struct irq_data *d = irq_get_irq_data(vpe->irq);

			/* Map the VPE to the first possible CPU */
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
		}
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	if (!--vm->vlpi_count[its->list_nr]) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	if (!info->map)
		return -EINVAL;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm) {
		struct its_vlpi_map *maps;

		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
			       GFP_KERNEL);
		if (!maps) {
			ret = -ENOMEM;
			goto out;
		}

		its_dev->event_map.vm = info->map->vm;
		its_dev->event_map.vlpi_maps = maps;
	} else if (its_dev->event_map.vm != info->map->vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Get our private copy of the mapping information */
	its_dev->event_map.vlpi_maps[event] = *info->map;

	if (irqd_is_forwarded_to_vcpu(d)) {
		/* Already mapped, move it around */
		its_send_vmovi(its_dev, event);
	} else {
		/* Ensure all the VPEs are mapped on this ITS */
		its_map_vm(its_dev->its, info->map->vm);

		/*
		 * Flag the interrupt as forwarded so that we can
		 * start poking the virtual property table.
		 */
		irqd_set_forwarded_to_vcpu(d);

		/* Write out the property to the prop table */
		lpi_write_config(d, 0xff, info->map->properties);

		/* Drop the physical mapping */
		its_send_discard(its_dev, event);

		/* and install the virtual one */
		its_send_vmapti(its_dev, event);

		/* Increment the number of VLPIs */
		its_dev->event_map.nr_vlpis++;
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm ||
	    !its_dev->event_map.vlpi_maps[event].vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy our mapping information to the incoming request */
	*info->map = its_dev->event_map.vlpi_maps[event];

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_unmap(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
		ret = -EINVAL;
		goto out;
	}

	/* Drop the virtual mapping */
	its_send_discard(its_dev, event);

	/* and restore the physical one */
	irqd_clr_forwarded_to_vcpu(d);
	its_send_mapti(its_dev, d->hwirq, event);
	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
				    LPI_PROP_ENABLED |
				    LPI_PROP_GROUP1));

	/* Potentially unmap the VM from this ITS */
	its_unmap_vm(its_dev->its, its_dev->event_map.vm);

	/*
	 * Drop the refcount and make the device available again if
	 * this was the last VLPI.
	 */
	if (!--its_dev->event_map.nr_vlpis) {
		its_dev->event_map.vm = NULL;
		kfree(its_dev->event_map.vlpi_maps);
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
		lpi_update_config(d, 0xff, info->config);
	else
		lpi_write_config(d, 0xff, info->config);
	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

	return 0;
}

static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	/* Need a v4 ITS */
	if (!its_dev->its->is_v4)
		return -EINVAL;

	/* Unmap request? */
	if (!info)
		return its_vlpi_unmap(d);

	switch (info->cmd_type) {
	case MAP_VLPI:
		return its_vlpi_map(d, info);

	case GET_VLPI:
		return its_vlpi_get(d, info);

	case PROP_UPDATE_VLPI:
	case PROP_UPDATE_AND_INV_VLPI:
		return its_vlpi_prop_update(d, info);

	default:
		return -EINVAL;
	}
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
};

/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
 * bits to the right.
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 */
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1UL << IRQS_PER_CHUNK_SHIFT)
#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
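
/*
 * Worked example of the scheme described above, assuming the 16bit
 * maximum: (1UL << 16) - 8192 = 57344 allocatable LPIs, i.e.
 * 57344 >> 5 = 1792 chunks of 32, with chunk 0 starting at LPI 8192.
 */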
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001421
1422static unsigned long *lpi_bitmap;
1423static u32 lpi_chunks;
1424static DEFINE_SPINLOCK(lpi_lock);
1425
1426static int its_lpi_to_chunk(int lpi)
1427{
1428 return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
1429}
1430
1431static int its_chunk_to_lpi(int chunk)
1432{
1433 return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
1434}
1435
Tomasz Nowicki04a0e4d2016-01-19 14:11:18 +01001436static int __init its_lpi_init(u32 id_bits)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001437{
1438 lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
1439
Kees Cook6396bb22018-06-12 14:03:40 -07001440 lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long),
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001441 GFP_KERNEL);
1442 if (!lpi_bitmap) {
1443 lpi_chunks = 0;
1444 return -ENOMEM;
1445 }
1446
1447 pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
1448 return 0;
1449}
1450
1451static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
1452{
1453 unsigned long *bitmap = NULL;
1454 int chunk_id;
1455 int nr_chunks;
1456 int i;
1457
1458 nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
1459
1460 spin_lock(&lpi_lock);
1461
1462 do {
1463 chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
1464 0, nr_chunks, 0);
1465 if (chunk_id < lpi_chunks)
1466 break;
1467
1468 nr_chunks--;
1469 } while (nr_chunks > 0);
1470
1471 if (!nr_chunks)
1472 goto out;
1473
Kees Cook6396bb22018-06-12 14:03:40 -07001474 bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK),
1475 sizeof(long),
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001476 GFP_ATOMIC);
1477 if (!bitmap)
1478 goto out;
1479
1480 for (i = 0; i < nr_chunks; i++)
1481 set_bit(chunk_id + i, lpi_bitmap);
1482
1483 *base = its_chunk_to_lpi(chunk_id);
1484 *nr_ids = nr_chunks * IRQS_PER_CHUNK;
1485
1486out:
1487 spin_unlock(&lpi_lock);
1488
Marc Zyngierc8415b92015-10-02 16:44:05 +01001489 if (!bitmap)
1490 *base = *nr_ids = 0;
1491
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001492 return bitmap;
1493}
1494
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00001495static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001496{
1497 int lpi;
1498
1499 spin_lock(&lpi_lock);
1500
1501 for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
1502 int chunk = its_lpi_to_chunk(lpi);
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00001503
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001504 BUG_ON(chunk > lpi_chunks);
1505 if (test_bit(chunk, lpi_bitmap)) {
1506 clear_bit(chunk, lpi_bitmap);
1507 } else {
1508 pr_err("Bad LPI chunk %d\n", chunk);
1509 }
1510 }
1511
1512 spin_unlock(&lpi_lock);
1513
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00001514 kfree(bitmap);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001515}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001516
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001517static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1518{
1519 struct page *prop_page;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001520
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001521 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1522 if (!prop_page)
1523 return NULL;
1524
1525 /* Priority 0xa0, Group-1, disabled */
1526 memset(page_address(prop_page),
1527 LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
1528 LPI_PROPBASE_SZ);
1529
1530 /* Make sure the GIC will observe the written configuration */
1531 gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
1532
1533 return prop_page;
1534}
1535
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001536static void its_free_prop_table(struct page *prop_page)
1537{
1538 free_pages((unsigned long)page_address(prop_page),
1539 get_order(LPI_PROPBASE_SZ));
1540}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001541
1542static int __init its_alloc_lpi_tables(void)
1543{
1544 phys_addr_t paddr;
1545
Shanker Donthineni6c31e122017-06-22 18:19:14 -05001546 lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001547 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001548 if (!gic_rdists->prop_page) {
1549 pr_err("Failed to allocate PROPBASE\n");
1550 return -ENOMEM;
1551 }
1552
1553 paddr = page_to_phys(gic_rdists->prop_page);
1554 pr_info("GIC: using LPI property table @%pa\n", &paddr);
1555
Shanker Donthineni6c31e122017-06-22 18:19:14 -05001556 return its_lpi_init(lpi_id_bits);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001557}
1558
1559static const char *its_base_type_string[] = {
1560 [GITS_BASER_TYPE_DEVICE] = "Devices",
1561 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
Marc Zyngier4f46de92016-12-20 15:50:14 +00001562 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001563 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
1564 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
1565 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
1566 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
1567};
1568
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001569static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1570{
1571 u32 idx = baser - its->tables;
1572
Vladimir Murzin0968a612016-11-02 11:54:06 +00001573 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001574}
1575
1576static void its_write_baser(struct its_node *its, struct its_baser *baser,
1577 u64 val)
1578{
1579 u32 idx = baser - its->tables;
1580
Vladimir Murzin0968a612016-11-02 11:54:06 +00001581 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001582 baser->val = its_read_baser(its, baser);
1583}
1584
Shanker Donthineni93473592016-06-06 18:17:30 -05001585static int its_setup_baser(struct its_node *its, struct its_baser *baser,
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001586 u64 cache, u64 shr, u32 psz, u32 order,
1587 bool indirect)
Shanker Donthineni93473592016-06-06 18:17:30 -05001588{
1589 u64 val = its_read_baser(its, baser);
1590 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1591 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001592 u64 baser_phys, tmp;
Shanker Donthineni93473592016-06-06 18:17:30 -05001593 u32 alloc_pages;
1594 void *base;
Shanker Donthineni93473592016-06-06 18:17:30 -05001595
1596retry_alloc_baser:
1597 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1598 if (alloc_pages > GITS_BASER_PAGES_MAX) {
1599 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1600 &its->phys_base, its_base_type_string[type],
1601 alloc_pages, GITS_BASER_PAGES_MAX);
1602 alloc_pages = GITS_BASER_PAGES_MAX;
1603 order = get_order(GITS_BASER_PAGES_MAX * psz);
1604 }
1605
1606 base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
1607 if (!base)
1608 return -ENOMEM;
1609
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001610 baser_phys = virt_to_phys(base);
1611
1612 /* Check if the physical address of the memory is above 48bits */
1613 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1614
1615 /* 52bit PA is supported only when PageSize=64K */
1616 if (psz != SZ_64K) {
1617 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1618 free_pages((unsigned long)base, order);
1619 return -ENXIO;
1620 }
1621
1622 /* Convert 52bit PA to 48bit field */
1623 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1624 }
1625
Shanker Donthineni93473592016-06-06 18:17:30 -05001626retry_baser:
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001627 val = (baser_phys |
Shanker Donthineni93473592016-06-06 18:17:30 -05001628 (type << GITS_BASER_TYPE_SHIFT) |
1629 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1630 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
1631 cache |
1632 shr |
1633 GITS_BASER_VALID);
1634
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001635 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
1636
Shanker Donthineni93473592016-06-06 18:17:30 -05001637 switch (psz) {
1638 case SZ_4K:
1639 val |= GITS_BASER_PAGE_SIZE_4K;
1640 break;
1641 case SZ_16K:
1642 val |= GITS_BASER_PAGE_SIZE_16K;
1643 break;
1644 case SZ_64K:
1645 val |= GITS_BASER_PAGE_SIZE_64K;
1646 break;
1647 }
1648
1649 its_write_baser(its, baser, val);
1650 tmp = baser->val;
1651
1652 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1653 /*
1654 * Shareability didn't stick. Just use
1655 * whatever the read reported, which is likely
1656 * to be the only thing this ITS
1657 * supports. If that's zero, make it
1658 * non-cacheable as well.
1659 */
1660 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1661 if (!shr) {
1662 cache = GITS_BASER_nC;
Vladimir Murzin328191c2016-11-02 11:54:05 +00001663 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
Shanker Donthineni93473592016-06-06 18:17:30 -05001664 }
1665 goto retry_baser;
1666 }
1667
1668 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1669 /*
1670 * Page size didn't stick. Let's try a smaller
1671 * size and retry. If we reach 4K, then
1672 * something is horribly wrong...
1673 */
1674 free_pages((unsigned long)base, order);
1675 baser->base = NULL;
1676
1677 switch (psz) {
1678 case SZ_16K:
1679 psz = SZ_4K;
1680 goto retry_alloc_baser;
1681 case SZ_64K:
1682 psz = SZ_16K;
1683 goto retry_alloc_baser;
1684 }
1685 }
1686
1687 if (val != tmp) {
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001688 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
Shanker Donthineni93473592016-06-06 18:17:30 -05001689 &its->phys_base, its_base_type_string[type],
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001690 val, tmp);
Shanker Donthineni93473592016-06-06 18:17:30 -05001691 free_pages((unsigned long)base, order);
1692 return -ENXIO;
1693 }
1694
1695 baser->order = order;
1696 baser->base = base;
1697 baser->psz = psz;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001698 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
Shanker Donthineni93473592016-06-06 18:17:30 -05001699
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001700 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001701 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
Shanker Donthineni93473592016-06-06 18:17:30 -05001702 its_base_type_string[type],
1703 (unsigned long)virt_to_phys(base),
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001704 indirect ? "indirect" : "flat", (int)esz,
Shanker Donthineni93473592016-06-06 18:17:30 -05001705 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
1706
1707 return 0;
1708}
1709
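/*
 * Sizing example (assuming 8-byte entries and 64K ITS pages): a flat
 * table for a 20-bit DeviceID space would need 8 << 20 = 8MB. With
 * indirection, each 64K lvl2 page resolves ilog2(64K / 8) = 13 ID bits,
 * leaving a 7-bit lvl1 index, i.e. a 128-entry lvl1 table plus lvl2
 * pages allocated on demand.
 */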
Marc Zyngier4cacac52016-12-19 18:18:34 +00001710static bool its_parse_indirect_baser(struct its_node *its,
1711 struct its_baser *baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001712 u32 psz, u32 *order, u32 ids)
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001713{
Marc Zyngier4cacac52016-12-19 18:18:34 +00001714 u64 tmp = its_read_baser(its, baser);
1715 u64 type = GITS_BASER_TYPE(tmp);
1716 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001717 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001718 u32 new_order = *order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001719 bool indirect = false;
1720
1721 /* No need to enable indirection if the memory requirement is < (psz * 2) bytes */
1722 if ((esz << ids) > (psz * 2)) {
1723 /*
1724 * Find out whether hw supports a single or two-level table by
1725 * table by reading bit at offset '62' after writing '1' to it.
1726 */
1727 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1728 indirect = !!(baser->val & GITS_BASER_INDIRECT);
1729
1730 if (indirect) {
1731 /*
1732 * Each lvl2 table covers one ITS page ('psz') worth of
1733 * entries. To size the lvl1 table, subtract the ID bits
1734 * resolved by a single lvl2 table from the 'ids' reported
1735 * by the ITS hardware; the lvl1 table then needs (1 << ids)
1736 * entries of GITS_LVL1_ENTRY_SIZE bytes each.
1737 */
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001738 ids -= ilog2(psz / (int)esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001739 esz = GITS_LVL1_ENTRY_SIZE;
1740 }
1741 }
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001742
1743 /*
1744 * Allocate as many entries as required to fit the
1745 * range of device IDs that the ITS can grok... The ID
1746 * space being incredibly sparse, this results in a
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001747 * massive waste of memory if the two-level device table
1748 * feature is not supported by the hardware.
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001749 */
1750 new_order = max_t(u32, get_order(esz << ids), new_order);
1751 if (new_order >= MAX_ORDER) {
1752 new_order = MAX_ORDER - 1;
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001753 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
Marc Zyngier4cacac52016-12-19 18:18:34 +00001754 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
1755 &its->phys_base, its_base_type_string[type],
1756 its->device_ids, ids);
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001757 }
1758
1759 *order = new_order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001760
1761 return indirect;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001762}
1763
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001764static void its_free_tables(struct its_node *its)
1765{
1766 int i;
1767
1768 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni1a485f42016-02-01 20:19:44 -06001769 if (its->tables[i].base) {
1770 free_pages((unsigned long)its->tables[i].base,
1771 its->tables[i].order);
1772 its->tables[i].base = NULL;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001773 }
1774 }
1775}
1776
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05001777static int its_alloc_tables(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001778{
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001779 u64 shr = GITS_BASER_InnerShareable;
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001780 u64 cache = GITS_BASER_RaWaWb;
Shanker Donthineni93473592016-06-06 18:17:30 -05001781 u32 psz = SZ_64K;
1782 int err, i;
Robert Richter94100972015-09-21 22:58:38 +02001783
Ard Biesheuvelfa150012017-10-17 17:55:54 +01001784 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
1785 /* erratum 24313: ignore memory access type */
1786 cache = GITS_BASER_nCnB;
Shanker Donthineni466b7d12016-03-09 22:10:49 -06001787
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001788 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001789 struct its_baser *baser = its->tables + i;
1790 u64 val = its_read_baser(its, baser);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001791 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni93473592016-06-06 18:17:30 -05001792 u32 order = get_order(psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001793 bool indirect = false;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001794
Marc Zyngier4cacac52016-12-19 18:18:34 +00001795 switch (type) {
1796 case GITS_BASER_TYPE_NONE:
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001797 continue;
1798
Marc Zyngier4cacac52016-12-19 18:18:34 +00001799 case GITS_BASER_TYPE_DEVICE:
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001800 indirect = its_parse_indirect_baser(its, baser,
1801 psz, &order,
1802 its->device_ids);
 break;
Marc Zyngier4cacac52016-12-19 18:18:34 +00001803 case GITS_BASER_TYPE_VCPU:
1804 indirect = its_parse_indirect_baser(its, baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001805 psz, &order,
1806 ITS_MAX_VPEID_BITS);
Marc Zyngier4cacac52016-12-19 18:18:34 +00001807 break;
1808 }
Marc Zyngierf54b97e2015-03-06 16:37:41 +00001809
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001810 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
Shanker Donthineni93473592016-06-06 18:17:30 -05001811 if (err < 0) {
1812 its_free_tables(its);
1813 return err;
Robert Richter30f21362015-09-21 22:58:34 +02001814 }
1815
Shanker Donthineni93473592016-06-06 18:17:30 -05001816 /* Update settings which will be used for next BASERn */
1817 psz = baser->psz;
1818 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
1819 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001820 }
1821
1822 return 0;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001823}
1824
1825static int its_alloc_collections(struct its_node *its)
1826{
Kees Cook6396bb22018-06-12 14:03:40 -07001827 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001828 GFP_KERNEL);
1829 if (!its->collections)
1830 return -ENOMEM;
1831
1832 return 0;
1833}
1834
Marc Zyngier7c297a22016-12-19 18:34:38 +00001835static struct page *its_allocate_pending_table(gfp_t gfp_flags)
1836{
1837 struct page *pend_page;
1838 /*
1839 * The pending pages have to be at least 64kB aligned,
1840 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
1841 */
1842 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
1843 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1844 if (!pend_page)
1845 return NULL;
1846
1847 /* Make sure the GIC will observe the zero-ed page */
1848 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
1849
1850 return pend_page;
1851}
1852
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001853static void its_free_pending_table(struct page *pt)
1854{
1855 free_pages((unsigned long)page_address(pt),
1856 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1857}
1858
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001859static void its_cpu_init_lpis(void)
1860{
1861 void __iomem *rbase = gic_data_rdist_rd_base();
1862 struct page *pend_page;
1863 u64 val, tmp;
1864
1865 /* If we didn't allocate the pending table yet, do it now */
1866 pend_page = gic_data_rdist()->pend_page;
1867 if (!pend_page) {
1868 phys_addr_t paddr;
Marc Zyngier7c297a22016-12-19 18:34:38 +00001869
1870 pend_page = its_allocate_pending_table(GFP_NOWAIT);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001871 if (!pend_page) {
1872 pr_err("Failed to allocate PENDBASE for CPU%d\n",
1873 smp_processor_id());
1874 return;
1875 }
1876
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001877 paddr = page_to_phys(pend_page);
1878 pr_info("CPU%d: using LPI pending table @%pa\n",
1879 smp_processor_id(), &paddr);
1880 gic_data_rdist()->pend_page = pend_page;
1881 }
1882
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001883 /* set PROPBASE */
1884 val = (page_to_phys(gic_rdists->prop_page) |
1885 GICR_PROPBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001886 GICR_PROPBASER_RaWaWb |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001887 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
1888
Vladimir Murzin0968a612016-11-02 11:54:06 +00001889 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
1890 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001891
1892 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00001893 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
1894 /*
1895 * The HW reports non-shareable, we must
1896 * remove the cacheability attributes as
1897 * well.
1898 */
1899 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
1900 GICR_PROPBASER_CACHEABILITY_MASK);
1901 val |= GICR_PROPBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00001902 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00001903 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001904 pr_info_once("GIC: using cache flushing for LPI property table\n");
1905 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
1906 }
1907
1908 /* set PENDBASE */
1909 val = (page_to_phys(pend_page) |
Marc Zyngier4ad3e362015-03-27 14:15:04 +00001910 GICR_PENDBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001911 GICR_PENDBASER_RaWaWb);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001912
Vladimir Murzin0968a612016-11-02 11:54:06 +00001913 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
1914 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00001915
1916 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
1917 /*
1918 * The HW reports non-shareable, we must remove the
1919 * cacheability attributes as well.
1920 */
1921 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
1922 GICR_PENDBASER_CACHEABILITY_MASK);
1923 val |= GICR_PENDBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00001924 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00001925 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001926
1927 /* Enable LPIs */
1928 val = readl_relaxed(rbase + GICR_CTLR);
1929 val |= GICR_CTLR_ENABLE_LPIS;
1930 writel_relaxed(val, rbase + GICR_CTLR);
1931
1932 /* Make sure the GIC has seen the above */
1933 dsb(sy);
1934}
1935
Derek Basehore920181c2018-02-28 21:48:20 -08001936static void its_cpu_init_collection(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001937{
Derek Basehore920181c2018-02-28 21:48:20 -08001938 int cpu = smp_processor_id();
1939 u64 target;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001940
Derek Basehore920181c2018-02-28 21:48:20 -08001941 /* avoid cross-node collections and their mapping */
1942 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1943 struct device_node *cpu_node;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001944
Derek Basehore920181c2018-02-28 21:48:20 -08001945 cpu_node = of_get_cpu_node(cpu, NULL);
1946 if (its->numa_node != NUMA_NO_NODE &&
1947 its->numa_node != of_node_to_nid(cpu_node))
1948 return;
1949 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001950
Derek Basehore920181c2018-02-28 21:48:20 -08001951 /*
1952 * We now have to bind each collection to its target
1953 * redistributor.
1954 */
1955 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001956 /*
Derek Basehore920181c2018-02-28 21:48:20 -08001957 * This ITS wants the physical address of the
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001958 * redistributor.
1959 */
Derek Basehore920181c2018-02-28 21:48:20 -08001960 target = gic_data_rdist()->phys_base;
1961 } else {
1962 /* This ITS wants a linear CPU number. */
1963 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
1964 target = GICR_TYPER_CPU_NUMBER(target) << 16;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001965 }
1966
Derek Basehore920181c2018-02-28 21:48:20 -08001967 /* Perform collection mapping */
1968 its->collections[cpu].target_address = target;
1969 its->collections[cpu].col_id = cpu;
1970
1971 its_send_mapc(its, &its->collections[cpu], 1);
1972 its_send_invall(its, &its->collections[cpu]);
1973}
1974
1975static void its_cpu_init_collections(void)
1976{
1977 struct its_node *its;
1978
1979 spin_lock(&its_lock);
1980
1981 list_for_each_entry(its, &its_nodes, entry)
1982 its_cpu_init_collection(its);
1983
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001984 spin_unlock(&its_lock);
1985}
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001986
1987static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
1988{
1989 struct its_device *its_dev = NULL, *tmp;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00001990 unsigned long flags;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001991
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00001992 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00001993
1994 list_for_each_entry(tmp, &its->its_device_list, entry) {
1995 if (tmp->device_id == dev_id) {
1996 its_dev = tmp;
1997 break;
1998 }
1999 }
2000
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002001 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002002
2003 return its_dev;
2004}
2005
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002006static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2007{
2008 int i;
2009
2010 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2011 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2012 return &its->tables[i];
2013 }
2014
2015 return NULL;
2016}
2017
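/*
 * Indexing example (assuming 8-byte entries and a 64K lvl2 page size):
 * each lvl2 page covers 64K / 8 = 8192 IDs, so ID 20000 maps to lvl1
 * index 20000 >> 13 = 2, and the corresponding lvl2 page is allocated
 * on first use.
 */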
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002018static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002019{
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002020 struct page *page;
2021 u32 esz, idx;
2022 __le64 *table;
2023
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002024 /* Don't allow device id that exceeds single, flat table limit */
2025 esz = GITS_BASER_ENTRY_SIZE(baser->val);
2026 if (!(baser->val & GITS_BASER_INDIRECT))
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002027 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002028
2029 /* Compute 1st level table index & check if that exceeds table limit */
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002030 idx = id >> ilog2(baser->psz / esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002031 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2032 return false;
2033
2034 table = baser->base;
2035
2036 /* Allocate memory for 2nd level table */
2037 if (!table[idx]) {
2038 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
2039 if (!page)
2040 return false;
2041
2042 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2043 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002044 gic_flush_dcache_to_poc(page_address(page), baser->psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002045
2046 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2047
2048 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2049 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002050 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002051
2052 /* Ensure updated table contents are visible to ITS hardware */
2053 dsb(sy);
2054 }
2055
2056 return true;
2057}
2058
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002059static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2060{
2061 struct its_baser *baser;
2062
2063 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
2064
2065 /* Don't allow device id that exceeds ITS hardware limit */
2066 if (!baser)
2067 return (ilog2(dev_id) < its->device_ids);
2068
2069 return its_alloc_table_entry(baser, dev_id);
2070}
2071
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002072static bool its_alloc_vpe_table(u32 vpe_id)
2073{
2074 struct its_node *its;
2075
2076 /*
2077 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2078 * could try and only do it on ITSs corresponding to devices
2079 * that have interrupts targeted at this VPE, but the
2080 * complexity becomes crazy (and you have tons of memory
2081 * anyway, right?).
2082 */
2083 list_for_each_entry(its, &its_nodes, entry) {
2084 struct its_baser *baser;
2085
2086 if (!its->is_v4)
2087 continue;
2088
2089 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2090 if (!baser)
2091 return false;
2092
2093 if (!its_alloc_table_entry(baser, vpe_id))
2094 return false;
2095 }
2096
2097 return true;
2098}
2099
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002100static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002101 int nvecs, bool alloc_lpis)
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002102{
2103 struct its_device *dev;
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002104 unsigned long *lpi_map = NULL;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002105 unsigned long flags;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002106 u16 *col_map = NULL;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002107 void *itt;
2108 int lpi_base;
2109 int nr_lpis;
Marc Zyngierc8481262014-12-12 10:51:24 +00002110 int nr_ites;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002111 int sz;
2112
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002113 if (!its_alloc_device_table(its, dev_id))
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002114 return NULL;
2115
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002116 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
Marc Zyngierc8481262014-12-12 10:51:24 +00002117 /*
Ard Biesheuvel4f2c7582018-03-06 15:51:32 +00002118 * We allocate at least one chunk's worth of LPIs per device,
2119 * and thus that many ITEs. The device may require less though.
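 * For example, a request for 3 MSIs still ends up with
 * max(IRQS_PER_CHUNK, roundup_pow_of_two(3)) = 32 ITEs.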
Marc Zyngierc8481262014-12-12 10:51:24 +00002120 */
Ard Biesheuvel4f2c7582018-03-06 15:51:32 +00002121 nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
Marc Zyngierc8481262014-12-12 10:51:24 +00002122 sz = nr_ites * its->ite_size;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002123 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
Yun Wu6c834122015-03-06 16:37:46 +00002124 itt = kzalloc(sz, GFP_KERNEL);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002125 if (alloc_lpis) {
2126 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
2127 if (lpi_map)
Kees Cook6396bb22018-06-12 14:03:40 -07002128 col_map = kcalloc(nr_lpis, sizeof(*col_map),
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002129 GFP_KERNEL);
2130 } else {
Kees Cook6396bb22018-06-12 14:03:40 -07002131 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002132 nr_lpis = 0;
2133 lpi_base = 0;
2134 }
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002135
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002136 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002137 kfree(dev);
2138 kfree(itt);
2139 kfree(lpi_map);
Marc Zyngier591e5be2015-07-17 10:46:42 +01002140 kfree(col_map);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002141 return NULL;
2142 }
2143
Vladimir Murzin328191c2016-11-02 11:54:05 +00002144 gic_flush_dcache_to_poc(itt, sz);
Marc Zyngier5a9a8912015-09-13 12:14:32 +01002145
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002146 dev->its = its;
2147 dev->itt = itt;
Marc Zyngierc8481262014-12-12 10:51:24 +00002148 dev->nr_ites = nr_ites;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002149 dev->event_map.lpi_map = lpi_map;
2150 dev->event_map.col_map = col_map;
2151 dev->event_map.lpi_base = lpi_base;
2152 dev->event_map.nr_lpis = nr_lpis;
Marc Zyngierd011e4e2016-12-20 09:44:41 +00002153 mutex_init(&dev->event_map.vlpi_lock);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002154 dev->device_id = dev_id;
2155 INIT_LIST_HEAD(&dev->entry);
2156
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002157 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002158 list_add(&dev->entry, &its->its_device_list);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002159 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002160
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002161 /* Map device to its ITT */
2162 its_send_mapd(dev, 1);
2163
2164 return dev;
2165}
2166
2167static void its_free_device(struct its_device *its_dev)
2168{
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002169 unsigned long flags;
2170
2171 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002172 list_del(&its_dev->entry);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002173 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002174 kfree(its_dev->itt);
2175 kfree(its_dev);
2176}
Marc Zyngierb48ac832014-11-24 14:35:16 +00002177
2178static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
2179{
2180 int idx;
2181
Marc Zyngier591e5be2015-07-17 10:46:42 +01002182 idx = find_first_zero_bit(dev->event_map.lpi_map,
2183 dev->event_map.nr_lpis);
2184 if (idx == dev->event_map.nr_lpis)
Marc Zyngierb48ac832014-11-24 14:35:16 +00002185 return -ENOSPC;
2186
Marc Zyngier591e5be2015-07-17 10:46:42 +01002187 *hwirq = dev->event_map.lpi_base + idx;
2188 set_bit(idx, dev->event_map.lpi_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002189
Marc Zyngierb48ac832014-11-24 14:35:16 +00002190 return 0;
2191}
2192
Marc Zyngier54456db2015-07-28 14:46:21 +01002193static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2194 int nvec, msi_alloc_info_t *info)
Marc Zyngiere8137f42015-03-06 16:37:42 +00002195{
Marc Zyngierb48ac832014-11-24 14:35:16 +00002196 struct its_node *its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002197 struct its_device *its_dev;
Marc Zyngier54456db2015-07-28 14:46:21 +01002198 struct msi_domain_info *msi_info;
2199 u32 dev_id;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002200
Marc Zyngier54456db2015-07-28 14:46:21 +01002201 /*
2202 * We ignore "dev" entierely, and rely on the dev_id that has
2203 * been passed via the scratchpad. This limits this domain's
2204 * usefulness to upper layers that definitely know that they
2205 * are built on top of the ITS.
2206 */
2207 dev_id = info->scratchpad[0].ul;
2208
2209 msi_info = msi_get_domain_info(domain);
2210 its = msi_info->data;
2211
Marc Zyngier20b3d542016-12-20 15:23:22 +00002212 if (!gic_rdists->has_direct_lpi &&
2213 vpe_proxy.dev &&
2214 vpe_proxy.dev->its == its &&
2215 dev_id == vpe_proxy.dev->device_id) {
2216 /* Bad luck. Get yourself a better implementation */
2217 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2218 dev_id);
2219 return -EINVAL;
2220 }
2221
Marc Zyngierf1304202015-07-28 14:46:18 +01002222 its_dev = its_find_device(its, dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002223 if (its_dev) {
2224 /*
2225 * We have already seen this ID, probably through
2226 * another alias (PCI bridge of some sort). No need to
2227 * create the device.
2228 */
Marc Zyngierf1304202015-07-28 14:46:18 +01002229 pr_debug("Reusing ITT for devID %x\n", dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002230 goto out;
2231 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002232
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002233 its_dev = its_create_device(its, dev_id, nvec, true);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002234 if (!its_dev)
2235 return -ENOMEM;
2236
Marc Zyngierf1304202015-07-28 14:46:18 +01002237 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
Marc Zyngiere8137f42015-03-06 16:37:42 +00002238out:
Marc Zyngierb48ac832014-11-24 14:35:16 +00002239 info->scratchpad[0].ptr = its_dev;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002240 return 0;
2241}
2242
Marc Zyngier54456db2015-07-28 14:46:21 +01002243static struct msi_domain_ops its_msi_domain_ops = {
2244 .msi_prepare = its_msi_prepare,
2245};
2246
Marc Zyngierb48ac832014-11-24 14:35:16 +00002247static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2248 unsigned int virq,
2249 irq_hw_number_t hwirq)
2250{
Marc Zyngierf833f572015-10-13 12:51:33 +01002251 struct irq_fwspec fwspec;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002252
Marc Zyngierf833f572015-10-13 12:51:33 +01002253 if (irq_domain_get_of_node(domain->parent)) {
2254 fwspec.fwnode = domain->parent->fwnode;
2255 fwspec.param_count = 3;
2256 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2257 fwspec.param[1] = hwirq;
2258 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02002259 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2260 fwspec.fwnode = domain->parent->fwnode;
2261 fwspec.param_count = 2;
2262 fwspec.param[0] = hwirq;
2263 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
Marc Zyngierf833f572015-10-13 12:51:33 +01002264 } else {
2265 return -EINVAL;
2266 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002267
Marc Zyngierf833f572015-10-13 12:51:33 +01002268 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002269}
2270
2271static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2272 unsigned int nr_irqs, void *args)
2273{
2274 msi_alloc_info_t *info = args;
2275 struct its_device *its_dev = info->scratchpad[0].ptr;
2276 irq_hw_number_t hwirq;
2277 int err;
2278 int i;
2279
2280 for (i = 0; i < nr_irqs; i++) {
2281 err = its_alloc_device_irq(its_dev, &hwirq);
2282 if (err)
2283 return err;
2284
2285 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
2286 if (err)
2287 return err;
2288
2289 irq_domain_set_hwirq_and_chip(domain, virq + i,
2290 hwirq, &its_irq_chip, its_dev);
Marc Zyngier0d224d32017-08-18 09:39:18 +01002291 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
Marc Zyngierf1304202015-07-28 14:46:18 +01002292 pr_debug("ID:%d pID:%d vID:%d\n",
2293 (int)(hwirq - its_dev->event_map.lpi_base),
2294 (int) hwirq, virq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002295 }
2296
2297 return 0;
2298}
2299
Thomas Gleixner72491642017-09-13 23:29:10 +02002300static int its_irq_domain_activate(struct irq_domain *domain,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01002301 struct irq_data *d, bool reserve)
Marc Zyngieraca268d2014-12-12 10:51:23 +00002302{
2303 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2304 u32 event = its_get_event_id(d);
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002305 const struct cpumask *cpu_mask = cpu_online_mask;
Marc Zyngier0d224d32017-08-18 09:39:18 +01002306 int cpu;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002307
2308 /* get the cpu_mask of local node */
2309 if (its_dev->its->numa_node >= 0)
2310 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002311
Marc Zyngier591e5be2015-07-17 10:46:42 +01002312 /* Bind the LPI to the first possible CPU */
Yang Yingliangc1797b12018-06-22 10:52:51 +01002313 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2314 if (cpu >= nr_cpu_ids) {
2315 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2316 return -EINVAL;
2317
2318 cpu = cpumask_first(cpu_online_mask);
2319 }
2320
Marc Zyngier0d224d32017-08-18 09:39:18 +01002321 its_dev->event_map.col_map[event] = cpu;
2322 irq_data_update_effective_affinity(d, cpumask_of(cpu));
Marc Zyngier591e5be2015-07-17 10:46:42 +01002323
Marc Zyngieraca268d2014-12-12 10:51:23 +00002324 /* Map the GIC IRQ and event to the device */
Marc Zyngier6a25ad32016-12-20 15:52:26 +00002325 its_send_mapti(its_dev, d->hwirq, event);
Thomas Gleixner72491642017-09-13 23:29:10 +02002326 return 0;
Marc Zyngieraca268d2014-12-12 10:51:23 +00002327}
2328
2329static void its_irq_domain_deactivate(struct irq_domain *domain,
2330 struct irq_data *d)
2331{
2332 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2333 u32 event = its_get_event_id(d);
2334
2335 /* Stop the delivery of interrupts */
2336 its_send_discard(its_dev, event);
2337}
2338
Marc Zyngierb48ac832014-11-24 14:35:16 +00002339static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2340 unsigned int nr_irqs)
2341{
2342 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2343 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2344 int i;
2345
2346 for (i = 0; i < nr_irqs; i++) {
2347 struct irq_data *data = irq_domain_get_irq_data(domain,
2348 virq + i);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002349 u32 event = its_get_event_id(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002350
2351 /* Mark interrupt index as unused */
Marc Zyngier591e5be2015-07-17 10:46:42 +01002352 clear_bit(event, its_dev->event_map.lpi_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002353
2354 /* Nuke the entry in the domain */
Marc Zyngier2da39942014-12-12 10:51:22 +00002355 irq_domain_reset_irq_data(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002356 }
2357
2358 /* If all interrupts have been freed, start mopping the floor */
Marc Zyngier591e5be2015-07-17 10:46:42 +01002359 if (bitmap_empty(its_dev->event_map.lpi_map,
2360 its_dev->event_map.nr_lpis)) {
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00002361 its_lpi_free_chunks(its_dev->event_map.lpi_map,
2362 its_dev->event_map.lpi_base,
2363 its_dev->event_map.nr_lpis);
2364 kfree(its_dev->event_map.col_map);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002365
2366 /* Unmap device/itt */
2367 its_send_mapd(its_dev, 0);
2368 its_free_device(its_dev);
2369 }
2370
2371 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2372}
2373
2374static const struct irq_domain_ops its_domain_ops = {
2375 .alloc = its_irq_domain_alloc,
2376 .free = its_irq_domain_free,
Marc Zyngieraca268d2014-12-12 10:51:23 +00002377 .activate = its_irq_domain_activate,
2378 .deactivate = its_irq_domain_deactivate,
Marc Zyngierb48ac832014-11-24 14:35:16 +00002379};
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002380
Marc Zyngier20b3d542016-12-20 15:23:22 +00002381/*
2382 * This is insane.
2383 *
2384 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2385 * likely), the only way to perform an invalidate is to use a fake
2386 * device to issue an INV command, implying that the LPI has first
2387 * been mapped to some event on that device. Since this is not exactly
2388 * cheap, we try to keep that mapping around as long as possible, and
2389 * only issue an UNMAP if we're short on available slots.
2390 *
2391 * Broken by design(tm).
2392 */
2393static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2394{
2395 /* Already unmapped? */
2396 if (vpe->vpe_proxy_event == -1)
2397 return;
2398
2399 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2400 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2401
2402 /*
2403 * We don't track empty slots at all, so let's move the
2404 * next_victim pointer if we can quickly reuse that slot
2405 * instead of nuking an existing entry. Not clear that this is
2406 * always a win though, and this might just generate a ripple
2407 * effect... Let's just hope VPEs don't migrate too often.
2408 */
2409 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2410 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2411
2412 vpe->vpe_proxy_event = -1;
2413}
2414
2415static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2416{
2417 if (!gic_rdists->has_direct_lpi) {
2418 unsigned long flags;
2419
2420 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2421 its_vpe_db_proxy_unmap_locked(vpe);
2422 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2423 }
2424}
2425
2426static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2427{
2428 /* Already mapped? */
2429 if (vpe->vpe_proxy_event != -1)
2430 return;
2431
2432 /* This slot was already allocated. Kick the other VPE out. */
2433 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2434 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2435
2436 /* Map the new VPE instead */
2437 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2438 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2439 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2440
2441 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2442 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2443}
2444
Marc Zyngier958b90d2017-08-18 16:14:17 +01002445static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2446{
2447 unsigned long flags;
2448 struct its_collection *target_col;
2449
2450 if (gic_rdists->has_direct_lpi) {
2451 void __iomem *rdbase;
2452
2453 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2454 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2455 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2456 cpu_relax();
2457
2458 return;
2459 }
2460
2461 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2462
2463 its_vpe_db_proxy_map_locked(vpe);
2464
2465 target_col = &vpe_proxy.dev->its->collections[to];
2466 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2467 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2468
2469 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2470}
2471
Marc Zyngier3171a472016-12-20 15:17:28 +00002472static int its_vpe_set_affinity(struct irq_data *d,
2473 const struct cpumask *mask_val,
2474 bool force)
2475{
2476 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2477 int cpu = cpumask_first(mask_val);
2478
2479 /*
2480 * Changing affinity is mega expensive, so let's be as lazy as
Marc Zyngier20b3d542016-12-20 15:23:22 +00002481 * we can and only do it if we really have to. Also, if mapped
Marc Zyngier958b90d2017-08-18 16:14:17 +01002482 * into the proxy device, we need to move the doorbell
2483 * interrupt to its new location.
Marc Zyngier3171a472016-12-20 15:17:28 +00002484 */
2485 if (vpe->col_idx != cpu) {
Marc Zyngier958b90d2017-08-18 16:14:17 +01002486 int from = vpe->col_idx;
2487
Marc Zyngier3171a472016-12-20 15:17:28 +00002488 vpe->col_idx = cpu;
2489 its_send_vmovp(vpe);
Marc Zyngier958b90d2017-08-18 16:14:17 +01002490 its_vpe_db_proxy_move(vpe, from, cpu);
Marc Zyngier3171a472016-12-20 15:17:28 +00002491 }
2492
Marc Zyngier44c4c252017-10-19 10:11:34 +01002493 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2494
Marc Zyngier3171a472016-12-20 15:17:28 +00002495 return IRQ_SET_MASK_OK_DONE;
2496}
2497
Marc Zyngiere643d802016-12-20 15:09:31 +00002498static void its_vpe_schedule(struct its_vpe *vpe)
2499{
Robin Murphy50c33092018-02-16 16:57:56 +00002500 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
Marc Zyngiere643d802016-12-20 15:09:31 +00002501 u64 val;
2502
2503 /* Schedule the VPE */
2504 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2505 GENMASK_ULL(51, 12);
2506 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2507 val |= GICR_VPROPBASER_RaWb;
2508 val |= GICR_VPROPBASER_InnerShareable;
2509 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2510
2511 val = virt_to_phys(page_address(vpe->vpt_page)) &
2512 GENMASK_ULL(51, 16);
2513 val |= GICR_VPENDBASER_RaWaWb;
2514 val |= GICR_VPENDBASER_NonShareable;
2515 /*
2516 * There is no good way of finding out if the pending table is
2517 * empty as we can race against the doorbell interrupt very
2518 * easily. So in the end, vpe->pending_last is only an
2519 * indication that the vcpu has something pending, not one
2520 * that the pending table is empty. A good implementation
2521 * would be able to read its coarse map pretty quickly anyway,
2522 * making this a tolerable issue.
2523 */
2524 val |= GICR_VPENDBASER_PendingLast;
2525 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2526 val |= GICR_VPENDBASER_Valid;
2527 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2528}
2529
2530static void its_vpe_deschedule(struct its_vpe *vpe)
2531{
Robin Murphy50c33092018-02-16 16:57:56 +00002532 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
Marc Zyngiere643d802016-12-20 15:09:31 +00002533 u32 count = 1000000; /* 1s! */
2534 bool clean;
2535 u64 val;
2536
2537 /* We're being scheduled out */
2538 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2539 val &= ~GICR_VPENDBASER_Valid;
2540 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2541
2542 do {
2543 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2544 clean = !(val & GICR_VPENDBASER_Dirty);
2545 if (!clean) {
2546 count--;
2547 cpu_relax();
2548 udelay(1);
2549 }
2550 } while (!clean && count);
2551
2552 if (unlikely(!clean && !count)) {
2553 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2554 vpe->idai = false;
2555 vpe->pending_last = true;
2556 } else {
2557 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2558 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2559 }
2560}
2561
Marc Zyngier40619a22017-10-08 15:16:09 +01002562static void its_vpe_invall(struct its_vpe *vpe)
2563{
2564 struct its_node *its;
2565
2566 list_for_each_entry(its, &its_nodes, entry) {
2567 if (!its->is_v4)
2568 continue;
2569
Marc Zyngier2247e1b2017-10-08 18:50:36 +01002570 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
2571 continue;
2572
Marc Zyngier3c1ccee2017-10-09 13:17:43 +01002573 /*
2574 * Sending a VINVALL to a single ITS is enough, as all
2575 * we need is to reach the redistributors.
2576 */
Marc Zyngier40619a22017-10-08 15:16:09 +01002577 its_send_vinvall(its, vpe);
Marc Zyngier3c1ccee2017-10-09 13:17:43 +01002578 return;
Marc Zyngier40619a22017-10-08 15:16:09 +01002579 }
2580}
2581
Marc Zyngiere643d802016-12-20 15:09:31 +00002582static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2583{
2584 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2585 struct its_cmd_info *info = vcpu_info;
2586
2587 switch (info->cmd_type) {
2588 case SCHEDULE_VPE:
2589 its_vpe_schedule(vpe);
2590 return 0;
2591
2592 case DESCHEDULE_VPE:
2593 its_vpe_deschedule(vpe);
2594 return 0;
2595
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002596 case INVALL_VPE:
Marc Zyngier40619a22017-10-08 15:16:09 +01002597 its_vpe_invall(vpe);
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002598 return 0;
2599
Marc Zyngiere643d802016-12-20 15:09:31 +00002600 default:
2601 return -EINVAL;
2602 }
2603}
2604
Marc Zyngier20b3d542016-12-20 15:23:22 +00002605static void its_vpe_send_cmd(struct its_vpe *vpe,
2606 void (*cmd)(struct its_device *, u32))
2607{
2608 unsigned long flags;
2609
2610 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2611
2612 its_vpe_db_proxy_map_locked(vpe);
2613 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2614
2615 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2616}
2617
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002618static void its_vpe_send_inv(struct irq_data *d)
2619{
2620 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002621
Marc Zyngier20b3d542016-12-20 15:23:22 +00002622 if (gic_rdists->has_direct_lpi) {
2623 void __iomem *rdbase;
2624
2625 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2626 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2627 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2628 cpu_relax();
2629 } else {
2630 its_vpe_send_cmd(vpe, its_send_inv);
2631 }
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002632}
2633
2634static void its_vpe_mask_irq(struct irq_data *d)
2635{
2636 /*
2637 * We need to mask the LPI, which is described by the parent
2638 * irq_data. Instead of calling into the parent (which won't
2639 * exactly do the right thing), let's simply use the
2640 * parent_data pointer. Yes, I'm naughty.
2641 */
2642 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2643 its_vpe_send_inv(d);
2644}
2645
2646static void its_vpe_unmask_irq(struct irq_data *d)
2647{
2648 /* Same hack as above... */
2649 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2650 its_vpe_send_inv(d);
2651}
2652
Marc Zyngiere57a3e282017-07-31 14:47:24 +01002653static int its_vpe_set_irqchip_state(struct irq_data *d,
2654 enum irqchip_irq_state which,
2655 bool state)
2656{
2657 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2658
2659 if (which != IRQCHIP_STATE_PENDING)
2660 return -EINVAL;
2661
2662 if (gic_rdists->has_direct_lpi) {
2663 void __iomem *rdbase;
2664
2665 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2666 if (state) {
2667 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
2668 } else {
2669 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2670 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2671 cpu_relax();
2672 }
2673 } else {
2674 if (state)
2675 its_vpe_send_cmd(vpe, its_send_int);
2676 else
2677 its_vpe_send_cmd(vpe, its_send_clear);
2678 }
2679
2680 return 0;
2681}
2682
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002683static struct irq_chip its_vpe_irq_chip = {
2684 .name = "GICv4-vpe",
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002685 .irq_mask = its_vpe_mask_irq,
2686 .irq_unmask = its_vpe_unmask_irq,
2687 .irq_eoi = irq_chip_eoi_parent,
Marc Zyngier3171a472016-12-20 15:17:28 +00002688 .irq_set_affinity = its_vpe_set_affinity,
Marc Zyngiere57a3e282017-07-31 14:47:24 +01002689 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
Marc Zyngiere643d802016-12-20 15:09:31 +00002690 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002691};
2692
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002693static int its_vpe_id_alloc(void)
2694{
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05002695 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002696}
2697
2698static void its_vpe_id_free(u16 id)
2699{
2700 ida_simple_remove(&its_vpeid_ida, id);
2701}
2702
2703static int its_vpe_init(struct its_vpe *vpe)
2704{
2705 struct page *vpt_page;
2706 int vpe_id;
2707
2708 /* Allocate vpe_id */
2709 vpe_id = its_vpe_id_alloc();
2710 if (vpe_id < 0)
2711 return vpe_id;
2712
2713 /* Allocate VPT */
2714 vpt_page = its_allocate_pending_table(GFP_KERNEL);
2715 if (!vpt_page) {
2716 its_vpe_id_free(vpe_id);
2717 return -ENOMEM;
2718 }
2719
2720 if (!its_alloc_vpe_table(vpe_id)) {
2721 its_vpe_id_free(vpe_id);
2722 its_free_pending_table(vpe->vpt_page);
2723 return -ENOMEM;
2724 }
2725
2726 vpe->vpe_id = vpe_id;
2727 vpe->vpt_page = vpt_page;
Marc Zyngier20b3d542016-12-20 15:23:22 +00002728 vpe->vpe_proxy_event = -1;
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002729
2730 return 0;
2731}
2732
2733static void its_vpe_teardown(struct its_vpe *vpe)
2734{
Marc Zyngier20b3d542016-12-20 15:23:22 +00002735 its_vpe_db_proxy_unmap(vpe);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002736 its_vpe_id_free(vpe->vpe_id);
2737 its_free_pending_table(vpe->vpt_page);
2738}
2739
2740static void its_vpe_irq_domain_free(struct irq_domain *domain,
2741 unsigned int virq,
2742 unsigned int nr_irqs)
2743{
2744 struct its_vm *vm = domain->host_data;
2745 int i;
2746
2747 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2748
2749 for (i = 0; i < nr_irqs; i++) {
2750 struct irq_data *data = irq_domain_get_irq_data(domain,
2751 virq + i);
2752 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
2753
2754 BUG_ON(vm != vpe->its_vm);
2755
2756 clear_bit(data->hwirq, vm->db_bitmap);
2757 its_vpe_teardown(vpe);
2758 irq_domain_reset_irq_data(data);
2759 }
2760
2761 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
2762 its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
2763 its_free_prop_table(vm->vprop_page);
2764 }
2765}
2766
2767static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2768 unsigned int nr_irqs, void *args)
2769{
2770 struct its_vm *vm = args;
2771 unsigned long *bitmap;
2772 struct page *vprop_page;
2773 int base, nr_ids, i, err = 0;
2774
2775 BUG_ON(!vm);
2776
2777 bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
2778 if (!bitmap)
2779 return -ENOMEM;
2780
2781 if (nr_ids < nr_irqs) {
2782 its_lpi_free_chunks(bitmap, base, nr_ids);
2783 return -ENOMEM;
2784 }
2785
2786 vprop_page = its_allocate_prop_table(GFP_KERNEL);
2787 if (!vprop_page) {
2788 its_lpi_free_chunks(bitmap, base, nr_ids);
2789 return -ENOMEM;
2790 }
2791
2792 vm->db_bitmap = bitmap;
2793 vm->db_lpi_base = base;
2794 vm->nr_db_lpis = nr_ids;
2795 vm->vprop_page = vprop_page;
2796
2797 for (i = 0; i < nr_irqs; i++) {
2798 vm->vpes[i]->vpe_db_lpi = base + i;
2799 err = its_vpe_init(vm->vpes[i]);
2800 if (err)
2801 break;
2802 err = its_irq_gic_domain_alloc(domain, virq + i,
2803 vm->vpes[i]->vpe_db_lpi);
2804 if (err)
2805 break;
2806 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
2807 &its_vpe_irq_chip, vm->vpes[i]);
2808 set_bit(i, bitmap);
2809 }
2810
2811 if (err) {
2812 if (i > 0)
2813 its_vpe_irq_domain_free(domain, virq, i - 1);
2814
2815 its_lpi_free_chunks(bitmap, base, nr_ids);
2816 its_free_prop_table(vprop_page);
2817 }
2818
2819 return err;
2820}
2821
Thomas Gleixner72491642017-09-13 23:29:10 +02002822static int its_vpe_irq_domain_activate(struct irq_domain *domain,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01002823 struct irq_data *d, bool reserve)
Marc Zyngiereb781922016-12-20 14:47:05 +00002824{
2825 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngier40619a22017-10-08 15:16:09 +01002826 struct its_node *its;
Marc Zyngiereb781922016-12-20 14:47:05 +00002827
Marc Zyngier2247e1b2017-10-08 18:50:36 +01002828 /* If we use the list map, we issue VMAPP on demand... */
2829 if (its_list_map)
Marc Zyngier6ef930f2017-11-07 10:04:38 +00002830 return 0;
Marc Zyngiereb781922016-12-20 14:47:05 +00002831
2832 /* Map the VPE to the first possible CPU */
2833 vpe->col_idx = cpumask_first(cpu_online_mask);
Marc Zyngier40619a22017-10-08 15:16:09 +01002834
2835 list_for_each_entry(its, &its_nodes, entry) {
2836 if (!its->is_v4)
2837 continue;
2838
Marc Zyngier75fd9512017-10-08 18:46:39 +01002839 its_send_vmapp(its, vpe, true);
Marc Zyngier40619a22017-10-08 15:16:09 +01002840 its_send_vinvall(its, vpe);
2841 }
2842
Marc Zyngier44c4c252017-10-19 10:11:34 +01002843 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
2844
Thomas Gleixner72491642017-09-13 23:29:10 +02002845 return 0;
Marc Zyngiereb781922016-12-20 14:47:05 +00002846}
2847
2848static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
2849 struct irq_data *d)
2850{
2851 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngier75fd9512017-10-08 18:46:39 +01002852 struct its_node *its;
Marc Zyngiereb781922016-12-20 14:47:05 +00002853
Marc Zyngier2247e1b2017-10-08 18:50:36 +01002854 /*
2855 * If we use the list map, we unmap the VPE once no VLPIs are
2856 * associated with the VM.
2857 */
2858 if (its_list_map)
2859 return;
2860
Marc Zyngier75fd9512017-10-08 18:46:39 +01002861 list_for_each_entry(its, &its_nodes, entry) {
2862 if (!its->is_v4)
2863 continue;
2864
2865 its_send_vmapp(its, vpe, false);
2866 }
Marc Zyngiereb781922016-12-20 14:47:05 +00002867}
2868
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002869static const struct irq_domain_ops its_vpe_domain_ops = {
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002870 .alloc = its_vpe_irq_domain_alloc,
2871 .free = its_vpe_irq_domain_free,
Marc Zyngiereb781922016-12-20 14:47:05 +00002872 .activate = its_vpe_irq_domain_activate,
2873 .deactivate = its_vpe_irq_domain_deactivate,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00002874};
2875
Yun Wu4559fbb2015-03-06 16:37:50 +00002876static int its_force_quiescent(void __iomem *base)
2877{
2878 u32 count = 1000000; /* 1s */
2879 u32 val;
2880
2881 val = readl_relaxed(base + GITS_CTLR);
David Daney7611da82016-08-18 15:41:58 -07002882 /*
2883 * GIC architecture specification requires the ITS to be both
2884 * disabled and quiescent for writes to GITS_BASER<n> or
2885 * GITS_CBASER to not have UNPREDICTABLE results.
2886 */
2887 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
Yun Wu4559fbb2015-03-06 16:37:50 +00002888 return 0;
2889
2890 /* Disable the generation of all interrupts to this ITS */
Marc Zyngierd51c4b42017-06-27 21:24:25 +01002891 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
Yun Wu4559fbb2015-03-06 16:37:50 +00002892 writel_relaxed(val, base + GITS_CTLR);
2893
2894 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
2895 while (1) {
2896 val = readl_relaxed(base + GITS_CTLR);
2897 if (val & GITS_CTLR_QUIESCENT)
2898 return 0;
2899
2900 count--;
2901 if (!count)
2902 return -EBUSY;
2903
2904 cpu_relax();
2905 udelay(1);
2906 }
2907}
2908
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01002909static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
Robert Richter94100972015-09-21 22:58:38 +02002910{
2911 struct its_node *its = data;
2912
Ard Biesheuvelfa150012017-10-17 17:55:54 +01002913 /* erratum 22375: only alloc 8MB table size */
2914 its->device_ids = 0x14; /* 20 bits, 8MB */
Robert Richter94100972015-09-21 22:58:38 +02002915 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01002916
2917 return true;
Robert Richter94100972015-09-21 22:58:38 +02002918}
2919
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01002920static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002921{
2922 struct its_node *its = data;
2923
2924 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01002925
2926 return true;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002927}
2928
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01002929static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
Shanker Donthineni90922a22017-03-07 08:20:38 -06002930{
2931 struct its_node *its = data;
2932
2933 /* On QDF2400, the size of the ITE is 16 bytes */
2934 its->ite_size = 16;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01002935
2936 return true;
Shanker Donthineni90922a22017-03-07 08:20:38 -06002937}
2938
Ard Biesheuvel558b0162017-10-17 17:55:56 +01002939static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
2940{
2941 struct its_node *its = its_dev->its;
2942
2943 /*
2944 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
2945 * which maps 32-bit writes targeted at a separate window of
2946 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
2947 * with device ID taken from bits [device_id_bits + 1:2] of
2948 * the window offset.
2949 */
2950 return its->pre_its_base + (its_dev->device_id << 2);
2951}
2952
2953static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
2954{
2955 struct its_node *its = data;
2956 u32 pre_its_window[2];
2957 u32 ids;
2958
2959 if (!fwnode_property_read_u32_array(its->fwnode_handle,
2960 "socionext,synquacer-pre-its",
2961 pre_its_window,
2962 ARRAY_SIZE(pre_its_window))) {
2963
2964 its->pre_its_base = pre_its_window[0];
2965 its->get_msi_base = its_irq_get_msi_base_pre_its;
2966
2967 ids = ilog2(pre_its_window[1]) - 2;
2968 if (its->device_ids > ids)
2969 its->device_ids = ids;
2970
2971 /* the pre-ITS breaks isolation, so disable MSI remapping */
2972 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
2973 return true;
2974 }
2975 return false;
2976}
2977
Marc Zyngier5c9a8822017-07-28 21:20:37 +01002978static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
2979{
2980 struct its_node *its = data;
2981
2982 /*
2983 * Hip07 insists on using the wrong address for the VLPI
2984 * page. Trick it into doing the right thing...
2985 */
2986 its->vlpi_redist_offset = SZ_128K;
2987 return true;
Marc Zyngiercc2d3212014-11-24 14:35:11 +00002988}
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002989
Robert Richter67510cc2015-09-21 22:58:37 +02002990static const struct gic_quirk its_quirks[] = {
Robert Richter94100972015-09-21 22:58:38 +02002991#ifdef CONFIG_CAVIUM_ERRATUM_22375
2992 {
2993 .desc = "ITS: Cavium errata 22375, 24313",
2994 .iidr = 0xa100034c, /* ThunderX pass 1.x */
2995 .mask = 0xffff0fff,
2996 .init = its_enable_quirk_cavium_22375,
2997 },
2998#endif
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002999#ifdef CONFIG_CAVIUM_ERRATUM_23144
3000 {
3001 .desc = "ITS: Cavium erratum 23144",
3002 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3003 .mask = 0xffff0fff,
3004 .init = its_enable_quirk_cavium_23144,
3005 },
3006#endif
Shanker Donthineni90922a22017-03-07 08:20:38 -06003007#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
3008 {
3009 .desc = "ITS: QDF2400 erratum 0065",
3010 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
3011 .mask = 0xffffffff,
3012 .init = its_enable_quirk_qdf2400_e0065,
3013 },
3014#endif
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003015#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
3016 {
3017 /*
3018 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
3019 * implementation, but with a 'pre-ITS' added that requires
3020 * special handling in software.
3021 */
3022 .desc = "ITS: Socionext Synquacer pre-ITS",
3023 .iidr = 0x0001143b,
3024 .mask = 0xffffffff,
3025 .init = its_enable_quirk_socionext_synquacer,
3026 },
3027#endif
Marc Zyngier5c9a8822017-07-28 21:20:37 +01003028#ifdef CONFIG_HISILICON_ERRATUM_161600802
3029 {
3030 .desc = "ITS: Hip07 erratum 161600802",
3031 .iidr = 0x00000004,
3032 .mask = 0xffffffff,
3033 .init = its_enable_quirk_hip07_161600802,
3034 },
3035#endif
Robert Richter67510cc2015-09-21 22:58:37 +02003036 {
3037 }
3038};
3039
3040static void its_enable_quirks(struct its_node *its)
3041{
3042 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
3043
3044 gic_enable_quirks(iidr, its_quirks, its);
3045}
3046
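/*
 * syscore suspend hook: quiesce each ITS flagged with
 * ITS_FLAGS_SAVE_SUSPEND_STATE and save its GITS_CTLR and GITS_CBASER.
 * On failure, restore GITS_CTLR on the ITSes already disabled and
 * propagate the error.
 */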
static int its_save_disable(void)
{
	struct its_node *its;
	int err = 0;

	spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;

		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
			continue;

		base = its->base;
		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
		err = its_force_quiescent(base);
		if (err) {
			pr_err("ITS@%pa: failed to quiesce: %d\n",
			       &its->phys_base, err);
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
			goto err;
		}

		its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
	}

err:
	if (err) {
		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
			void __iomem *base;

			if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
				continue;

			base = its->base;
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
		}
	}
	spin_unlock(&its_lock);

	return err;
}

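/*
 * syscore resume hook: re-quiesce the ITS, then restore GITS_CBASER,
 * GITS_BASER<n> and GITS_CTLR from the values saved at suspend time,
 * resetting the command queue pointers along the way.
 */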
static void its_restore_enable(void)
{
	struct its_node *its;
	int ret;

	spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;
		int i;

		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
			continue;

		base = its->base;

		/*
		 * Make sure that the ITS is disabled. If it fails to quiesce,
		 * don't restore it since writing to CBASER or BASER<n>
		 * registers is undefined according to the GIC v3 ITS
		 * Specification.
		 */
		ret = its_force_quiescent(base);
		if (ret) {
			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
			       &its->phys_base, ret);
			continue;
		}

		gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);

		/*
		 * Writing CBASER resets CREADR to 0, so make CWRITER and
		 * cmd_write line up with it.
		 */
		its->cmd_write = its->cmd_base;
		gits_write_cwriter(0, base + GITS_CWRITER);

		/* Restore GITS_BASER from the value cache. */
		for (i = 0; i < GITS_BASER_NR_REGS; i++) {
			struct its_baser *baser = &its->tables[i];

			if (!(baser->val & GITS_BASER_VALID))
				continue;

			its_write_baser(its, baser, baser->val);
		}
		writel_relaxed(its->ctlr_save, base + GITS_CTLR);

		/*
		 * Reinit the collection if it's stored in the ITS. This is
		 * indicated by the col_id being less than the HCC field.
		 * CID < HCC as specified in the GIC v3 Documentation.
		 */
		if (its->collections[smp_processor_id()].col_id <
		    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
			its_cpu_init_collection(its);
	}
	spin_unlock(&its_lock);
}

static struct syscore_ops its_syscore_ops = {
	.suspend = its_save_disable,
	.resume = its_restore_enable,
};

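/*
 * Create the inner (DOMAIN_BUS_NEXUS) MSI irqdomain for this ITS and
 * hook it up to the parent GIC domain.
 */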
static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
	struct irq_domain *inner_domain;
	struct msi_domain_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
	if (!inner_domain) {
		kfree(info);
		return -ENOMEM;
	}

	inner_domain->parent = its_parent;
	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->flags |= its->msi_domain_flags;
	info->ops = &its_msi_domain_ops;
	info->data = its;
	inner_domain->host_data = info;

	return 0;
}

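/*
 * Set up the GICv4 VPE proxy device: when the redistributors do not
 * support DirectLPI, a proxy its_device with one slot per possible CPU
 * (rounded up to a power of two) is allocated on the first ITS and used
 * for VPE invalidation instead.
 */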
static int its_init_vpe_domain(void)
{
	struct its_node *its;
	u32 devid;
	int entries;

	if (gic_rdists->has_direct_lpi) {
		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
		return 0;
	}

	/* Any ITS will do, even if not v4 */
	its = list_first_entry(&its_nodes, struct its_node, entry);

	entries = roundup_pow_of_two(nr_cpu_ids);
	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
				 GFP_KERNEL);
	if (!vpe_proxy.vpes) {
		pr_err("ITS: Can't allocate GICv4 proxy device array\n");
		return -ENOMEM;
	}

	/* Use the last possible DevID */
	devid = GENMASK(its->device_ids - 1, 0);
	vpe_proxy.dev = its_create_device(its, devid, entries, false);
	if (!vpe_proxy.dev) {
		kfree(vpe_proxy.vpes);
		pr_err("ITS: Can't allocate GICv4 proxy device\n");
		return -ENOMEM;
	}

	BUG_ON(entries > vpe_proxy.dev->nr_ites);

	raw_spin_lock_init(&vpe_proxy.lock);
	vpe_proxy.next_victim = 0;
	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
		devid, vpe_proxy.dev->nr_ites);

	return 0;
}

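/*
 * Allocate an ITSList number for this ITS and program it into
 * GITS_CTLR, falling back to whatever the ITS reports if the field
 * turns out to be read-only. Returns the ITS number, or -EINVAL if the
 * list is exhausted or the entry is already taken.
 */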
static int __init its_compute_its_list_map(struct resource *res,
					   void __iomem *its_base)
{
	int its_number;
	u32 ctlr;

	/*
	 * This is assumed to be done early enough that we're
	 * guaranteed to be single-threaded, hence no
	 * locking. Should this change, we should address
	 * this.
	 */
	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
	if (its_number >= GICv4_ITS_LIST_MAX) {
		pr_err("ITS@%pa: No ITSList entry available!\n",
		       &res->start);
		return -EINVAL;
	}

	ctlr = readl_relaxed(its_base + GITS_CTLR);
	ctlr &= ~GITS_CTLR_ITS_NUMBER;
	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
	writel_relaxed(ctlr, its_base + GITS_CTLR);
	ctlr = readl_relaxed(its_base + GITS_CTLR);
	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
	}

	if (test_and_set_bit(its_number, &its_list_map)) {
		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
		       &res->start, its_number);
		return -EINVAL;
	}

	return its_number;
}

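/*
 * Probe a single ITS instance: map its registers, apply quirks,
 * allocate the command queue, device and collection tables and the MSI
 * domain, then enable the ITS and add it to the global its_nodes list.
 */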
static int __init its_probe_one(struct resource *res,
				struct fwnode_handle *handle, int numa_node)
{
	struct its_node *its;
	void __iomem *its_base;
	u32 val, ctlr;
	u64 baser, tmp, typer;
	int err;

	its_base = ioremap(res->start, resource_size(res));
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		return -ENOMEM;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
		goto out_unmap;
	}

	pr_info("ITS %pR\n", res);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	typer = gic_read_typer(its_base + GITS_TYPER);
	its->base = its_base;
	its->phys_base = res->start;
	its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
	its->device_ids = GITS_TYPER_DEVBITS(typer);
	its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
	if (its->is_v4) {
		if (!(typer & GITS_TYPER_VMOVP)) {
			err = its_compute_its_list_map(res, its_base);
			if (err < 0)
				goto out_free_its;

			its->list_nr = err;

			pr_info("ITS@%pa: Using ITS number %d\n",
				&res->start, err);
		} else {
			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
		}
	}

	its->numa_node = numa_node;

	its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(ITS_CMD_QUEUE_SZ));
	if (!its->cmd_base) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_write = its->cmd_base;
	its->fwnode_handle = handle;
	its->get_msi_base = its_irq_get_msi_base;
	its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;

	its_enable_quirks(its);

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_RaWaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	gits_write_cbaser(baser, its->base + GITS_CBASER);
	tmp = gits_read_cbaser(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			gits_write_cbaser(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr |= GITS_CTLR_ENABLE;
	if (its->is_v4)
		ctlr |= GITS_CTLR_ImDe;
	writel_relaxed(ctlr, its->base + GITS_CTLR);

	if (GITS_TYPER_HCC(typer))
		its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;

	err = its_init_domain(handle, its);
	if (err)
		goto out_free_tables;

	spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
	return err;
}

static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

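/*
 * LPIs may already be enabled on this redistributor, possibly left on
 * by firmware or a previous kernel. Try to turn them off again before
 * the LPI tables are reprogrammed, and error out if the redistributor
 * refuses.
 */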
static int redist_disable_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	u64 timeout = USEC_PER_SEC;
	u64 val;

	if (!gic_rdists_supports_plpis()) {
		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
		return -ENXIO;
	}

	val = readl_relaxed(rbase + GICR_CTLR);
	if (!(val & GICR_CTLR_ENABLE_LPIS))
		return 0;

	pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
		smp_processor_id());
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	/* Disable LPIs */
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure any change to GICR_CTLR is observable by the GIC */
	dsb(sy);

	/*
	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
	 * Error out if we time out waiting for RWP to clear.
	 */
	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
		if (!timeout) {
			pr_err("CPU%d: Timeout while disabling LPIs\n",
			       smp_processor_id());
			return -ETIMEDOUT;
		}
		udelay(1);
		timeout--;
	}

	/*
	 * After it has been written to 1, it is IMPLEMENTATION
	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
	 * cleared to 0. Error out if clearing the bit failed.
	 */
	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
		return -EBUSY;
	}

	return 0;
}

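/*
 * Per-CPU bring-up: make sure LPIs are off on this redistributor, then
 * set up this CPU's LPI tables and map its collection on each ITS.
 */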
int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		int ret;

		ret = redist_disable_lpis();
		if (ret)
			return ret;

		its_cpu_init_lpis();
		its_cpu_init_collections();
	}

	return 0;
}

static const struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};

static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_device_is_available(np))
			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
	}
	return 0;
}

#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)

#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}

static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	return 0;
}

static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}

static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps) {
		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
		return;
	}

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void)	{ }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif

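/*
 * MADT GIC ITS entry handler: register an IORT domain token for the
 * translation ID and probe the ITS at the reported base address, using
 * the NUMA node discovered from SRAT (if any).
 */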
static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, res.start,
					 dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
			      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif

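/*
 * Main entry point, called by the GICv3 driver once the distributor and
 * redistributors are known: probe all ITSes (DT or ACPI), allocate the
 * LPI tables, optionally set up GICv4 VPE support, and register the
 * suspend/resume hooks.
 */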
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	int err;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;
	err = its_alloc_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry)
		has_v4 |= its->is_v4;

	if (has_v4 & rdists->has_vlpis) {
		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	register_syscore_ops(&its_syscore_ops);

	return 0;
}