blob: ead00b2072b260321c31330e20a8031b5ba362d8 [file] [log] [blame]
Christoffer Dall64a959d2015-11-24 16:51:12 +01001/*
2 * Copyright (C) 2015, 2016 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __KVM_ARM_VGIC_NEW_H__
17#define __KVM_ARM_VGIC_NEW_H__
18
Eric Auger90977732015-12-01 15:02:35 +010019#include <linux/irqchip/arm-gic-common.h>
20
Marc Zyngier2b0cda82016-04-26 11:06:47 +010021#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
22#define IMPLEMENTER_ARM 0x43b
23
Eric Augere2c1f9a2015-12-21 16:36:04 +010024#define VGIC_ADDR_UNDEF (-1)
25#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
26
Andre Przywarafd59ed32016-01-27 14:54:30 +000027#define INTERRUPT_ID_BITS_SPIS 10
Andre Przywara33d3bc92016-07-15 12:43:34 +010028#define INTERRUPT_ID_BITS_ITS 16
Andre Przywara055658b2015-12-01 14:34:02 +000029#define VGIC_PRI_BITS 5
30
Marc Zyngier0919e842015-11-26 17:19:25 +000031#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)
32
Vijaya Kumar K94574c92017-01-26 19:50:47 +053033#define VGIC_AFFINITY_0_SHIFT 0
34#define VGIC_AFFINITY_0_MASK (0xffUL << VGIC_AFFINITY_0_SHIFT)
35#define VGIC_AFFINITY_1_SHIFT 8
36#define VGIC_AFFINITY_1_MASK (0xffUL << VGIC_AFFINITY_1_SHIFT)
37#define VGIC_AFFINITY_2_SHIFT 16
38#define VGIC_AFFINITY_2_MASK (0xffUL << VGIC_AFFINITY_2_SHIFT)
39#define VGIC_AFFINITY_3_SHIFT 24
40#define VGIC_AFFINITY_3_MASK (0xffUL << VGIC_AFFINITY_3_SHIFT)
41
42#define VGIC_AFFINITY_LEVEL(reg, level) \
43 ((((reg) & VGIC_AFFINITY_## level ##_MASK) \
44 >> VGIC_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
45
/*
 * Userspace encodes the affinity levels differently from the MPIDR
 * register layout; the macro below converts the vgic userspace format
 * into the MPIDR register format.
 */
50#define VGIC_TO_MPIDR(val) (VGIC_AFFINITY_LEVEL(val, 0) | \
51 VGIC_AFFINITY_LEVEL(val, 1) | \
52 VGIC_AFFINITY_LEVEL(val, 2) | \
53 VGIC_AFFINITY_LEVEL(val, 3))
54
Vijaya Kumar Kd017d7b2017-01-26 19:50:51 +053055/*
56 * As per Documentation/virtual/kvm/devices/arm-vgic-v3.txt,
57 * below macros are defined for CPUREG encoding.
58 */
59#define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK 0x000000000000c000
60#define KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT 14
61#define KVM_REG_ARM_VGIC_SYSREG_OP1_MASK 0x0000000000003800
62#define KVM_REG_ARM_VGIC_SYSREG_OP1_SHIFT 11
63#define KVM_REG_ARM_VGIC_SYSREG_CRN_MASK 0x0000000000000780
64#define KVM_REG_ARM_VGIC_SYSREG_CRN_SHIFT 7
65#define KVM_REG_ARM_VGIC_SYSREG_CRM_MASK 0x0000000000000078
66#define KVM_REG_ARM_VGIC_SYSREG_CRM_SHIFT 3
67#define KVM_REG_ARM_VGIC_SYSREG_OP2_MASK 0x0000000000000007
68#define KVM_REG_ARM_VGIC_SYSREG_OP2_SHIFT 0
69
70#define KVM_DEV_ARM_VGIC_SYSREG_MASK (KVM_REG_ARM_VGIC_SYSREG_OP0_MASK | \
71 KVM_REG_ARM_VGIC_SYSREG_OP1_MASK | \
72 KVM_REG_ARM_VGIC_SYSREG_CRN_MASK | \
73 KVM_REG_ARM_VGIC_SYSREG_CRM_MASK | \
74 KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)
75
Eric Augerea1ad532017-01-09 16:19:41 +010076/*
77 * As per Documentation/virtual/kvm/devices/arm-vgic-its.txt,
78 * below macros are defined for ITS table entry encoding.
79 */
80#define KVM_ITS_CTE_VALID_SHIFT 63
81#define KVM_ITS_CTE_VALID_MASK BIT_ULL(63)
82#define KVM_ITS_CTE_RDBASE_SHIFT 16
83#define KVM_ITS_CTE_ICID_MASK GENMASK_ULL(15, 0)
Eric Augereff484e2017-05-03 17:38:01 +020084#define KVM_ITS_ITE_NEXT_SHIFT 48
85#define KVM_ITS_ITE_PINTID_SHIFT 16
86#define KVM_ITS_ITE_PINTID_MASK GENMASK_ULL(47, 16)
87#define KVM_ITS_ITE_ICID_MASK GENMASK_ULL(15, 0)
Eric Auger57a9a112017-01-09 16:27:07 +010088#define KVM_ITS_DTE_VALID_SHIFT 63
89#define KVM_ITS_DTE_VALID_MASK BIT_ULL(63)
90#define KVM_ITS_DTE_NEXT_SHIFT 49
91#define KVM_ITS_DTE_NEXT_MASK GENMASK_ULL(62, 49)
92#define KVM_ITS_DTE_ITTADDR_SHIFT 5
93#define KVM_ITS_DTE_ITTADDR_MASK GENMASK_ULL(48, 5)
94#define KVM_ITS_DTE_SIZE_MASK GENMASK_ULL(4, 0)
95#define KVM_ITS_L1E_VALID_MASK BIT_ULL(63)
96/* we only support 64 kB translation table page size */
97#define KVM_ITS_L1E_ADDR_MASK GENMASK_ULL(51, 16)
Eric Augerea1ad532017-01-09 16:19:41 +010098
Eric Auger04c11092018-05-22 09:55:17 +020099#define KVM_VGIC_V3_RDIST_INDEX_MASK GENMASK_ULL(11, 0)
100#define KVM_VGIC_V3_RDIST_FLAGS_MASK GENMASK_ULL(15, 12)
101#define KVM_VGIC_V3_RDIST_FLAGS_SHIFT 12
102#define KVM_VGIC_V3_RDIST_BASE_MASK GENMASK_ULL(51, 16)
103#define KVM_VGIC_V3_RDIST_COUNT_MASK GENMASK_ULL(63, 52)
104#define KVM_VGIC_V3_RDIST_COUNT_SHIFT 52
105
Andre Przywara62b06f82018-03-06 09:21:06 +0000106/* Requires the irq_lock to be held by the caller. */
Christoffer Dall8694e4d2017-01-23 14:07:18 +0100107static inline bool irq_is_pending(struct vgic_irq *irq)
108{
109 if (irq->config == VGIC_CONFIG_EDGE)
110 return irq->pending_latch;
111 else
112 return irq->pending_latch || irq->line_level;
113}
114
Christoffer Dalle40cc572017-08-29 10:40:44 +0200115static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
116{
117 return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
118}
119
Marc Zyngier53692902018-04-18 10:39:04 +0100120static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
121{
122 /* Account for the active state as an interrupt */
123 if (vgic_irq_is_sgi(irq->intid) && irq->source)
124 return hweight8(irq->source) + irq->active;
125
126 return irq_is_pending(irq) || irq->active;
127}
128
129static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
130{
131 return vgic_irq_get_lr_count(irq) > 1;
132}
133
/*
 * This struct provides an intermediate representation of the fields contained
 * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
 * state to userspace can generate either GICv2 or GICv3 CPU interface
 * registers regardless of the hardware backed GIC used.
 */
struct vgic_vmcr {
	/* interrupt group enable bits */
	u32 grpen0;
	u32 grpen1;

	/*
	 * NOTE(review): ackctl and fiqen look GICv2-specific (GICC_CTLR
	 * AckCtl/FIQEn bits) — confirm against vgic_v2_set_vmcr/get_vmcr.
	 */
	u32 ackctl;
	u32 fiqen;
	u32 cbpr;	/* common binary point register control */
	u32 eoim;	/* EOI mode (split priority drop / deactivate) — verify */

	/* binary point registers */
	u32 abpr;
	u32 bpr;
	u32 pmr;  /* Priority mask field in the GICC_PMR and
		   * ICC_PMR_EL1 priority field format */
};
154
/*
 * Decoded target of a KVM device attribute access: the vcpu it applies to
 * and the register address/offset, filled in by vgic_v2/v3_parse_attr().
 */
struct vgic_reg_attr {
	struct kvm_vcpu *vcpu;
	gpa_t addr;
};
159
160int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
161 struct vgic_reg_attr *reg_attr);
162int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
163 struct vgic_reg_attr *reg_attr);
164const struct vgic_register_region *
165vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
166 gpa_t addr, int len);
Christoffer Dall64a959d2015-11-24 16:51:12 +0100167struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
168 u32 intid);
Andre Przywara5dd4b922016-07-15 12:43:27 +0100169void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
Christoffer Dalle40cc572017-08-29 10:40:44 +0200170bool vgic_get_phys_line_level(struct vgic_irq *irq);
Christoffer Dalldf635c52017-09-01 16:25:12 +0200171void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
Christoffer Dalle40cc572017-08-29 10:40:44 +0200172void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
Christoffer Dall006df0f2016-10-16 22:19:11 +0200173bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
174 unsigned long flags);
Marc Zyngier2b0cda82016-04-26 11:06:47 +0100175void vgic_kick_vcpus(struct kvm *kvm);
Christoffer Dall64a959d2015-11-24 16:51:12 +0100176
Andre Przywara1085fdc2016-07-15 12:43:31 +0100177int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
178 phys_addr_t addr, phys_addr_t alignment);
179
Marc Zyngier140b0862015-11-26 17:19:25 +0000180void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
181void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
182void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
183void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
Marc Zyngier16ca6a62018-03-06 21:48:01 +0000184void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
Eric Augerf94591e2015-12-21 17:34:52 +0100185int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
Christoffer Dallc3199f22016-04-25 01:11:37 +0200186int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
187 int offset, u32 *val);
Andre Przywara878c5692015-12-03 11:48:42 +0000188int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
189 int offset, u32 *val);
Andre Przywarae4823a72015-12-03 11:47:37 +0000190void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
191void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
Eric Augerad275b8b2015-12-21 18:09:38 +0100192void vgic_v2_enable(struct kvm_vcpu *vcpu);
Eric Auger90977732015-12-01 15:02:35 +0100193int vgic_v2_probe(const struct gic_kvm_info *info);
Eric Augerb0442ee2015-12-21 15:04:42 +0100194int vgic_v2_map_resources(struct kvm *kvm);
Andre Przywarafb848db2016-04-26 21:32:49 +0100195int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
196 enum vgic_type);
Marc Zyngier140b0862015-11-26 17:19:25 +0000197
Christoffer Dall5b0d2cc2017-03-18 13:56:56 +0100198void vgic_v2_init_lrs(void);
Christoffer Dall328e5662016-03-24 11:21:04 +0100199void vgic_v2_load(struct kvm_vcpu *vcpu);
200void vgic_v2_put(struct kvm_vcpu *vcpu);
Christoffer Dall5b0d2cc2017-03-18 13:56:56 +0100201
Christoffer Dall75174ba2016-12-22 20:39:10 +0100202void vgic_v2_save_state(struct kvm_vcpu *vcpu);
203void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
204
Marc Zyngierd97594e2016-07-17 11:27:23 +0100205static inline void vgic_get_irq_kref(struct vgic_irq *irq)
206{
207 if (irq->intid < VGIC_MIN_LPI)
208 return;
209
210 kref_get(&irq->refcount);
211}
212
Marc Zyngier59529f62015-11-30 13:09:53 +0000213void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
214void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
215void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
216void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
Marc Zyngier16ca6a62018-03-06 21:48:01 +0000217void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
Andre Przywarae4823a72015-12-03 11:47:37 +0000218void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
219void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
Eric Augerad275b8b2015-12-21 18:09:38 +0100220void vgic_v3_enable(struct kvm_vcpu *vcpu);
Eric Auger90977732015-12-01 15:02:35 +0100221int vgic_v3_probe(const struct gic_kvm_info *info);
Eric Augerb0442ee2015-12-21 15:04:42 +0100222int vgic_v3_map_resources(struct kvm *kvm);
Eric Auger44de9d62017-05-04 11:19:52 +0200223int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
Eric Auger28077122017-01-09 16:28:27 +0100224int vgic_v3_save_pending_tables(struct kvm *kvm);
Eric Auger04c11092018-05-22 09:55:17 +0200225int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
Christoffer Dall1aab6f42017-05-08 12:30:24 +0200226int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
Christoffer Dall9a746d72017-05-08 12:23:51 +0200227bool vgic_v3_check_base(struct kvm *kvm);
Vladimir Murzin7a1ff702016-09-12 15:49:18 +0100228
Christoffer Dall328e5662016-03-24 11:21:04 +0100229void vgic_v3_load(struct kvm_vcpu *vcpu);
230void vgic_v3_put(struct kvm_vcpu *vcpu);
231
Andre Przywara59c5ab42016-07-15 12:43:30 +0100232bool vgic_has_its(struct kvm *kvm);
Andre Przywara0e4e82f2016-07-15 12:43:38 +0100233int kvm_vgic_register_its_device(void);
Andre Przywara33d3bc92016-07-15 12:43:34 +0100234void vgic_enable_lpis(struct kvm_vcpu *vcpu);
Andre Przywara2891a7d2016-07-15 12:43:37 +0100235int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
Vijaya Kumar K94574c92017-01-26 19:50:47 +0530236int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
237int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
238 int offset, u32 *val);
239int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
240 int offset, u32 *val);
Vijaya Kumar Kd017d7b2017-01-26 19:50:51 +0530241int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write,
242 u64 id, u64 *val);
243int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
244 u64 *reg);
Vijaya Kumar Ke96a0062017-01-26 19:50:52 +0530245int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
246 u32 intid, u64 *val);
Andre Przywara42c88702016-07-15 12:43:23 +0100247int kvm_register_vgic_device(unsigned long type);
Vijaya Kumar K5fb247d2017-01-26 19:50:50 +0530248void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
249void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
Eric Augerad275b8b2015-12-21 18:09:38 +0100250int vgic_lazy_init(struct kvm *kvm);
251int vgic_init(struct kvm *kvm);
Eric Augerc86c7722015-11-30 14:01:58 +0100252
Greg Kroah-Hartman929f45e2018-05-29 18:22:04 +0200253void vgic_debug_init(struct kvm *kvm);
254void vgic_debug_destroy(struct kvm *kvm);
Christoffer Dall10f92c42017-01-17 23:09:13 +0100255
Eric Augerdfc99f82017-03-23 11:51:52 +0100256bool lock_all_vcpus(struct kvm *kvm);
257void unlock_all_vcpus(struct kvm *kvm);
258
Christoffer Dall50f5bd52017-09-01 11:41:52 +0200259static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
260{
261 struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
262
263 /*
264 * num_pri_bits are initialized with HW supported values.
265 * We can rely safely on num_pri_bits even if VM has not
266 * restored ICC_CTLR_EL1 before restoring APnR registers.
267 */
268 switch (cpu_if->num_pri_bits) {
269 case 7: return 3;
270 case 6: return 1;
271 default: return 0;
272 }
273}
274
Eric Augerdc524612018-05-22 09:55:09 +0200275static inline bool
276vgic_v3_redist_region_full(struct vgic_redist_region *region)
277{
278 if (!region->count)
279 return false;
280
281 return (region->free_index >= region->count);
282}
283
284struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rdregs);
285
Eric Auger028bf272018-05-22 09:55:11 +0200286static inline size_t
287vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
288{
289 if (!rdreg->count)
290 return atomic_read(&kvm->online_vcpus) * KVM_VGIC_V3_REDIST_SIZE;
291 else
292 return rdreg->count * KVM_VGIC_V3_REDIST_SIZE;
293}
Eric Auger04c11092018-05-22 09:55:17 +0200294
295struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
296 u32 index);
297
Eric Auger028bf272018-05-22 09:55:11 +0200298bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
299
Eric Augerccc27bf2018-05-22 09:55:12 +0200300static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
301{
302 struct vgic_dist *d = &kvm->arch.vgic;
303
304 return (base + size > d->vgic_dist_base) &&
305 (base < d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE);
306}
307
Marc Zyngierbebfd2a2017-10-27 15:28:35 +0100308int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
309 u32 devid, u32 eventid, struct vgic_irq **irq);
310struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
311
Marc Zyngiere7c48052017-10-27 15:28:37 +0100312bool vgic_supports_direct_msis(struct kvm *kvm);
Marc Zyngier74fe55d2017-10-27 15:28:38 +0100313int vgic_v4_init(struct kvm *kvm);
314void vgic_v4_teardown(struct kvm *kvm);
Marc Zyngier62775792017-10-27 15:28:50 +0100315int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
316int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);
Marc Zyngiere7c48052017-10-27 15:28:37 +0100317
Christoffer Dall64a959d2015-11-24 16:51:12 +0100318#endif