blob: a21393637e4b9b25cae9ee038dcfe4f39d1999c7 [file] [log] [blame]
/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

Marc Zyngier2b0cda82016-04-26 11:06:47 +010023static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
24 gpa_t addr, unsigned int len)
25{
26 u32 value;
27
28 switch (addr & 0x0c) {
29 case GIC_DIST_CTRL:
30 value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
31 break;
32 case GIC_DIST_CTR:
33 value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
34 value = (value >> 5) - 1;
35 value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
36 break;
37 case GIC_DIST_IIDR:
38 value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
39 break;
40 default:
41 return 0;
42 }
43
44 return value;
45}
46
47static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
48 gpa_t addr, unsigned int len,
49 unsigned long val)
50{
51 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
52 bool was_enabled = dist->enabled;
53
54 switch (addr & 0x0c) {
55 case GIC_DIST_CTRL:
56 dist->enabled = val & GICD_ENABLE;
57 if (!was_enabled && dist->enabled)
58 vgic_kick_vcpus(vcpu->kvm);
59 break;
60 case GIC_DIST_CTR:
61 case GIC_DIST_IIDR:
62 /* Nothing to do */
63 return;
64 }
65}
66
/*
 * Emulate a guest write to GICD_SGIR: mark the requested SGI (INTID 0-15)
 * pending on each vCPU selected by the TargetListFilter/CPUTargetList
 * fields, recording the sender's vcpu_id in the per-IRQ source bitmap so
 * SGIs from different senders can be distinguished.
 */
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;			/* SGIINTID field */
	int targets = (val >> 16) & 0xff;	/* CPUTargetList bitmap */
	int mode = (val >> 24) & 0x03;		/* TargetListFilter */
	int c;
	struct kvm_vcpu *vcpu;

	switch (mode) {
	case 0x0: /* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2: /* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3: /* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		spin_lock(&irq->irq_lock);
		irq->pending = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		/* Drops irq_lock; may insert the IRQ into the ap_list. */
		vgic_queue_irq_unlock(source_vcpu->kvm, irq);
	}
}
107
Andre Przywara2c234d62015-12-01 12:41:55 +0000108static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
109 gpa_t addr, unsigned int len)
110{
111 u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
112 int i;
113 u64 val = 0;
114
115 for (i = 0; i < len; i++) {
116 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
117
118 val |= (u64)irq->targets << (i * 8);
119 }
120
121 return val;
122}
123
124static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
125 gpa_t addr, unsigned int len,
126 unsigned long val)
127{
128 u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
129 int i;
130
131 /* GICD_ITARGETSR[0-7] are read-only */
132 if (intid < VGIC_NR_PRIVATE_IRQS)
133 return;
134
135 for (i = 0; i < len; i++) {
136 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
137 int target;
138
139 spin_lock(&irq->irq_lock);
140
141 irq->targets = (val >> (i * 8)) & 0xff;
142 target = irq->targets ? __ffs(irq->targets) : 0;
143 irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
144
145 spin_unlock(&irq->irq_lock);
146 }
147}
148
Andre Przywaraed402132015-12-09 16:21:37 +0000149static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
150 gpa_t addr, unsigned int len)
151{
152 u32 intid = addr & 0x0f;
153 int i;
154 u64 val = 0;
155
156 for (i = 0; i < len; i++) {
157 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
158
159 val |= (u64)irq->source << (i * 8);
160 }
161 return val;
162}
163
164static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
165 gpa_t addr, unsigned int len,
166 unsigned long val)
167{
168 u32 intid = addr & 0x0f;
169 int i;
170
171 for (i = 0; i < len; i++) {
172 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
173
174 spin_lock(&irq->irq_lock);
175
176 irq->source &= ~((val >> (i * 8)) & 0xff);
177 if (!irq->source)
178 irq->pending = false;
179
180 spin_unlock(&irq->irq_lock);
181 }
182}
183
/*
 * Write GICD_SPENDSGIR: set the given source bits for each SGI and, if
 * any source is now pending, mark the SGI pending and try to queue it.
 */
static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;	/* one byte per SGI, 16 SGIs total */
	int i;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending = true;
			/* Releases irq_lock. */
			vgic_queue_irq_unlock(vcpu->kvm, irq);
		} else {
			/* No source set by this write: just drop the lock. */
			spin_unlock(&irq->irq_lock);
		}
	}
}
206
Andre Przywara878c5692015-12-03 11:48:42 +0000207static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
208{
209 if (kvm_vgic_global_state.type == VGIC_V2)
210 vgic_v2_set_vmcr(vcpu, vmcr);
211 else
212 vgic_v3_set_vmcr(vcpu, vmcr);
213}
214
215static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
216{
217 if (kvm_vgic_global_state.type == VGIC_V2)
218 vgic_v2_get_vmcr(vcpu, vmcr);
219 else
220 vgic_v3_get_vmcr(vcpu, vmcr);
221}
222
#define GICC_ARCH_VERSION_V2	0x2
224
225/* These are for userland accesses only, there is no guest-facing emulation. */
226static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
227 gpa_t addr, unsigned int len)
228{
229 struct vgic_vmcr vmcr;
230 u32 val;
231
232 vgic_get_vmcr(vcpu, &vmcr);
233
234 switch (addr & 0xff) {
235 case GIC_CPU_CTRL:
236 val = vmcr.ctlr;
237 break;
238 case GIC_CPU_PRIMASK:
239 val = vmcr.pmr;
240 break;
241 case GIC_CPU_BINPOINT:
242 val = vmcr.bpr;
243 break;
244 case GIC_CPU_ALIAS_BINPOINT:
245 val = vmcr.abpr;
246 break;
247 case GIC_CPU_IDENT:
248 val = ((PRODUCT_ID_KVM << 20) |
249 (GICC_ARCH_VERSION_V2 << 16) |
250 IMPLEMENTER_ARM);
251 break;
252 default:
253 return 0;
254 }
255
256 return val;
257}
258
259static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
260 gpa_t addr, unsigned int len,
261 unsigned long val)
262{
263 struct vgic_vmcr vmcr;
264
265 vgic_get_vmcr(vcpu, &vmcr);
266
267 switch (addr & 0xff) {
268 case GIC_CPU_CTRL:
269 vmcr.ctlr = val;
270 break;
271 case GIC_CPU_PRIMASK:
272 vmcr.pmr = val;
273 break;
274 case GIC_CPU_BINPOINT:
275 vmcr.bpr = val;
276 break;
277 case GIC_CPU_ALIAS_BINPOINT:
278 vmcr.abpr = val;
279 break;
280 }
281
282 vgic_set_vmcr(vcpu, &vmcr);
283}
284
/*
 * GICv2 distributor register map: offset, read/write handlers, size and
 * allowed access widths for each register group. Per-IRQ groups are sized
 * by bits-per-interrupt; fixed registers by byte length.
 */
static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};
329
/*
 * GICv2 CPU interface register map, used only for userland (save/restore)
 * accesses; guests use the hardware CPU interface directly.
 */
static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};
350
Andre Przywarafb848db2016-04-26 21:32:49 +0100351unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
352{
353 dev->regions = vgic_v2_dist_registers;
354 dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
355
356 kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);
357
358 return SZ_4K;
359}
Eric Augerf94591e2015-12-21 17:34:52 +0100360
361int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
362{
363 int nr_irqs = dev->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
364 const struct vgic_register_region *regions;
365 gpa_t addr;
366 int nr_regions, i, len;
367
368 addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
369
370 switch (attr->group) {
371 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
372 regions = vgic_v2_dist_registers;
373 nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
374 break;
375 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
Andre Przywara878c5692015-12-03 11:48:42 +0000376 regions = vgic_v2_cpu_registers;
377 nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
378 break;
Eric Augerf94591e2015-12-21 17:34:52 +0100379 default:
380 return -ENXIO;
381 }
382
383 /* We only support aligned 32-bit accesses. */
384 if (addr & 3)
385 return -ENXIO;
386
387 for (i = 0; i < nr_regions; i++) {
388 if (regions[i].bits_per_irq)
389 len = (regions[i].bits_per_irq * nr_irqs) / 8;
390 else
391 len = regions[i].len;
392
393 if (regions[i].reg_offset <= addr &&
394 regions[i].reg_offset + len > addr)
395 return 0;
396 }
397
398 return -ENXIO;
399}
Christoffer Dallc3199f22016-04-25 01:11:37 +0200400
401/*
402 * When userland tries to access the VGIC register handlers, we need to
403 * create a usable struct vgic_io_device to be passed to the handlers and we
404 * have to set up a buffer similar to what would have happened if a guest MMIO
405 * access occurred, including doing endian conversions on BE systems.
406 */
407static int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
408 bool is_write, int offset, u32 *val)
409{
410 unsigned int len = 4;
411 u8 buf[4];
412 int ret;
413
414 if (is_write) {
415 vgic_data_host_to_mmio_bus(buf, len, *val);
416 ret = kvm_io_gic_ops.write(vcpu, &dev->dev, offset, len, buf);
417 } else {
418 ret = kvm_io_gic_ops.read(vcpu, &dev->dev, offset, len, buf);
419 if (!ret)
420 *val = vgic_data_mmio_bus_to_host(buf, len);
421 }
422
423 return ret;
424}
425
/*
 * Userland access to the GICv2 CPU interface registers: build a temporary
 * vgic_io_device over the CPU interface table and dispatch through the
 * common uaccess helper. The designated initializer zeroes every other
 * field of the device, which the dispatch path relies on.
 */
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}
436
/*
 * Userland access to the GICv2 distributor registers: build a temporary
 * vgic_io_device over the distributor table and dispatch through the
 * common uaccess helper. The designated initializer zeroes every other
 * field of the device, which the dispatch path relies on.
 */
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}