/*
 * Contains GICv2 specific emulation code, was in vgic.c before.
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

#define GICC_ARCH_VERSION_V2		0x2

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}

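/*
 * Emulate the first few words of the distributor: GICD_CTLR (distributor
 * enable), GICD_TYPER (number of CPUs and implemented interrupts, both
 * derived from the VM configuration) and GICD_IIDR (implementer/product
 * ID). Only GICD_CTLR is writable; a write to it updates the emulated
 * enable bit and forces a distributor state update.
 */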
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
					   vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
					     vcpu->vcpu_id);
}

static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
					  vcpu->vcpu_id);
}

static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
					    vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

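/*
 * GICD_ITARGETSRn uses one byte per interrupt, so each 32-bit register
 * describes the targets of four interrupts. The emulation only keeps a
 * single target CPU per SPI (dist->irq_spi_cpu[]); the helpers below
 * convert between that representation and the register layout.
 */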
#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;

		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the targets of the banked interrupts as read-only */
	if (offset < 32) {
		u32 roreg;

		roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}

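/*
 * GICD_SGIR: reads are RAZ; a write requests an SGI to be forwarded to
 * one or more VCPUs via vgic_dispatch_sgi() below.
 */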
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);

		reg |= ((u32)sources) << (8 * (sgi - min_sgi));
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}

static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Set or clear the pending SGI sources on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);

		if (set) {
			if ((*src & mask) != mask)
				updated = true;
			*src |= mask;
		} else {
			if (*src & mask)
				updated = true;
			*src &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}

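/*
 * Distributor MMIO dispatch table: each entry describes a register block
 * by its offset from the distributor base address, its byte length and
 * the number of bits used per interrupt, plus the handler emulating it.
 * The array is terminated by an empty entry.
 */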
static const struct vgic_io_range vgic_dist_ranges[] = {
	{
		.base = GIC_DIST_CTRL,
		.len = 12,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_misc,
	},
	{
		.base = GIC_DIST_IGROUP,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GIC_DIST_ENABLE_SET,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_enable_reg,
	},
	{
		.base = GIC_DIST_ENABLE_CLEAR,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_enable_reg,
	},
	{
		.base = GIC_DIST_PENDING_SET,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_pending_reg,
	},
	{
		.base = GIC_DIST_PENDING_CLEAR,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_pending_reg,
	},
	{
		.base = GIC_DIST_ACTIVE_SET,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_active_reg,
	},
	{
		.base = GIC_DIST_ACTIVE_CLEAR,
		.len = VGIC_MAX_IRQS / 8,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_active_reg,
	},
	{
		.base = GIC_DIST_PRI,
		.len = VGIC_MAX_IRQS,
		.bits_per_irq = 8,
		.handle_mmio = handle_mmio_priority_reg,
	},
	{
		.base = GIC_DIST_TARGET,
		.len = VGIC_MAX_IRQS,
		.bits_per_irq = 8,
		.handle_mmio = handle_mmio_target_reg,
	},
	{
		.base = GIC_DIST_CONFIG,
		.len = VGIC_MAX_IRQS / 4,
		.bits_per_irq = 2,
		.handle_mmio = handle_mmio_cfg_reg,
	},
	{
		.base = GIC_DIST_SOFTINT,
		.len = 4,
		.handle_mmio = handle_mmio_sgi_reg,
	},
	{
		.base = GIC_DIST_SGI_PENDING_CLEAR,
		.len = VGIC_NR_SGIS,
		.handle_mmio = handle_mmio_sgi_clear,
	},
	{
		.base = GIC_DIST_SGI_PENDING_SET,
		.len = VGIC_NR_SGIS,
		.handle_mmio = handle_mmio_sgi_set,
	},
	{}
};

static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_exit_mmio *mmio)
{
	unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;

	if (!is_in_range(mmio->phys_addr, mmio->len, base,
			 KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* GICv2 does not support accesses wider than 32 bits */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
}

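/*
 * Handle a write to GICD_SGIR: bits [3:0] carry the SGI number, bits
 * [23:16] the target CPU list and bits [25:24] the target list filter
 * (0: use the list, 1: all but self, 2: self only). Mark the SGI pending
 * on every targeted VCPU and record the requesting VCPU in the per-SGI
 * source bitmap.
 */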
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n",
				  sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}

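/*
 * Try to queue the pending SGI once for every source CPU recorded for it.
 * Only when all sources could be queued onto list registers is the SGI
 * considered delivered and its pending state cleared in the emulated
 * distributor.
 */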
static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

	for_each_set_bit(c, &sources, dist->nr_cpus) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

/**
 * vgic_v2_map_resources - Configure global VGIC state before running any VCPUs
 * @kvm:	pointer to the kvm struct
 * @params:	global VGIC parameters, providing the physical base address of
 *		the host's GIC virtual CPU interface
 *
 * Map the virtual CPU interface into the VM before running any VCPUs. We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.
 */
static int vgic_v2_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
				 KVM_VGIC_V2_DIST_SIZE,
				 vgic_dist_ranges, -1, &dist->dist_iodev);

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to allocate maps\n");
		goto out_unregister;
	}

	ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
				    params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
				    true);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out_unregister;
	}

	dist->ready = true;
	goto out;

out_unregister:
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);

out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}

static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	*vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
}

static int vgic_v2_init_model(struct kvm *kvm)
{
	int i;

	for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	return 0;
}

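/*
 * Wire up the GICv2 specific operations for this VM and cap the number of
 * VCPUs at what a GICv2 can address.
 */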
void vgic_v2_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
	dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
	dist->vm_ops.init_model = vgic_v2_init_model;
	dist->vm_ops.map_resources = vgic_v2_map_resources;

	kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
}

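/*
 * Map accesses to GIC_CPU_CTRL, GIC_CPU_PRIMASK and the (aliased) binary
 * point registers onto the corresponding fields of the shadow VMCR state
 * (struct vgic_vmcr), as used by the userspace save/restore path below.
 */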
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}

/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct vgic_io_range vgic_cpu_ranges[] = {
	{
		.base = GIC_CPU_CTRL,
		.len = 12,
		.handle_mmio = handle_cpu_mmio_misc,
	},
	{
		.base = GIC_CPU_ALIAS_BINPOINT,
		.len = 4,
		.handle_mmio = handle_mmio_abpr,
	},
	{
		.base = GIC_CPU_ACTIVEPRIO,
		.len = 16,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GIC_CPU_IDENT,
		.len = 4,
		.handle_mmio = handle_cpu_mmio_ident,
	},
};

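/*
 * Userspace access to an individual distributor or CPU interface register:
 * synthesize a 4-byte MMIO access for the requested offset and run the
 * matching handler, but only while no VCPU is running, and after draining
 * the list registers so the pending state is fully visible.
 */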
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct vgic_io_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = vgic_find_range(ranges, 4, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field. If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

static int vgic_v2_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v2_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}
	}

	return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		ret = vgic_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	}

	return -ENXIO;
}

static int vgic_v2_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_v2_create,
	.destroy = vgic_v2_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};