/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <asm/kvm_emulate.h>

/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending
 * - VGIC pending interrupts are stored on the vgic.irq_state vgic
 *   bitmap (this bitmap is updated by both user land ioctls and guest
 *   mmio ops, and other in-kernel peripherals such as the
 *   arch. timers) and indicate the 'wire' state.
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_state & dist->irq_enable
 *   - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as follows:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_active is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_active. This allows the
 *   interrupt line to be sampled again.
 */

#define VGIC_ADDR_UNDEF         (-1)
#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)

#define ACCESS_READ_VALUE       (1 << 0)
#define ACCESS_READ_RAZ         (0 << 0)
#define ACCESS_READ_MASK(x)     ((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED    (0 << 1)
#define ACCESS_WRITE_SETBIT     (1 << 1)
#define ACCESS_WRITE_CLEARBIT   (2 << 1)
#define ACCESS_WRITE_VALUE      (3 << 1)
#define ACCESS_WRITE_MASK(x)    ((x) & (3 << 1))

static void vgic_update_state(struct kvm *kvm);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);

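/*
 * A vgic_bitmap keeps one bit per interrupt: the first 32 bits (SGIs and
 * PPIs) are banked per cpu, the remaining bits (SPIs) are shared. The
 * helpers below map a (cpuid, register offset) or (cpuid, irq) pair onto
 * the right word or bit of that layout.
 */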
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
                                int cpuid, u32 offset)
{
        offset >>= 2;
        if (!offset)
                return x->percpu[cpuid].reg;
        else
                return x->shared.reg + offset - 1;
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
                                   int cpuid, int irq)
{
        if (irq < VGIC_NR_PRIVATE_IRQS)
                return test_bit(irq, x->percpu[cpuid].reg_ul);

        return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
                                    int irq, int val)
{
        unsigned long *reg;

        if (irq < VGIC_NR_PRIVATE_IRQS) {
                reg = x->percpu[cpuid].reg_ul;
        } else {
                reg = x->shared.reg_ul;
                irq -= VGIC_NR_PRIVATE_IRQS;
        }

        if (val)
                set_bit(irq, reg);
        else
                clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
        if (unlikely(cpuid >= VGIC_MAX_CPUS))
                return NULL;
        return x->percpu[cpuid].reg_ul;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
        return x->shared.reg_ul;
}

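/*
 * A vgic_bytemap keeps one byte per interrupt (used for the priorities):
 * the first 8 words cover the 32 banked private interrupts, the rest is
 * shared between cpus.
 */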
static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
        offset >>= 2;
        BUG_ON(offset > (VGIC_NR_IRQS / 4));
        if (offset < 8)
                return x->percpu[cpuid] + offset;
        else
                return x->shared + offset - 8;
}

#define VGIC_CFG_LEVEL  0
#define VGIC_CFG_EDGE   1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int irq_val;

        irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
        return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
        if (irq < VGIC_NR_PRIVATE_IRQS)
                set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
        else
                set_bit(irq - VGIC_NR_PRIVATE_IRQS,
                        vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
        if (irq < VGIC_NR_PRIVATE_IRQS)
                clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
        else
                clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
                          vcpu->arch.vgic_cpu.pending_shared);
}

static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
        return *((u32 *)mmio->data) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
        *((u32 *)mmio->data) = value & mask;
}

/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
                            phys_addr_t offset, int mode)
{
        int word_offset = (offset & 3) * 8;
        u32 mask = (1UL << (mmio->len * 8)) - 1;
        u32 regval;

        /*
         * Any alignment fault should have been delivered to the guest
         * directly (ARM ARM B3.12.7 "Prioritization of aborts").
         */

        if (reg) {
                regval = *reg;
        } else {
                BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
                regval = 0;
        }

        if (mmio->is_write) {
                u32 data = mmio_data_read(mmio, mask) << word_offset;
                switch (ACCESS_WRITE_MASK(mode)) {
                case ACCESS_WRITE_IGNORED:
                        return;

                case ACCESS_WRITE_SETBIT:
                        regval |= data;
                        break;

                case ACCESS_WRITE_CLEARBIT:
                        regval &= ~data;
                        break;

                case ACCESS_WRITE_VALUE:
                        regval = (regval & ~(mask << word_offset)) | data;
                        break;
                }
                *reg = regval;
        } else {
                switch (ACCESS_READ_MASK(mode)) {
                case ACCESS_READ_RAZ:
                        regval = 0;
                        /* fall through */

                case ACCESS_READ_VALUE:
                        mmio_data_write(mmio, mask, regval >> word_offset);
                }
        }
}

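/*
 * Handles the first words of the distributor map: GICD_CTLR (global
 * enable), GICD_TYPER (number of IRQs and CPUs) and GICD_IIDR.
 */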
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;
        u32 word_offset = offset & 3;

        switch (offset & ~3) {
        case 0:                 /* CTLR */
                reg = vcpu->kvm->arch.vgic.enabled;
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
                if (mmio->is_write) {
                        vcpu->kvm->arch.vgic.enabled = reg & 1;
                        vgic_update_state(vcpu->kvm);
                        return true;
                }
                break;

        case 4:                 /* TYPER */
                reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
                reg |= (VGIC_NR_IRQS >> 5) - 1;
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                break;

        case 8:                 /* IIDR */
                reg = 0x4B00043B;
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                break;
        }

        return false;
}

static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
                               struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t offset)
{
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
                                       vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
        if (mmio->is_write) {
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
                                         struct kvm_exit_mmio *mmio,
                                         phys_addr_t offset)
{
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
                                       vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
        if (mmio->is_write) {
                if (offset < 4) /* Force SGI enabled */
                        *reg |= 0xffff;
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
                                        struct kvm_exit_mmio *mmio,
                                        phys_addr_t offset)
{
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
                                       vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
        if (mmio->is_write) {
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
                                          struct kvm_exit_mmio *mmio,
                                          phys_addr_t offset)
{
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
                                       vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
        if (mmio->is_write) {
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                        vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

#define GICD_ITARGETSR_SIZE     32
#define GICD_CPUTARGETS_BITS    8
#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int i, c;
        unsigned long *bmap;
        u32 val = 0;

        irq -= VGIC_NR_PRIVATE_IRQS;

        kvm_for_each_vcpu(c, vcpu, kvm) {
                bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
                for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
                        if (test_bit(irq + i, bmap))
                                val |= 1 << (c + i * 8);
        }

        return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int i, c;
        unsigned long *bmap;
        u32 target;

        irq -= VGIC_NR_PRIVATE_IRQS;

        /*
         * Pick the LSB in each byte. This ensures we target exactly
         * one vcpu per IRQ. If the byte is null, assume we target
         * CPU0.
         */
        for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
                int shift = i * GICD_CPUTARGETS_BITS;
                target = ffs((val >> shift) & 0xffU);
                target = target ? (target - 1) : 0;
                dist->irq_spi_cpu[irq + i] = target;
                kvm_for_each_vcpu(c, vcpu, kvm) {
                        bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
                        if (c == target)
                                set_bit(irq + i, bmap);
                        else
                                clear_bit(irq + i, bmap);
                }
        }
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
                                   struct kvm_exit_mmio *mmio,
                                   phys_addr_t offset)
{
        u32 reg;

        /* We treat the banked interrupt targets as read-only */
        if (offset < 32) {
                u32 roreg = 1 << vcpu->vcpu_id;
                roreg |= roreg << 8;
                roreg |= roreg << 16;

                vgic_reg_access(mmio, &roreg, offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static u32 vgic_cfg_expand(u16 val)
{
        u32 res = 0;
        int i;

        /*
         * Turn a 16bit value like abcd...mnop into a 32bit word
         * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
         */
        for (i = 0; i < 16; i++)
                res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

        return res;
}

static u16 vgic_cfg_compress(u32 val)
{
        u16 res = 0;
        int i;

        /*
         * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
         * abcd...mnop which is what we really care about.
         */
        for (i = 0; i < 16; i++)
                res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

        return res;
}

/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 val;
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                       vcpu->vcpu_id, offset >> 1);
        if (offset & 2)
                val = *reg >> 16;
        else
                val = *reg & 0xffff;

        val = vgic_cfg_expand(val);
        vgic_reg_access(mmio, &val, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                if (offset < 4) {
                        *reg = ~0U; /* Force PPIs/SGIs to 1 */
                        return false;
                }

                val = vgic_cfg_compress(val);
                if (offset & 2) {
                        *reg &= 0xffff;
                        *reg |= val << 16;
                } else {
                        *reg &= 0xffff << 16;
                        *reg |= val;
                }
        }

        return false;
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                vgic_dispatch_sgi(vcpu, reg);
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
        phys_addr_t base;
        unsigned long len;
        bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
                            phys_addr_t offset);
};

static const struct mmio_range vgic_ranges[] = {
        {
                .base = GIC_DIST_CTRL,
                .len = 12,
                .handle_mmio = handle_mmio_misc,
        },
        {
                .base = GIC_DIST_IGROUP,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GIC_DIST_ENABLE_SET,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_set_enable_reg,
        },
        {
                .base = GIC_DIST_ENABLE_CLEAR,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_clear_enable_reg,
        },
        {
                .base = GIC_DIST_PENDING_SET,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_set_pending_reg,
        },
        {
                .base = GIC_DIST_PENDING_CLEAR,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_clear_pending_reg,
        },
        {
                .base = GIC_DIST_ACTIVE_SET,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GIC_DIST_ACTIVE_CLEAR,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GIC_DIST_PRI,
                .len = VGIC_NR_IRQS,
                .handle_mmio = handle_mmio_priority_reg,
        },
        {
                .base = GIC_DIST_TARGET,
                .len = VGIC_NR_IRQS,
                .handle_mmio = handle_mmio_target_reg,
        },
        {
                .base = GIC_DIST_CONFIG,
                .len = VGIC_NR_IRQS / 4,
                .handle_mmio = handle_mmio_cfg_reg,
        },
        {
                .base = GIC_DIST_SOFTINT,
                .len = 4,
                .handle_mmio = handle_mmio_sgi_reg,
        },
        {}
};

static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t base)
{
        const struct mmio_range *r = ranges;
        phys_addr_t addr = mmio->phys_addr - base;

        while (r->len) {
                if (addr >= r->base &&
                    (addr + mmio->len) <= (r->base + r->len))
                        return r;
                r++;
        }

        return NULL;
}

/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu:       pointer to the vcpu performing the access
 * @run:        pointer to the kvm_run structure
 * @mmio:       pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
                      struct kvm_exit_mmio *mmio)
{
        const struct mmio_range *range;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long base = dist->vgic_dist_base;
        bool updated_state;
        unsigned long offset;

        if (!irqchip_in_kernel(vcpu->kvm) ||
            mmio->phys_addr < base ||
            (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
                return false;

        /* We don't support ldrd / strd or ldm / stm to the emulated vgic */
        if (mmio->len > 4) {
                kvm_inject_dabt(vcpu, mmio->phys_addr);
                return true;
        }

        range = find_matching_range(vgic_ranges, mmio, base);
        if (unlikely(!range || !range->handle_mmio)) {
                pr_warn("Unhandled access %d %08llx %d\n",
                        mmio->is_write, mmio->phys_addr, mmio->len);
                return false;
        }

        spin_lock(&vcpu->kvm->arch.vgic.lock);
        offset = mmio->phys_addr - range->base - base;
        updated_state = range->handle_mmio(vcpu, mmio, offset);
        spin_unlock(&vcpu->kvm->arch.vgic.lock);
        kvm_prepare_mmio(run, mmio);
        kvm_handle_mmio_return(vcpu, run);

        return true;
}

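/*
 * Decode a write to GICD_SGIR and mark the requested SGI as pending on
 * the targeted vcpus: bits [3:0] select the SGI, bits [23:16] hold the
 * target list and bits [25:24] the filter (target list, all-but-self,
 * or self).
 */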
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
        struct kvm *kvm = vcpu->kvm;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int nrcpus = atomic_read(&kvm->online_vcpus);
        u8 target_cpus;
        int sgi, mode, c, vcpu_id;

        vcpu_id = vcpu->vcpu_id;

        sgi = reg & 0xf;
        target_cpus = (reg >> 16) & 0xff;
        mode = (reg >> 24) & 3;

        switch (mode) {
        case 0:
                if (!target_cpus)
                        return;
                break;

        case 1:
                target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
                break;

        case 2:
                target_cpus = 1 << vcpu_id;
                break;
        }

        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (target_cpus & 1) {
                        /* Flag the SGI as pending */
                        vgic_dist_irq_set(vcpu, sgi);
                        dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
                        kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
                }

                target_cpus >>= 1;
        }
}

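/*
 * Compute the per-cpu pending state described in the comment at the top
 * of this file. For now this always reports "nothing pending".
 */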
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
        return 0;
}

/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int c;

        if (!dist->enabled) {
                set_bit(0, &dist->irq_pending_on_cpu);
                return;
        }

        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (compute_pending_for_cpu(vcpu)) {
                        pr_debug("CPU%d has pending interrupts\n", c);
                        set_bit(c, &dist->irq_pending_on_cpu);
                }
        }
}

static int vgic_ioaddr_overlap(struct kvm *kvm)
{
        phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
        phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

        if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
                return 0;
        if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
            (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
                return -EBUSY;
        return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
                              phys_addr_t addr, phys_addr_t size)
{
        int ret;

        if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
                return -EEXIST;
        if (addr + size < addr)
                return -EINVAL;

        ret = vgic_ioaddr_overlap(kvm);
        if (ret)
                return ret;
        *ioaddr = addr;
        return ret;
}

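/**
 * kvm_vgic_set_addr - set the base address of a vgic region
 * @kvm:  pointer to the vm being configured
 * @type: KVM_VGIC_V2_ADDR_TYPE_DIST or KVM_VGIC_V2_ADDR_TYPE_CPU
 * @addr: guest physical base address of the region
 *
 * Returns 0 on success, or a negative error code if the address is
 * misaligned, out of range, already set, overlaps the other region,
 * or the type is unknown.
 */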
int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
{
        int r = 0;
        struct vgic_dist *vgic = &kvm->arch.vgic;

        if (addr & ~KVM_PHYS_MASK)
                return -E2BIG;

        if (addr & ~PAGE_MASK)
                return -EINVAL;

        mutex_lock(&kvm->lock);
        switch (type) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
                                       addr, KVM_VGIC_V2_DIST_SIZE);
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
                                       addr, KVM_VGIC_V2_CPU_SIZE);
                break;
        default:
                r = -ENODEV;
        }

        mutex_unlock(&kvm->lock);
        return r;
}