/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <trace/events/kvm.h>

/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending on the CPU interface.
 * - Interrupts that are pending on the distributor are stored in the
 *   vgic.irq_pending bitmap (this bitmap is updated by both user land
 *   ioctls and guest mmio ops, and other in-kernel peripherals such as
 *   the arch. timers).
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enabled
 *   - SPI: dist->irq_pending & dist->irq_enabled & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - If any of the above state changes, we must recalculate the oracle.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
 */
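
/*
 * A compact restatement of the oracle described above, as pseudocode
 * only (it mirrors compute_pending_for_cpu() further down rather than
 * defining anything new):
 *
 *   pending_on_cpu(c) =
 *	(irq_pending & irq_enabled) over c's private IRQs        != 0 ||
 *	(irq_pending & irq_enabled & irq_spi_target[c]) over SPIs != 0
 *
 * Whenever one of the three inputs changes, bit c of
 * dist->irq_pending_on_cpu is recomputed from this expression.
 */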

#include "vgic.h"

static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;

static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
}

static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
}

int kvm_vgic_map_resources(struct kvm *kvm)
{
	return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
}

/*
 * struct vgic_bitmap contains a bitmap made of unsigned longs, but
 * extracts u32s out of them.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif
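
/*
 * Worked example, assuming a 64-bit big-endian host: the bitmap is an
 * array of 64-bit longs and bit 0 of a long is its LSB. Stored
 * big-endian, that LSB ends up in the *higher*-addressed 32-bit half,
 * so 32-bit register 0 of a long lives at u32 index 1 and register 1
 * at u32 index 0. XOR-ing the register index with REG_OFFSET_SWIZZLE
 * performs exactly this pairwise swap; on little-endian or 32-bit
 * hosts the XOR with 0 is a no-op.
 */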

static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs;

	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
	if (!b->private)
		return -ENOMEM;

	b->shared = b->private + nr_cpus;

	return 0;
}

static void vgic_free_bitmap(struct vgic_bitmap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

/*
 * Call this function to convert a u64 value to an unsigned long * bitmask
 * in a way that works on both 32-bit and 64-bit LE and BE platforms.
 *
 * Warning: Calling this function may modify *val.
 */
static unsigned long *u64_to_bitmask(u64 *val)
{
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}
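
/*
 * Worked example with a made-up value, on a 32-bit big-endian host:
 * for a u64 of 0x0000000100000000 (bit 32 set), the bitmap helpers
 * expect bits 32-63 in the long at the higher address, but a
 * big-endian store puts the high word at the *lower* address. Swapping
 * the two halves first means find_first_bit() on the result correctly
 * reports bit 32.
 */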
157
Andre Przywara83215812014-06-07 00:53:08 +0200158u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
Marc Zyngierb47ef922013-01-21 19:36:14 -0500159{
160 offset >>= 2;
161 if (!offset)
Marc Zyngierc1bfb572014-07-08 12:09:01 +0100162 return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
Marc Zyngierb47ef922013-01-21 19:36:14 -0500163 else
Marc Zyngierc1bfb572014-07-08 12:09:01 +0100164 return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
Marc Zyngierb47ef922013-01-21 19:36:14 -0500165}
166
167static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
168 int cpuid, int irq)
169{
170 if (irq < VGIC_NR_PRIVATE_IRQS)
Marc Zyngierc1bfb572014-07-08 12:09:01 +0100171 return test_bit(irq, x->private + cpuid);
Marc Zyngierb47ef922013-01-21 19:36:14 -0500172
Marc Zyngierc1bfb572014-07-08 12:09:01 +0100173 return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
Marc Zyngierb47ef922013-01-21 19:36:14 -0500174}
175
Andre Przywara83215812014-06-07 00:53:08 +0200176void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
177 int irq, int val)
Marc Zyngierb47ef922013-01-21 19:36:14 -0500178{
179 unsigned long *reg;
180
181 if (irq < VGIC_NR_PRIVATE_IRQS) {
Marc Zyngierc1bfb572014-07-08 12:09:01 +0100182 reg = x->private + cpuid;
Marc Zyngierb47ef922013-01-21 19:36:14 -0500183 } else {
Marc Zyngierc1bfb572014-07-08 12:09:01 +0100184 reg = x->shared;
Marc Zyngierb47ef922013-01-21 19:36:14 -0500185 irq -= VGIC_NR_PRIVATE_IRQS;
186 }
187
188 if (val)
189 set_bit(irq, reg);
190 else
191 clear_bit(irq, reg);
192}
193
194static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
195{
Marc Zyngierc1bfb572014-07-08 12:09:01 +0100196 return x->private + cpuid;
Marc Zyngierb47ef922013-01-21 19:36:14 -0500197}
198
Andre Przywara83215812014-06-07 00:53:08 +0200199unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
Marc Zyngierb47ef922013-01-21 19:36:14 -0500200{
Marc Zyngierc1bfb572014-07-08 12:09:01 +0100201 return x->shared;
202}
203
204static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
205{
206 int size;
207
208 size = nr_cpus * VGIC_NR_PRIVATE_IRQS;
209 size += nr_irqs - VGIC_NR_PRIVATE_IRQS;
210
211 x->private = kzalloc(size, GFP_KERNEL);
212 if (!x->private)
213 return -ENOMEM;
214
215 x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
216 return 0;
217}
218
219static void vgic_free_bytemap(struct vgic_bytemap *b)
220{
221 kfree(b->private);
222 b->private = NULL;
223 b->shared = NULL;
Marc Zyngierb47ef922013-01-21 19:36:14 -0500224}
225
Andre Przywara83215812014-06-07 00:53:08 +0200226u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
Marc Zyngierb47ef922013-01-21 19:36:14 -0500227{
Marc Zyngierc1bfb572014-07-08 12:09:01 +0100228 u32 *reg;
229
230 if (offset < VGIC_NR_PRIVATE_IRQS) {
231 reg = x->private;
232 offset += cpuid * VGIC_NR_PRIVATE_IRQS;
233 } else {
234 reg = x->shared;
235 offset -= VGIC_NR_PRIVATE_IRQS;
236 }
237
238 return reg + (offset / sizeof(u32));
Marc Zyngierb47ef922013-01-21 19:36:14 -0500239}
240
241#define VGIC_CFG_LEVEL 0
242#define VGIC_CFG_EDGE 1
243
244static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
245{
246 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
247 int irq_val;
248
249 irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
250 return irq_val == VGIC_CFG_EDGE;
251}
252
253static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
254{
255 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
256
257 return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
258}
259
Christoffer Dalldbf20f92014-06-09 12:55:13 +0200260static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
Marc Zyngier9d949dc2013-01-21 19:36:14 -0500261{
262 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
263
Christoffer Dalldbf20f92014-06-09 12:55:13 +0200264 return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
Marc Zyngier9d949dc2013-01-21 19:36:14 -0500265}
266
Christoffer Dalldbf20f92014-06-09 12:55:13 +0200267static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
Marc Zyngier9d949dc2013-01-21 19:36:14 -0500268{
269 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
270
Christoffer Dalldbf20f92014-06-09 12:55:13 +0200271 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
Marc Zyngier9d949dc2013-01-21 19:36:14 -0500272}
273
Christoffer Dalldbf20f92014-06-09 12:55:13 +0200274static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
Marc Zyngier9d949dc2013-01-21 19:36:14 -0500275{
276 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
277
Christoffer Dalldbf20f92014-06-09 12:55:13 +0200278 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
Marc Zyngier9d949dc2013-01-21 19:36:14 -0500279}
280
Christoffer Dallfaa1b462014-06-14 21:54:51 +0200281static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
282{
283 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
284
285 return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
286}
287
288static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
289{
290 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
291
292 vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
293}
294
295static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
296{
297 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
298
299 vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
300}
301
302static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
303{
304 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
305
306 return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
307}
308
309static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
310{
311 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
312
313 vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
314}
315
Marc Zyngier9d949dc2013-01-21 19:36:14 -0500316static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
317{
318 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
319
Christoffer Dall227844f2014-06-09 12:27:18 +0200320 return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
Marc Zyngier9d949dc2013-01-21 19:36:14 -0500321}
322
Andre Przywara83215812014-06-07 00:53:08 +0200323void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
Marc Zyngierb47ef922013-01-21 19:36:14 -0500324{
325 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
326
Christoffer Dall227844f2014-06-09 12:27:18 +0200327 vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
Marc Zyngierb47ef922013-01-21 19:36:14 -0500328}
329
Andre Przywara83215812014-06-07 00:53:08 +0200330void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
Marc Zyngierb47ef922013-01-21 19:36:14 -0500331{
332 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
333
Christoffer Dall227844f2014-06-09 12:27:18 +0200334 vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
Marc Zyngierb47ef922013-01-21 19:36:14 -0500335}
336
337static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
338{
339 if (irq < VGIC_NR_PRIVATE_IRQS)
340 set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
341 else
342 set_bit(irq - VGIC_NR_PRIVATE_IRQS,
343 vcpu->arch.vgic_cpu.pending_shared);
344}
345
Andre Przywara83215812014-06-07 00:53:08 +0200346void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
Marc Zyngierb47ef922013-01-21 19:36:14 -0500347{
348 if (irq < VGIC_NR_PRIVATE_IRQS)
349 clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
350 else
351 clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
352 vcpu->arch.vgic_cpu.pending_shared);
353}
354
Christoffer Dalldbf20f92014-06-09 12:55:13 +0200355static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
356{
357 return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
358}
359
Marc Zyngier1a89dd92013-01-21 19:36:12 -0500360/**
361 * vgic_reg_access - access vgic register
362 * @mmio: pointer to the data describing the mmio access
363 * @reg: pointer to the virtual backing of vgic distributor data
364 * @offset: least significant 2 bits used for word offset
365 * @mode: ACCESS_ mode (see defines above)
366 *
367 * Helper to make vgic register access easier using one of the access
368 * modes defined for vgic register access
369 * (read,raz,write-ignored,setbit,clearbit,write)
370 */
Andre Przywara83215812014-06-07 00:53:08 +0200371void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
372 phys_addr_t offset, int mode)
Marc Zyngier1a89dd92013-01-21 19:36:12 -0500373{
374 int word_offset = (offset & 3) * 8;
375 u32 mask = (1UL << (mmio->len * 8)) - 1;
376 u32 regval;
377
378 /*
379 * Any alignment fault should have been delivered to the guest
380 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
381 */
382
383 if (reg) {
384 regval = *reg;
385 } else {
386 BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
387 regval = 0;
388 }
389
390 if (mmio->is_write) {
391 u32 data = mmio_data_read(mmio, mask) << word_offset;
392 switch (ACCESS_WRITE_MASK(mode)) {
393 case ACCESS_WRITE_IGNORED:
394 return;
395
396 case ACCESS_WRITE_SETBIT:
397 regval |= data;
398 break;
399
400 case ACCESS_WRITE_CLEARBIT:
401 regval &= ~data;
402 break;
403
404 case ACCESS_WRITE_VALUE:
405 regval = (regval & ~(mask << word_offset)) | data;
406 break;
407 }
408 *reg = regval;
409 } else {
410 switch (ACCESS_READ_MASK(mode)) {
411 case ACCESS_READ_RAZ:
412 regval = 0;
413 /* fall through */
414
415 case ACCESS_READ_VALUE:
416 mmio_data_write(mmio, mask, regval >> word_offset);
417 }
418 }
419}
420
Andre Przywara83215812014-06-07 00:53:08 +0200421bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
422 phys_addr_t offset)
Marc Zyngierb47ef922013-01-21 19:36:14 -0500423{
424 vgic_reg_access(mmio, NULL, offset,
425 ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
426 return false;
427}
428
Andre Przywara83215812014-06-07 00:53:08 +0200429bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
430 phys_addr_t offset, int vcpu_id, int access)
Marc Zyngierb47ef922013-01-21 19:36:14 -0500431{
Andre Przywarad97f6832014-06-11 14:11:49 +0200432 u32 *reg;
433 int mode = ACCESS_READ_VALUE | access;
434 struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
435
436 reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
437 vgic_reg_access(mmio, reg, offset, mode);
Marc Zyngierb47ef922013-01-21 19:36:14 -0500438 if (mmio->is_write) {
Andre Przywarad97f6832014-06-11 14:11:49 +0200439 if (access & ACCESS_WRITE_CLEARBIT) {
440 if (offset < 4) /* Force SGI enabled */
441 *reg |= 0xffff;
442 vgic_retire_disabled_irqs(target_vcpu);
443 }
444 vgic_update_state(kvm);
Marc Zyngierb47ef922013-01-21 19:36:14 -0500445 return true;
446 }
447
448 return false;
449}
450
Andre Przywara83215812014-06-07 00:53:08 +0200451bool vgic_handle_set_pending_reg(struct kvm *kvm,
452 struct kvm_exit_mmio *mmio,
453 phys_addr_t offset, int vcpu_id)
Marc Zyngierb47ef922013-01-21 19:36:14 -0500454{
Christoffer Dall9da48b52014-06-14 22:30:45 +0200455 u32 *reg, orig;
Christoffer Dallfaa1b462014-06-14 21:54:51 +0200456 u32 level_mask;
Andre Przywarad97f6832014-06-11 14:11:49 +0200457 int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
458 struct vgic_dist *dist = &kvm->arch.vgic;
Christoffer Dallfaa1b462014-06-14 21:54:51 +0200459
Andre Przywarad97f6832014-06-11 14:11:49 +0200460 reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
Christoffer Dallfaa1b462014-06-14 21:54:51 +0200461 level_mask = (~(*reg));
462
463 /* Mark both level and edge triggered irqs as pending */
Andre Przywarad97f6832014-06-11 14:11:49 +0200464 reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
Christoffer Dall9da48b52014-06-14 22:30:45 +0200465 orig = *reg;
Andre Przywarad97f6832014-06-11 14:11:49 +0200466 vgic_reg_access(mmio, reg, offset, mode);
Christoffer Dallfaa1b462014-06-14 21:54:51 +0200467
Marc Zyngierb47ef922013-01-21 19:36:14 -0500468 if (mmio->is_write) {
Christoffer Dallfaa1b462014-06-14 21:54:51 +0200469 /* Set the soft-pending flag only for level-triggered irqs */
470 reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
Andre Przywarad97f6832014-06-11 14:11:49 +0200471 vcpu_id, offset);
472 vgic_reg_access(mmio, reg, offset, mode);
Christoffer Dallfaa1b462014-06-14 21:54:51 +0200473 *reg &= level_mask;
474
Christoffer Dall9da48b52014-06-14 22:30:45 +0200475 /* Ignore writes to SGIs */
476 if (offset < 2) {
477 *reg &= ~0xffff;
478 *reg |= orig & 0xffff;
479 }
480
Andre Przywarad97f6832014-06-11 14:11:49 +0200481 vgic_update_state(kvm);
Marc Zyngierb47ef922013-01-21 19:36:14 -0500482 return true;
483 }
484
485 return false;
486}
487
bool vgic_handle_clear_pending_reg(struct kvm *kvm,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset, int vcpu_id)
{
	u32 *level_active;
	u32 *reg, orig;
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset, mode);
	if (mmio->is_write) {
		/* Re-set level-triggered irqs whose input line is still active */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset, mode);

		vgic_update_state(kvm);
		return true;
	}
	return false;
}

static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
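
/*
 * Worked example with made-up values: vgic_cfg_expand(0x0003) sets bit
 * (2 * i + 1) for i = 0 and i = 1, giving 0x0000000a, i.e. two IRQs
 * marked edge-triggered with the reserved LSB of each 2-bit field
 * left 0. vgic_cfg_compress(0x0000000a) recovers 0x0003, so the two
 * functions are exact inverses on the bit we keep.
 */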

/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
			 phys_addr_t offset)
{
	u32 val;

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}

/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vcpu: pointer to the VCPU whose LRs are to be scanned
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface.  It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			add_sgi_source(vcpu, lr.irq, lr.source);
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}

const
struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct kvm_mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}

static bool vgic_validate_access(const struct vgic_dist *dist,
				 const struct kvm_mmio_range *range,
				 unsigned long offset)
{
	int irq;

	if (!range->bits_per_irq)
		return true;	/* Not an irq-based access */

	irq = offset * 8 / range->bits_per_irq;
	if (irq >= dist->nr_irqs)
		return false;

	return true;
}

/*
 * Call the respective handler function for the given range.
 * We split up any 64 bit accesses into two consecutive 32 bit
 * handler calls and merge the result afterwards.
 * We do this in a little endian fashion regardless of the host's
 * or guest's endianness, because the GIC is always LE and the rest of
 * the code (vgic_reg_access) also puts it in a LE fashion already.
 * At this point we have already identified the handle function, so
 * range points to that one entry and offset is relative to this.
 */
static bool call_range_handler(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio,
			       unsigned long offset,
			       const struct kvm_mmio_range *range)
{
	u32 *data32 = (void *)mmio->data;
	struct kvm_exit_mmio mmio32;
	bool ret;

	if (likely(mmio->len <= 4))
		return range->handle_mmio(vcpu, mmio, offset);

	/*
	 * Any access bigger than 4 bytes (that we currently handle in KVM)
	 * is actually 8 bytes long, caused by a 64-bit access
	 */

	mmio32.len = 4;
	mmio32.is_write = mmio->is_write;
	mmio32.private = mmio->private;

	mmio32.phys_addr = mmio->phys_addr + 4;
	if (mmio->is_write)
		*(u32 *)mmio32.data = data32[1];
	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
	if (!mmio->is_write)
		data32[1] = *(u32 *)mmio32.data;

	mmio32.phys_addr = mmio->phys_addr;
	if (mmio->is_write)
		*(u32 *)mmio32.data = data32[0];
	ret |= range->handle_mmio(vcpu, &mmio32, offset);
	if (!mmio->is_write)
		data32[0] = *(u32 *)mmio32.data;

	return ret;
}
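
/*
 * Worked example with a made-up access: a 64-bit guest write of
 * 0x1122334455667788 to phys_addr P arrives with data32[0] holding the
 * low word 0x55667788 and data32[1] the high word 0x11223344 (LE
 * layout). call_range_handler() issues the 32-bit handler call for
 * P + 4 with 0x11223344 first, then the call for P with 0x55667788,
 * and ORs the two "updated state" results together.
 */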

/**
 * vgic_handle_mmio_range - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 * @ranges:	array of MMIO ranges in a given region
 * @mmio_base:	base address of that region
 *
 * returns true if the MMIO access could be performed
 */
bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
			    struct kvm_exit_mmio *mmio,
			    const struct kvm_mmio_range *ranges,
			    unsigned long mmio_base)
{
	const struct kvm_mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool updated_state;
	unsigned long offset;

	offset = mmio->phys_addr - mmio_base;
	range = vgic_find_range(ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset -= range->base;
	if (vgic_validate_access(dist, range, offset)) {
		updated_state = call_range_handler(vcpu, mmio, offset, range);
	} else {
		if (!mmio->is_write)
			memset(mmio->data, 0, mmio->len);
		updated_state = false;
	}
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}

/**
 * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 * Calls the actual handling routine for the selected VGIC model.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	if (!irqchip_in_kernel(vcpu->kvm))
		return false;

	/*
	 * This will currently call either vgic_v2_handle_mmio() or
	 * vgic_v3_handle_mmio(), which in turn will call
	 * vgic_handle_mmio_range() defined above.
	 */
	return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio);
}

static int vgic_nr_shared_irqs(struct vgic_dist *dist)
{
	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
}

static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, nr_shared);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, nr_shared);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < vgic_nr_shared_irqs(dist));
}

/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, dist->irq_pending_on_cpu);
		}
	}
}

static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}

static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}

/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}

static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
				 int lr_nr, struct vgic_lr vlr)
{
	if (vgic_dist_irq_is_pending(vcpu, irq)) {
		vlr.state |= LR_STATE_PENDING;
		kvm_debug("Set pending: 0x%x\n", vlr.state);
	}

	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr_nr, vlr);
}

/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 * sgi_source must be zero for any non-SGI interrupts.
 */
bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= dist->nr_irqs);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = 0;
	vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);

	return true;
}

static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}

/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
	}
}

static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool level_pending = false;
	struct kvm *kvm = vcpu->kvm;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

			spin_lock(&dist->lock);
			vgic_irq_clear_queued(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/*
			 * If the IRQ was EOIed it was also ACKed and we
			 * therefore assume we can clear the soft pending
			 * state (should it have been set) for this interrupt.
			 *
			 * Note: if the IRQ soft pending state was set after
			 * the IRQ was acked, it actually shouldn't be
			 * cleared, but we have no way of knowing that unless
			 * we start trapping ACKs when the soft-pending state
			 * is set.
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/*
			 * kvm_notify_acked_irq calls kvm_set_irq()
			 * to reset the IRQ level. Need to release the
			 * lock for kvm_set_irq to grab it.
			 */
			spin_unlock(&dist->lock);

			kvm_notify_acked_irq(kvm, 0,
					     vlr.irq - VGIC_NR_PRIVATE_IRQS);
			spin_lock(&dist->lock);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			spin_unlock(&dist->lock);

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	return level_pending;
}
Eric Auger649cf732015-03-04 11:14:35 +01001169/* Sync back the VGIC state after a guest run */
Marc Zyngier9d949dc2013-01-21 19:36:14 -05001170static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1171{
1172 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1173 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
Marc Zyngier69bb2c92013-06-04 10:29:39 +01001174 u64 elrsr;
1175 unsigned long *elrsr_ptr;
Marc Zyngier9d949dc2013-01-21 19:36:14 -05001176 int lr, pending;
1177 bool level_pending;
1178
1179 level_pending = vgic_process_maintenance(vcpu);
Marc Zyngier69bb2c92013-06-04 10:29:39 +01001180 elrsr = vgic_get_elrsr(vcpu);
Christoffer Dall2df36a52014-09-28 16:04:26 +02001181 elrsr_ptr = u64_to_bitmask(&elrsr);
Marc Zyngier9d949dc2013-01-21 19:36:14 -05001182
1183 /* Clear mappings for empty LRs */
Marc Zyngier8f186d52014-02-04 18:13:03 +00001184 for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
Marc Zyngier8d5c6b02013-06-03 15:55:02 +01001185 struct vgic_lr vlr;
Marc Zyngier9d949dc2013-01-21 19:36:14 -05001186
1187 if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
1188 continue;
1189
Marc Zyngier8d5c6b02013-06-03 15:55:02 +01001190 vlr = vgic_get_lr(vcpu, lr);
Marc Zyngier9d949dc2013-01-21 19:36:14 -05001191
Marc Zyngier5fb66da2014-07-08 12:09:05 +01001192 BUG_ON(vlr.irq >= dist->nr_irqs);
Marc Zyngier8d5c6b02013-06-03 15:55:02 +01001193 vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
Marc Zyngier9d949dc2013-01-21 19:36:14 -05001194 }
1195
1196 /* Check if we still have something up our sleeve... */
Marc Zyngier8f186d52014-02-04 18:13:03 +00001197 pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
1198 if (level_pending || pending < vgic->nr_lr)
Marc Zyngierc1bfb572014-07-08 12:09:01 +01001199 set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
Marc Zyngier9d949dc2013-01-21 19:36:14 -05001200}
1201
1202void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1203{
1204 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1205
1206 if (!irqchip_in_kernel(vcpu->kvm))
1207 return;
1208
1209 spin_lock(&dist->lock);
1210 __kvm_vgic_flush_hwstate(vcpu);
1211 spin_unlock(&dist->lock);
1212}
1213
1214void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1215{
1216 if (!irqchip_in_kernel(vcpu->kvm))
1217 return;
1218
1219 __kvm_vgic_sync_hwstate(vcpu);
1220}
1221
1222int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
1223{
1224 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1225
1226 if (!irqchip_in_kernel(vcpu->kvm))
1227 return 0;
1228
Marc Zyngierc1bfb572014-07-08 12:09:01 +01001229 return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
Marc Zyngier9d949dc2013-01-21 19:36:14 -05001230}
1231
Andre Przywara83215812014-06-07 00:53:08 +02001232void vgic_kick_vcpus(struct kvm *kvm)
Marc Zyngier5863c2c2013-01-21 19:36:15 -05001233{
1234 struct kvm_vcpu *vcpu;
1235 int c;
1236
1237 /*
1238 * We've injected an interrupt, time to find out who deserves
1239 * a good kick...
1240 */
1241 kvm_for_each_vcpu(c, vcpu, kvm) {
1242 if (kvm_vgic_vcpu_pending_irq(vcpu))
1243 kvm_vcpu_kick(vcpu);
1244 }
1245}
1246
1247static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
1248{
Christoffer Dall227844f2014-06-09 12:27:18 +02001249 int edge_triggered = vgic_irq_is_edge(vcpu, irq);
Marc Zyngier5863c2c2013-01-21 19:36:15 -05001250
1251 /*
1252 * Only inject an interrupt if:
1253 * - edge triggered and we have a rising edge
1254 * - level triggered and we change level
1255 */
Christoffer Dallfaa1b462014-06-14 21:54:51 +02001256 if (edge_triggered) {
1257 int state = vgic_dist_irq_is_pending(vcpu, irq);
Marc Zyngier5863c2c2013-01-21 19:36:15 -05001258 return level > state;
Christoffer Dallfaa1b462014-06-14 21:54:51 +02001259 } else {
1260 int state = vgic_dist_irq_get_level(vcpu, irq);
Marc Zyngier5863c2c2013-01-21 19:36:15 -05001261 return level != state;
Christoffer Dallfaa1b462014-06-14 21:54:51 +02001262 }
Marc Zyngier5863c2c2013-01-21 19:36:15 -05001263}

static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
                                   unsigned int irq_num, bool level)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int edge_triggered, level_triggered;
        int enabled;
        bool ret = true, can_inject = true;

        spin_lock(&dist->lock);

        vcpu = kvm_get_vcpu(kvm, cpuid);
        edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
        level_triggered = !edge_triggered;

        if (!vgic_validate_injection(vcpu, irq_num, level)) {
                ret = false;
                goto out;
        }

        if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
                cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
                if (cpuid == VCPU_NOT_ALLOCATED) {
                        /* Pretend we use CPU0, and prevent injection */
                        cpuid = 0;
                        can_inject = false;
                }
                vcpu = kvm_get_vcpu(kvm, cpuid);
        }

        kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

        if (level) {
                if (level_triggered)
                        vgic_dist_irq_set_level(vcpu, irq_num);
                vgic_dist_irq_set_pending(vcpu, irq_num);
        } else {
                if (level_triggered) {
                        vgic_dist_irq_clear_level(vcpu, irq_num);
                        if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
                                vgic_dist_irq_clear_pending(vcpu, irq_num);
                }

                ret = false;
                goto out;
        }

        enabled = vgic_irq_is_enabled(vcpu, irq_num);

        if (!enabled || !can_inject) {
                ret = false;
                goto out;
        }

        if (!vgic_can_sample_irq(vcpu, irq_num)) {
                /*
                 * Level interrupt in progress, will be picked up
                 * when EOId.
                 */
                ret = false;
                goto out;
        }

        if (level) {
                vgic_cpu_irq_set(vcpu, irq_num);
                set_bit(cpuid, dist->irq_pending_on_cpu);
        }

out:
        spin_unlock(&dist->lock);

        return ret ? cpuid : -EINVAL;
}
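
/*
 * Return contract (editorial note): vgic_update_irq_pending() returns the
 * id of the vcpu to kick when the injection changed guest-visible state,
 * and -EINVAL when it was swallowed: validation failed, the line was
 * lowered, the IRQ is disabled, the target vcpu is not allocated, or a
 * level IRQ is still in flight and will be resampled on EOI.
 */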

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm: The VM structure pointer
 * @cpuid: The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level: Edge-triggered: true: to trigger the interrupt
 *                         false: to ignore the call
 *         Level-sensitive: true: activates an interrupt
 *                          false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
                        bool level)
{
        int ret = 0;
        int vcpu_id;

        if (unlikely(!vgic_initialized(kvm))) {
                /*
                 * We only provide the automatic initialization of the VGIC
                 * for the legacy case of a GICv2. Any other type must be
                 * explicitly initialized once it has been set up with the
                 * respective KVM device call.
                 */
                if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) {
                        ret = -EBUSY;
                        goto out;
                }
                mutex_lock(&kvm->lock);
                ret = vgic_init(kvm);
                mutex_unlock(&kvm->lock);

                if (ret)
                        goto out;
        }

        vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
        if (vcpu_id >= 0) {
                /* kick the specified vcpu */
                kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
        }

out:
        return ret;
}
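
/*
 * Usage sketch (editorial; MY_UART_SPI is a hypothetical device line, not
 * a symbol defined anywhere in this file): an in-kernel device model
 * driving a level-sensitive SPI mirrors its line state into the vgic:
 *
 *      kvm_vgic_inject_irq(kvm, 0, 32 + MY_UART_SPI, true);   // raise
 *      // ... guest services the device, condition is cleared ...
 *      kvm_vgic_inject_irq(kvm, 0, 32 + MY_UART_SPI, false);  // lower
 *
 * The cpuid argument only matters for private interrupts; for SPIs the
 * target is taken from irq_spi_cpu[], as programmed via GICD_ITARGETSRn.
 */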

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
        /*
         * We cannot rely on the vgic maintenance interrupt to be
         * delivered synchronously. This means we can only use it to
         * exit the VM, and we perform the handling of EOIed
         * interrupts on the exit path (see vgic_process_maintenance).
         */
        return IRQ_HANDLED;
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        kfree(vgic_cpu->pending_shared);
        kfree(vgic_cpu->vgic_irq_lr_map);
        vgic_cpu->pending_shared = NULL;
        vgic_cpu->vgic_irq_lr_map = NULL;
}

static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
        vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
        vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);

        if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
                kvm_vgic_vcpu_destroy(vcpu);
                return -ENOMEM;
        }

        memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);

        /*
         * Store the number of LRs per vcpu, so we don't have to go
         * all the way to the distributor structure to find out. Only
         * assembly code should use this one.
         */
        vgic_cpu->nr_lr = vgic->nr_lr;

        return 0;
}
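
/*
 * Sizing example (editorial): with the legacy default of 256 interrupts,
 * pending_shared covers the 224 shared IRQs in (256 - 32) / 8 = 28 bytes
 * of bitmap, while vgic_irq_lr_map needs one byte per interrupt, i.e.
 * 256 bytes, each initialized to LR_EMPTY above.
 */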

/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
 * The host's GIC naturally limits the maximum number of VCPUs a guest
 * can use.
 */
int kvm_vgic_get_max_vcpus(void)
{
        return vgic->max_gic_vcpus;
}

void kvm_vgic_destroy(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int i;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_vgic_vcpu_destroy(vcpu);

        vgic_free_bitmap(&dist->irq_enabled);
        vgic_free_bitmap(&dist->irq_level);
        vgic_free_bitmap(&dist->irq_pending);
        vgic_free_bitmap(&dist->irq_soft_pend);
        vgic_free_bitmap(&dist->irq_queued);
        vgic_free_bitmap(&dist->irq_cfg);
        vgic_free_bytemap(&dist->irq_priority);
        if (dist->irq_spi_target) {
                for (i = 0; i < dist->nr_cpus; i++)
                        vgic_free_bitmap(&dist->irq_spi_target[i]);
        }
        kfree(dist->irq_sgi_sources);
        kfree(dist->irq_spi_cpu);
        kfree(dist->irq_spi_mpidr);
        kfree(dist->irq_spi_target);
        kfree(dist->irq_pending_on_cpu);
        dist->irq_sgi_sources = NULL;
        dist->irq_spi_cpu = NULL;
        dist->irq_spi_target = NULL;
        dist->irq_pending_on_cpu = NULL;
        dist->nr_cpus = 0;
}

/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
int vgic_init(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int nr_cpus, nr_irqs;
        int ret, i, vcpu_id;

        if (vgic_initialized(kvm))
                return 0;

        nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
        if (!nr_cpus)           /* No vcpus? Can't be good... */
                return -ENODEV;

        /*
         * If nobody configured the number of interrupts, use the
         * legacy one.
         */
        if (!dist->nr_irqs)
                dist->nr_irqs = VGIC_NR_IRQS_LEGACY;

        nr_irqs = dist->nr_irqs;

        ret = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
        ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
        ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
        ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
        ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
        ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
        ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

        if (ret)
                goto out;

        dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
        dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
        dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
                                       GFP_KERNEL);
        dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
                                           GFP_KERNEL);
        if (!dist->irq_sgi_sources ||
            !dist->irq_spi_cpu ||
            !dist->irq_spi_target ||
            !dist->irq_pending_on_cpu) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < nr_cpus; i++)
                ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
                                        nr_cpus, nr_irqs);

        if (ret)
                goto out;

        ret = kvm->arch.vgic.vm_ops.init_model(kvm);
        if (ret)
                goto out;

        kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
                ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
                if (ret) {
                        kvm_err("VGIC: Failed to allocate vcpu memory\n");
                        break;
                }

                for (i = 0; i < dist->nr_irqs; i++) {
                        if (i < VGIC_NR_PPIS)
                                vgic_bitmap_set_irq_val(&dist->irq_enabled,
                                                        vcpu->vcpu_id, i, 1);
                        if (i < VGIC_NR_PRIVATE_IRQS)
                                vgic_bitmap_set_irq_val(&dist->irq_cfg,
                                                        vcpu->vcpu_id, i,
                                                        VGIC_CFG_EDGE);
                }

                vgic_enable(vcpu);
        }

out:
        if (ret)
                kvm_vgic_destroy(kvm);

        return ret;
}
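
/*
 * Worked example (editorial): a 4-vcpu VM that never set
 * KVM_DEV_ARM_VGIC_GRP_NR_IRQS ends up with nr_irqs = VGIC_NR_IRQS_LEGACY
 * (256). Per-IRQ state is then kept in 4-cpu x 256-irq maps, the 16 SGIs
 * come up enabled, and all 32 private interrupts start out configured as
 * edge-triggered until the guest reprograms GICD_ICFGRn.
 */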

static int init_vgic_model(struct kvm *kvm, int type)
{
        switch (type) {
        case KVM_DEV_TYPE_ARM_VGIC_V2:
                vgic_v2_init_emulation(kvm);
                break;
#ifdef CONFIG_ARM_GIC_V3
        case KVM_DEV_TYPE_ARM_VGIC_V3:
                vgic_v3_init_emulation(kvm);
                break;
#endif
        default:
                return -ENODEV;
        }

        if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
                return -E2BIG;

        return 0;
}

int kvm_vgic_create(struct kvm *kvm, u32 type)
{
        int i, vcpu_lock_idx = -1, ret;
        struct kvm_vcpu *vcpu;

        mutex_lock(&kvm->lock);

        if (irqchip_in_kernel(kvm)) {
                ret = -EEXIST;
                goto out;
        }

        /*
         * This function is also called by the KVM_CREATE_IRQCHIP handler,
         * which has not yet had a chance to check the availability of the
         * GICv2 emulation, so check it here again. KVM_CREATE_DEVICE does
         * the proper checks already.
         */
        if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
                ret = -ENODEV;
                goto out;
        }

        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab the
         * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
         * that no other VCPUs are run while we create the vgic.
         */
        ret = -EBUSY;
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!mutex_trylock(&vcpu->mutex))
                        goto out_unlock;
                vcpu_lock_idx = i;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu->arch.has_run_once)
                        goto out_unlock;
        }
        ret = 0;

        ret = init_vgic_model(kvm, type);
        if (ret)
                goto out_unlock;

        spin_lock_init(&kvm->arch.vgic.lock);
        kvm->arch.vgic.in_kernel = true;
        kvm->arch.vgic.vgic_model = type;
        kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
        kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
        kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
        kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;

out_unlock:
        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
                vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
                mutex_unlock(&vcpu->mutex);
        }

out:
        mutex_unlock(&kvm->lock);
        return ret;
}
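
/*
 * Userspace view (editorial sketch, error handling omitted): the usual way
 * into kvm_vgic_create() is the KVM_CREATE_DEVICE ioctl on the VM fd:
 *
 *      struct kvm_create_device cd = {
 *              .type = KVM_DEV_TYPE_ARM_VGIC_V2,  // or KVM_DEV_TYPE_ARM_VGIC_V3
 *      };
 *      ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *      // cd.fd now holds a new fd referring to the VGIC device
 *
 * The legacy KVM_CREATE_IRQCHIP ioctl lands here as well, but only
 * supports the GICv2 model.
 */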

static int vgic_ioaddr_overlap(struct kvm *kvm)
{
        phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
        phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

        if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
                return 0;
        if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
            (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
                return -EBUSY;
        return 0;
}
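
/*
 * Worked example (editorial): with KVM_VGIC_V2_DIST_SIZE being 4K, a
 * distributor at 0x08000000 and a CPU interface at 0x08000800 fail with
 * -EBUSY, since 0x08000000 + 0x1000 > 0x08000800; moving the CPU
 * interface up to 0x08001000 satisfies both conditions.
 */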

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
                              phys_addr_t addr, phys_addr_t size)
{
        int ret;

        if (addr & ~KVM_PHYS_MASK)
                return -E2BIG;

        if (addr & (SZ_4K - 1))
                return -EINVAL;

        if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
                return -EEXIST;
        if (addr + size < addr)
                return -EINVAL;

        *ioaddr = addr;
        ret = vgic_ioaddr_overlap(kvm);
        if (ret)
                *ioaddr = VGIC_ADDR_UNDEF;

        return ret;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm: pointer to the vm struct
 * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr: pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC, so it is user space that initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
        int r = 0;
        struct vgic_dist *vgic = &kvm->arch.vgic;
        int type_needed;
        phys_addr_t *addr_ptr, block_size;
        phys_addr_t alignment;

        mutex_lock(&kvm->lock);
        switch (type) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
                addr_ptr = &vgic->vgic_dist_base;
                block_size = KVM_VGIC_V2_DIST_SIZE;
                alignment = SZ_4K;
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
                addr_ptr = &vgic->vgic_cpu_base;
                block_size = KVM_VGIC_V2_CPU_SIZE;
                alignment = SZ_4K;
                break;
#ifdef CONFIG_ARM_GIC_V3
        case KVM_VGIC_V3_ADDR_TYPE_DIST:
                type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
                addr_ptr = &vgic->vgic_dist_base;
                block_size = KVM_VGIC_V3_DIST_SIZE;
                alignment = SZ_64K;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
                addr_ptr = &vgic->vgic_redist_base;
                block_size = KVM_VGIC_V3_REDIST_SIZE;
                alignment = SZ_64K;
                break;
#endif
        default:
                r = -ENODEV;
                goto out;
        }

        if (vgic->vgic_model != type_needed) {
                r = -ENODEV;
                goto out;
        }

        if (write) {
                if (!IS_ALIGNED(*addr, alignment))
                        r = -EINVAL;
                else
                        r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
                                               block_size);
        } else {
                *addr = *addr_ptr;
        }

out:
        mutex_unlock(&kvm->lock);
        return r;
}
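
/*
 * Userspace usage sketch (editorial; the base address is an arbitrary
 * example): the addresses are set with KVM_SET_DEVICE_ATTR on the VGIC
 * device fd, which reaches kvm_vgic_addr() via vgic_set_common_attr()
 * below:
 *
 *      __u64 dist_base = 0x08000000;
 *      struct kvm_device_attr attr = {
 *              .group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *              .attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
 *              .addr  = (__u64)(unsigned long)&dist_base,
 *      };
 *      ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */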

int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        int r;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;

                r = kvm_vgic_addr(dev->kvm, type, &addr, true);
                return (r == -ENODEV) ? -ENXIO : r;
        }
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 val;
                int ret = 0;

                if (get_user(val, uaddr))
                        return -EFAULT;

                /*
                 * We require:
                 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
                 * - at most 1024 interrupts
                 * - a multiple of 32 interrupts
                 */
                if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
                    val > VGIC_MAX_IRQS ||
                    (val & 31))
                        return -EINVAL;
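                /*
                 * Editorial examples: val = 64 (32 privates plus 32 SPIs)
                 * is the smallest accepted value and val = 1024 the
                 * largest; val = 100 fails the multiple-of-32 test.
                 */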

                mutex_lock(&dev->kvm->lock);

                if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
                        ret = -EBUSY;
                else
                        dev->kvm->arch.vgic.nr_irqs = val;

                mutex_unlock(&dev->kvm->lock);

                return ret;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        r = vgic_init(dev->kvm);
                        return r;
                }
                break;
        }
        }

        return -ENXIO;
}

int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        int r = -ENXIO;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                r = kvm_vgic_addr(dev->kvm, type, &addr, false);
                if (r)
                        return (r == -ENODEV) ? -ENXIO : r;

                if (copy_to_user(uaddr, &addr, sizeof(addr)))
                        return -EFAULT;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;

                r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
                break;
        }
        }

        return r;
}

int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset)
{
        struct kvm_exit_mmio dev_attr_mmio;

        dev_attr_mmio.len = 4;
        if (vgic_find_range(ranges, &dev_attr_mmio, offset))
                return 0;
        else
                return -ENXIO;
}

static void vgic_init_maintenance_interrupt(void *info)
{
        enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
                           unsigned long action, void *cpu)
{
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
                vgic_init_maintenance_interrupt(NULL);
                break;
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                disable_percpu_irq(vgic->maint_irq);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
        .notifier_call = vgic_cpu_notify,
};

static const struct of_device_id vgic_ids[] = {
        { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
        { .compatible = "arm,cortex-a7-gic",  .data = vgic_v2_probe, },
        { .compatible = "arm,gic-400",        .data = vgic_v2_probe, },
        { .compatible = "arm,gic-v3",         .data = vgic_v3_probe, },
        {},
};

int kvm_vgic_hyp_init(void)
{
        const struct of_device_id *matched_id;
        int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
                          const struct vgic_params **);
        struct device_node *vgic_node;
        int ret;

        vgic_node = of_find_matching_node_and_match(NULL,
                                                    vgic_ids, &matched_id);
        if (!vgic_node) {
                kvm_err("error: no compatible GIC node found\n");
                return -ENODEV;
        }

        vgic_probe = matched_id->data;
        ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
        if (ret)
                return ret;

        ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
                                 "vgic", kvm_get_running_vcpus());
        if (ret) {
                kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
                return ret;
        }

        ret = __register_cpu_notifier(&vgic_cpu_nb);
        if (ret) {
                kvm_err("Cannot register vgic CPU notifier\n");
                goto out_free_irq;
        }

        /* Callback into arch code for setup */
        vgic_arch_setup(vgic);

        on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

        return 0;

out_free_irq:
        free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
        return ret;
}

int kvm_irq_map_gsi(struct kvm *kvm,
                    struct kvm_kernel_irq_routing_entry *entries,
                    int gsi)
{
        return gsi;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        return pin;
}

int kvm_set_irq(struct kvm *kvm, int irq_source_id,
                u32 irq, int level, bool line_status)
{
        unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;

        trace_kvm_set_irq(irq, level, irq_source_id);

        BUG_ON(!vgic_initialized(kvm));

        if (spi >= kvm->arch.vgic.nr_irqs)
                return -EINVAL;
        return kvm_vgic_inject_irq(kvm, 0, spi, level);
}
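
/*
 * Mapping example (editorial): with the identity kvm_irq_map_gsi() above,
 * an irqfd wired to GSI 4 ends up injecting interrupt ID
 * 4 + VGIC_NR_PRIVATE_IRQS = 36, i.e. the fifth SPI; SGIs and PPIs
 * (IDs 0-31) are never reachable through this path.
 */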

/* MSI not implemented yet */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
                struct kvm *kvm, int irq_source_id,
                int level, bool line_status)
{
        return 0;
}