/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

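/*
 * Trivial accessors for reserved or unimplemented registers: RAZ
 * (Read-As-Zero), RAO (Read-As-One) and WI (Write-Ignored), as named
 * by the GIC architecture.
 */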
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                        unsigned int len, unsigned long val)
{
        /* Ignore */
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->enabled)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

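/*
 * Enabling an interrupt may make it deliverable immediately (if it is
 * already pending), so vgic_queue_irq_unlock() is used to queue it to a
 * VCPU where needed; it also drops the irq_lock for us.
 */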
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->enabled = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->enabled = false;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

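/*
 * The pending state is sampled under the irq_lock; note that
 * irq_is_pending() also accounts for the current line level of
 * level-triggered interrupts, not just the pending latch.
 */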
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                unsigned long flags;

                spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq_is_pending(irq))
                        value |= (1U << i);
                spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/*
 * This function returns the VCPU that performed the MMIO access and
 * trapped from within the VM, or NULL if this is a userspace access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again,
 * because even if the current thread is migrated to another CPU, reading
 * the per-CPU value later will give us the same value, since the per-CPU
 * variable is updated in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
        struct kvm_vcpu *vcpu;

        preempt_disable();
        vcpu = kvm_arm_get_running_vcpu();
        preempt_enable();
        return vcpu;
}

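/*
 * The vgic_hw_irq_* helpers below bail out early for userspace accesses
 * (is_uaccess): user accesses must never touch the physical interrupt
 * state behind a HW-mapped virtual interrupt.
 */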
/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                 bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = true;
        vgic_irq_set_phys_active(irq, true);
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        bool is_uaccess = !vgic_get_mmio_requester_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw)
                        vgic_hw_irq_spending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                 bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = false;

        /*
         * We don't want the guest to effectively mask the physical
         * interrupt by doing a write to SPENDR followed by a write to
         * CPENDR for HW interrupts, so we clear the active state on
         * the physical side if the virtual interrupt is not active.
         * This may lead to taking an additional interrupt on the
         * host, but that should not be a problem as the worst that
         * can happen is an additional vgic injection. We also clear
         * the pending state to maintain proper semantics for edge HW
         * interrupts.
         */
        vgic_irq_set_phys_pending(irq, false);
        if (!irq->active)
                vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        bool is_uaccess = !vgic_get_mmio_requester_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                if (irq->hw)
                        vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = false;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->active)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                      bool active, bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->active = active;
        vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                    bool active)
{
        unsigned long flags;
        struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

        spin_lock_irqsave(&irq->irq_lock, flags);

        /*
         * If this virtual IRQ was written into a list register, we
         * have to make sure the CPU that runs the VCPU thread has
         * synced back the LR state to the struct vgic_irq.
         *
         * As long as the conditions below are true, we know the VCPU thread
         * may be on its way back from the guest (we kicked the VCPU thread in
         * vgic_change_active_prepare) and still has to sync back this IRQ,
         * so we release and re-acquire the spin_lock to let the other thread
         * sync back the IRQ.
         *
         * When accessing VGIC state from user space, requester_vcpu is
         * NULL, which is fine, because we guarantee that no VCPUs are running
         * when accessing VGIC state from user space so irq->vcpu->cpu is
         * always -1.
         */
        while (irq->vcpu && /* IRQ may have state in an LR somewhere */
               irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
               irq->vcpu->cpu != -1) /* VCPU thread is running */
                cond_resched_lock(&irq->irq_lock);

        if (irq->hw)
                vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
        else
                irq->active = active;

        if (irq->active)
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
        else
                spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
        if (intid >= VGIC_NR_PRIVATE_IRQS)
                kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
        if (intid >= VGIC_NR_PRIVATE_IRQS)
                kvm_arm_resume_guest(vcpu->kvm);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, false);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_cactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        __vgic_mmio_write_cactive(vcpu, addr, len, val);
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, true);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_sactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        __vgic_mmio_write_sactive(vcpu, addr, len, val);
}

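/*
 * Priority registers use 8 bits per interrupt, so each byte of an
 * access maps to exactly one interrupt.
 */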
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->priority << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                /* Narrow the priority range to what we actually support */
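                /* (e.g. with VGIC_PRI_BITS == 5, only bits [7:3] are kept) */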
                irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
                spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

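/*
 * Config registers use 2 bits per interrupt: the GIC encodes
 * edge-triggered as 0b10 (bit 1 set) and level-sensitive as 0b00.
 */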
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        u32 value = 0;
        int i;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->config == VGIC_CONFIG_EDGE)
                        value |= (2U << (i * 2));

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
                            gpa_t addr, unsigned int len,
                            unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq;

                /*
                 * The configuration cannot be changed for SGIs in general;
                 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
                 * code relies on PPIs being level triggered, so we also
                 * make them read-only here.
                 */
                if (intid + i < VGIC_NR_PRIVATE_IRQS)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                spin_lock_irqsave(&irq->irq_lock, flags);

                if (test_bit(i * 2 + 1, &val))
                        irq->config = VGIC_CONFIG_EDGE;
                else
                        irq->config = VGIC_CONFIG_LEVEL;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

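/*
 * Accessors for the line-level state of a 32-interrupt window starting
 * at @intid, one bit per interrupt; used for userspace save/restore of
 * level-triggered interrupt state.
 */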
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
        int i;
        u64 val = 0;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
                        val |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
                                    const u64 val)
{
        int i;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
        unsigned long flags;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;
                bool new_level;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /*
                 * The line level is set irrespective of irq type
                 * (level or edge) to avoid a dependency on the VM
                 * having to restore the irq config before the line
                 * level.
                 */
                new_level = !!(val & (1U << i));
                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->line_level = new_level;
                if (new_level)
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                else
                        spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

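/*
 * bsearch() requires the register region arrays to be sorted by
 * ascending reg_offset; match_region() provides the comparator.
 */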
static int match_region(const void *key, const void *elt)
{
        const unsigned int offset = (unsigned long)key;
        const struct vgic_register_region *region = elt;

        if (offset < region->reg_offset)
                return -1;

        if (offset >= region->reg_offset + region->len)
                return 1;

        return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
                      int nr_regions, unsigned int offset)
{
        return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
                       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_vmcr(vcpu, vmcr);
        else
                vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_get_vmcr(vcpu, vmcr);
        else
                vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a
 * data value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
        unsigned long data = kvm_mmio_read_buf(val, len);

        switch (len) {
        case 1:
                return data;
        case 2:
                return le16_to_cpu(data);
        case 4:
                return le32_to_cpu(data);
        default:
                return le64_to_cpu(data);
        }
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
                                unsigned long data)
{
        switch (len) {
        case 1:
                break;
        case 2:
                data = cpu_to_le16(data);
                break;
        case 4:
                data = cpu_to_le32(data);
                break;
        default:
                data = cpu_to_le64(data);
        }

        kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
        return container_of(dev, struct vgic_io_device, dev);
}

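/*
 * An access is valid only if the region supports the requested width,
 * the address is naturally aligned, and (for per-IRQ registers) the
 * decoded intid refers to an allocated interrupt.
 */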
static bool check_region(const struct kvm *kvm,
                         const struct vgic_register_region *region,
                         gpa_t addr, int len)
{
        int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        switch (len) {
        case sizeof(u8):
                flags = VGIC_ACCESS_8bit;
                break;
        case sizeof(u32):
                flags = VGIC_ACCESS_32bit;
                break;
        case sizeof(u64):
                flags = VGIC_ACCESS_64bit;
                break;
        default:
                return false;
        }

        if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
                if (!region->bits_per_irq)
                        return true;

                /* Do we access a non-allocated IRQ? */
                return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
        }

        return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
                     gpa_t addr, int len)
{
        const struct vgic_register_region *region;

        region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
                                       addr - iodev->base_addr);
        if (!region || !check_region(vcpu->kvm, region, addr, len))
                return NULL;

        return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region) {
                *val = 0;
                return 0;
        }

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_read)
                *val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
        else
                *val = region->read(r_vcpu, addr, sizeof(u32));

        return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, const u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region)
                return 0;

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_write)
                region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
        else
                region->write(r_vcpu, addr, sizeof(u32), *val);

        return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
                 bool is_write, int offset, u32 *val)
{
        if (is_write)
                return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
        else
                return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

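/*
 * Guest MMIO dispatch: accesses to an unhandled region are RAZ/WI.
 * CPUIF and DIST accesses run on the trapping VCPU, while REDIST
 * accesses are redirected to the VCPU owning that redistributor.
 */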
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = 0;

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region) {
                memset(val, 0, len);
                return 0;
        }

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_DIST:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_REDIST:
                data = region->read(iodev->redist_vcpu, addr, len);
                break;
        case IODEV_ITS:
                data = region->its_read(vcpu->kvm, iodev->its, addr, len);
                break;
        }

        vgic_data_host_to_mmio_bus(val, len, data);
        return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                               gpa_t addr, int len, const void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = vgic_data_mmio_bus_to_host(val, len);

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region)
                return 0;

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_DIST:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_REDIST:
                region->write(iodev->redist_vcpu, addr, len, data);
                break;
        case IODEV_ITS:
                region->its_write(vcpu->kvm, iodev->its, addr, len, data);
                break;
        }

        return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
        .read = dispatch_mmio_read,
        .write = dispatch_mmio_write,
};

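/*
 * Registers the distributor frame on the guest's KVM_MMIO_BUS; the
 * bus registration must be performed under the slots_lock.
 */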
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type type)
{
        struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
        int ret = 0;
        unsigned int len;

        switch (type) {
        case VGIC_V2:
                len = vgic_v2_init_dist_iodev(io_device);
                break;
        case VGIC_V3:
                len = vgic_v3_init_dist_iodev(io_device);
                break;
        default:
                BUG_ON(1);
        }

        io_device->base_addr = dist_base_address;
        io_device->iodev_type = IODEV_DIST;
        io_device->redist_vcpu = NULL;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
                                      len, &io_device->dev);
        mutex_unlock(&kvm->slots_lock);

        return ret;
}