/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global kvm_vgic_global_state __ro_after_init = {
        .gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock
 *         kvm->lpi_list_lock
 *           vgic_irq->irq_lock
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 *
 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
 * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
 * spinlocks for any lock that may be taken while injecting an interrupt.
 */

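/*
 * A minimal sketch of the "drop and re-acquire" rule above (illustrative
 * only, not called anywhere): while holding only an irq_lock we must not
 * take an ap_list_lock directly, so we drop the irq_lock, take the
 * ap_list_lock, re-take the irq_lock and then re-check the interrupt
 * state, which may have changed in the meantime:
 *
 *     spin_unlock(&irq->irq_lock);
 *     spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(&irq->irq_lock);
 *     // re-validate irq state here before acting on it
 *
 * vgic_queue_irq_unlock() below follows exactly this pattern.
 */
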
/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq = NULL;

        spin_lock(&dist->lpi_list_lock);

        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (irq->intid != intid)
                        continue;

                /*
                 * This increases the refcount, the caller is expected to
                 * call vgic_put_irq() later once it's finished with the IRQ.
                 */
                vgic_get_irq_kref(irq);
                goto out_unlock;
        }
        irq = NULL;

out_unlock:
        spin_unlock(&dist->lpi_list_lock);

        return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid)
{
        /* SGIs and PPIs */
        if (intid <= VGIC_MAX_PRIVATE)
                return &vcpu->arch.vgic_cpu.private_irqs[intid];

        /* SPIs */
        if (intid <= VGIC_MAX_SPI)
                return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];

        /* LPIs */
        if (intid >= VGIC_MIN_LPI)
                return vgic_get_lpi(kvm, intid);

        WARN(1, "Looking up struct vgic_irq for reserved INTID");
        return NULL;
}

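/*
 * Illustrative caller sketch (not part of the driver flow): every
 * successful vgic_get_irq() must be balanced by a vgic_put_irq() once the
 * caller is done with the interrupt:
 *
 *     irq = vgic_get_irq(kvm, vcpu, intid);
 *     if (!irq)
 *             return -EINVAL;
 *     // ... use irq, typically under irq->irq_lock ...
 *     vgic_put_irq(kvm, irq);
 *
 * For LPIs the final put may free the structure, so the pointer must not
 * be dereferenced afterwards.
 */
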
/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        if (irq->intid < VGIC_MIN_LPI)
                return;

        spin_lock(&dist->lpi_list_lock);
        if (!kref_put(&irq->refcount, vgic_irq_release)) {
                spin_unlock(&dist->lpi_list_lock);
                return;
        }

        list_del(&irq->lpi_list);
        dist->lpi_list_count--;
        spin_unlock(&dist->lpi_list_lock);

        kfree(irq);
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq: The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

        /* If the interrupt is active, it must stay on the current vcpu */
        if (irq->active)
                return irq->vcpu ? : irq->target_vcpu;

        /*
         * If the IRQ is not active but enabled and pending, we should direct
         * it to its configured target VCPU.
         * If the distributor is disabled, pending interrupts shouldn't be
         * forwarded.
         */
        if (irq->enabled && irq_is_pending(irq)) {
                if (unlikely(irq->target_vcpu &&
                             !irq->target_vcpu->kvm->arch.vgic.enabled))
                        return NULL;

                return irq->target_vcpu;
        }

        /*
         * If neither active nor pending and enabled, then this IRQ should
         * not be queued to any VCPU.
         */
        return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
        struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
        bool penda, pendb;
        int ret;

        spin_lock(&irqa->irq_lock);
        spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

        if (irqa->active || irqb->active) {
                ret = (int)irqb->active - (int)irqa->active;
                goto out;
        }

        penda = irqa->enabled && irq_is_pending(irqa);
        pendb = irqb->enabled && irq_is_pending(irqb);

        if (!penda || !pendb) {
                ret = (int)pendb - (int)penda;
                goto out;
        }

        /* Both pending and enabled, sort by priority */
        ret = irqa->priority - irqb->priority;
out:
        spin_unlock(&irqb->irq_lock);
        spin_unlock(&irqa->irq_lock);
        return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
        if (irq->owner != owner)
                return false;

        switch (irq->config) {
        case VGIC_CONFIG_LEVEL:
                return irq->line_level != level;
        case VGIC_CONFIG_EDGE:
                return level;
        }

        return false;
}

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
                           unsigned long flags)
{
        struct kvm_vcpu *vcpu;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
        vcpu = vgic_target_oracle(irq);
        if (irq->vcpu || !vcpu) {
                /*
                 * If this IRQ is already on a VCPU's ap_list, then it
                 * cannot be moved or modified and there is no more work for
                 * us to do.
                 *
                 * Otherwise, if the irq is not pending and enabled, it does
                 * not need to be inserted into an ap_list and there is also
                 * no more work for us to do.
                 */
                spin_unlock_irqrestore(&irq->irq_lock, flags);

                /*
                 * We have to kick the VCPU here, because we could be
                 * queueing an edge-triggered interrupt for which we
                 * get no EOI maintenance interrupt. In that case,
                 * while the IRQ is already on the VCPU's AP list, the
                 * VCPU could have EOI'ed the original interrupt and
                 * won't see this one until it exits for some other
                 * reason.
                 */
                if (vcpu) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                return false;
        }

        /*
         * We must unlock the irq lock to take the ap_list_lock where
         * we are going to insert this new pending interrupt.
         */
        spin_unlock_irqrestore(&irq->irq_lock, flags);

        /* someone can do stuff here, which we re-check below */

        spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
        spin_lock(&irq->irq_lock);

        /*
         * Did something change behind our backs?
         *
         * There are two cases:
         * 1) The irq lost its pending state or was disabled behind our
         *    backs and/or it was queued to another VCPU's ap_list.
         * 2) Someone changed the affinity on this irq behind our
         *    backs and we are now holding the wrong ap_list_lock.
         *
         * In both cases, drop the locks and retry.
         */

        if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
                spin_unlock(&irq->irq_lock);
                spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

                spin_lock_irqsave(&irq->irq_lock, flags);
                goto retry;
        }

        /*
         * Grab a reference to the irq to reflect the fact that it is
         * now in the ap_list.
         */
        vgic_get_irq_kref(irq);
        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
        irq->vcpu = vcpu;

        spin_unlock(&irq->irq_lock);
        spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);

        return true;
}

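/*
 * Minimal caller sketch for vgic_queue_irq_unlock() (illustrative only):
 * the irq_lock must be held with interrupts disabled on entry, and all
 * locks are dropped again on return:
 *
 *     spin_lock_irqsave(&irq->irq_lock, flags);
 *     irq->pending_latch = true;
 *     vgic_queue_irq_unlock(kvm, irq, flags);
 *
 * kvm_vgic_inject_irq() below is the canonical user of this pattern.
 */
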
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *                            false: to ignore the call
 *           Level-sensitive  true:  raise the input signal
 *                            false: lower the input signal
 * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
 *           that the caller is allowed to inject this IRQ.  Userspace
 *           injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
                        bool level, void *owner)
{
        struct kvm_vcpu *vcpu;
        struct vgic_irq *irq;
        unsigned long flags;
        int ret;

        trace_vgic_update_irq_pending(cpuid, intid, level);

        ret = vgic_lazy_init(kvm);
        if (ret)
                return ret;

        vcpu = kvm_get_vcpu(kvm, cpuid);
        if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
                return -EINVAL;

        irq = vgic_get_irq(kvm, vcpu, intid);
        if (!irq)
                return -EINVAL;

        spin_lock_irqsave(&irq->irq_lock, flags);

        if (!vgic_validate_injection(irq, level, owner)) {
                /* Nothing to see here, move along... */
                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(kvm, irq);
                return 0;
        }

        if (irq->config == VGIC_CONFIG_LEVEL)
                irq->line_level = level;
        else
                irq->pending_latch = true;

        vgic_queue_irq_unlock(kvm, irq, flags);
        vgic_put_irq(kvm, irq);

        return 0;
}

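/*
 * Illustrative sketch (hypothetical names, not upstream code): an in-kernel
 * device that owns a level-sensitive SPI would assert and later deassert
 * the virtual line like this:
 *
 *     kvm_vgic_inject_irq(kvm, 0, spi_intid, true, my_owner);
 *     // ... device condition cleared ...
 *     kvm_vgic_inject_irq(kvm, 0, spi_intid, false, my_owner);
 *
 * Userspace injections pass owner == NULL and are silently ignored for
 * interrupts that have been claimed by an in-kernel owner.
 */
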
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
{
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
        unsigned long flags;

        BUG_ON(!irq);

        spin_lock_irqsave(&irq->irq_lock, flags);

        irq->hw = true;
        irq->hwintid = phys_irq;

        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return 0;
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
        struct vgic_irq *irq;
        unsigned long flags;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

        irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
        BUG_ON(!irq);

        spin_lock_irqsave(&irq->irq_lock, flags);

        irq->hw = false;
        irq->hwintid = 0;

        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return 0;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:   Pointer to the VCPU (used for PPIs)
 * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:  Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
        struct vgic_irq *irq;
        int ret = 0;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

        /* SGIs and LPIs cannot be wired up to any device */
        if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
                return -EINVAL;

        irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
        spin_lock(&irq->irq_lock);
        if (irq->owner && irq->owner != owner)
                ret = -EEXIST;
        else
                irq->owner = owner;
        spin_unlock(&irq->irq_lock);

        return ret;
}

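/*
 * Illustrative sketch (hypothetical names): an in-kernel device such as the
 * arch timer claims its PPI once at init time and then passes the same
 * owner cookie on every injection:
 *
 *     ret = kvm_vgic_set_owner(vcpu, ppi_intid, my_owner);
 *     if (ret)
 *             return ret;
 *     ...
 *     kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, ppi_intid, level,
 *                         my_owner);
 */
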
/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;
        unsigned long flags;

retry:
        spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

                spin_lock(&irq->irq_lock);

                BUG_ON(vcpu != irq->vcpu);

                target_vcpu = vgic_target_oracle(irq);

                if (!target_vcpu) {
                        /*
                         * We don't need to process this interrupt any
                         * further, move it off the list.
                         */
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        spin_unlock(&irq->irq_lock);

                        /*
                         * This vgic_put_irq call matches the
                         * vgic_get_irq_kref in vgic_queue_irq_unlock,
                         * where we added the LPI to the ap_list. As
                         * we remove the irq from the list, we also
                         * drop the refcount.
                         */
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }

                if (target_vcpu == vcpu) {
                        /* We're on the right CPU */
                        spin_unlock(&irq->irq_lock);
                        continue;
                }

                /* This interrupt looks like it has to be migrated. */

                spin_unlock(&irq->irq_lock);
                spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

                /*
                 * Ensure locking order by always locking the smallest
                 * ID first.
                 */
                if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
                        vcpuA = vcpu;
                        vcpuB = target_vcpu;
                } else {
                        vcpuA = target_vcpu;
                        vcpuB = vcpu;
                }

                spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
                spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                                 SINGLE_DEPTH_NESTING);
                spin_lock(&irq->irq_lock);

                /*
                 * If the affinity has been preserved, move the
                 * interrupt around. Otherwise, it means things have
                 * changed while the interrupt was unlocked, and we
                 * need to replay this.
                 *
                 * In all cases, we cannot trust the list not to have
                 * changed, so we restart from the beginning.
                 */
                if (target_vcpu == vgic_target_oracle(irq)) {
                        struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

                        list_del(&irq->ap_list);
                        irq->vcpu = target_vcpu;
                        list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
                }

                spin_unlock(&irq->irq_lock);
                spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
                spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
                goto retry;
        }

        spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_fold_lr_state(vcpu);
        else
                vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
                                    struct vgic_irq *irq, int lr)
{
        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_populate_lr(vcpu, irq, lr);
        else
                vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_clear_lr(vcpu, lr);
        else
                vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_underflow(vcpu);
        else
                vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count = 0;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);
                /* GICv2 SGIs can count for more than one... */
                if (vgic_irq_is_sgi(irq->intid) && irq->source)
                        count += hweight8(irq->source);
                else
                        count++;
                spin_unlock(&irq->irq_lock);
        }
        return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count = 0;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
                vgic_sort_ap_list(vcpu);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);

                if (unlikely(vgic_target_oracle(irq) != vcpu))
                        goto next;

                /*
                 * If we get an SGI with multiple sources, try to get
                 * them all in at once.
                 */
                do {
                        vgic_populate_lr(vcpu, irq, count++);
                } while (irq->source && count < kvm_vgic_global_state.nr_lr);

next:
                spin_unlock(&irq->irq_lock);

                if (count == kvm_vgic_global_state.nr_lr) {
                        if (!list_is_last(&irq->ap_list,
                                          &vgic_cpu->ap_list_head))
                                vgic_set_underflow(vcpu);
                        break;
                }
        }

        vcpu->arch.vgic_cpu.used_lrs = count;

        /* Nuke remaining LRs */
        for ( ; count < kvm_vgic_global_state.nr_lr; count++)
                vgic_clear_lr(vcpu, count);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        /* An empty ap_list_head implies used_lrs == 0 */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
                return;

        if (vgic_cpu->used_lrs)
                vgic_fold_lr_state(vcpu);
        vgic_prune_ap_list(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
        /*
         * If there are no virtual interrupts active or pending for this
         * VCPU, then there is no work to do and we can bail out without
         * taking any lock.  There is a potential race with someone injecting
         * interrupts to the VCPU, but it is a benign race as the VCPU will
         * either observe the new interrupt before or after doing this check,
         * and introducing additional synchronization mechanisms doesn't
         * change this.
         */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
                return;

        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
        vgic_flush_lr_state(vcpu);
        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
        if (unlikely(!vgic_initialized(vcpu->kvm)))
                return;

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_load(vcpu);
        else
                vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
        if (unlikely(!vgic_initialized(vcpu->kvm)))
                return;

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_put(vcpu);
        else
                vgic_v3_put(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        bool pending = false;
        unsigned long flags;

        if (!vcpu->kvm->arch.vgic.enabled)
                return false;

        spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);
                pending = irq_is_pending(irq) && irq->enabled;
                spin_unlock(&irq->irq_lock);

                if (pending)
                        break;
        }

        spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

        return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        int c;

        /*
         * We've injected an interrupt, time to find out who deserves
         * a good kick...
         */
        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (kvm_vgic_vcpu_pending_irq(vcpu)) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
        }
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
        bool map_is_active;
        unsigned long flags;

        if (!vgic_initialized(vcpu->kvm))
                return false;

        spin_lock_irqsave(&irq->irq_lock, flags);
        map_is_active = irq->hw && irq->active;
        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return map_is_active;
}
