/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock
 *         kvm->lpi_list_lock
 *           vgic_irq->irq_lock
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */

/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() later once it's finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
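 *
 * A typical caller pairs the lookup with vgic_put_irq() once it is done
 * with the interrupt (illustrative sketch, mirroring the pattern used by
 * kvm_vgic_inject_irq() below):
 *
 *	irq = vgic_get_irq(kvm, vcpu, intid);
 *	if (!irq)
 *		return -EINVAL;
 *	spin_lock(&irq->irq_lock);
 *	... inspect or update the irq state ...
 *	spin_unlock(&irq->irq_lock);
 *	vgic_put_irq(kvm, irq);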
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE)
		return &vcpu->arch.vgic_cpu.private_irqs[intid];

	/* SPIs */
	if (intid <= VGIC_MAX_SPI)
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}

/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (irq->intid < VGIC_MIN_LPI)
		return;

	spin_lock(&dist->lpi_list_lock);
	if (!kref_put(&irq->refcount, vgic_irq_release)) {
		spin_unlock(&dist->lpi_list_lock);
		return;
	}

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;
	spin_unlock(&dist->lpi_list_lock);

	kfree(irq);
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
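 * In summary (an illustrative decision table of the logic below):
 *
 *	active			=> irq->vcpu (or target_vcpu if it is
 *				   not queued anywhere yet)
 *	enabled && pending	=> irq->target_vcpu (NULL if the
 *				   distributor is disabled)
 *	otherwise		=> NULL (do not queue)
 *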
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/*
	 * If neither active nor pending and enabled, then this IRQ
	 * should not be queued to any VCPU.
	 */
	return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well: the first items in the list are the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	spin_lock(&irqa->irq_lock);
	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	spin_unlock(&irqb->irq_lock);
	spin_unlock(&irqa->irq_lock);
	return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge.
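 *
 * For example (illustrative): signalling level == true for a
 * level-sensitive IRQ whose line is already high changes nothing, and
 * level == false on an edge-triggered IRQ is simply ignored.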
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
{
	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		spin_unlock(&irq->irq_lock);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu)
			kvm_vcpu_kick(vcpu);
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	spin_unlock(&irq->irq_lock);

	/* someone can do stuff here, which we re-check below */

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */

	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

		spin_lock(&irq->irq_lock);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

	kvm_vcpu_kick(vcpu);

	return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:	The VM structure pointer
 * @cpuid:	The CPU for PPIs
 * @intid:	The INTID to inject a new state to.
 * @level:	Edge-triggered:	 true:  to trigger the interrupt
 *				 false: to ignore the call
 *		Level-sensitive	 true:  raise the input signal
 *				 false: lower the input signal
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
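 *
 * Return: 0 on success, a negative error code if the vgic could not be
 * initialized (see vgic_lazy_init()) or if cpuid/intid do not resolve
 * to a valid interrupt.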
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	spin_lock(&irq->irq_lock);

	if (!vgic_validate_injection(irq, level)) {
		/* Nothing to see here, move along... */
		spin_unlock(&irq->irq_lock);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq);
	vgic_put_irq(kvm, irq);

	return 0;
}

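/*
 * Mark the virtual interrupt as backed by the physical interrupt
 * phys_irq. With irq->hw set, the list register this interrupt is
 * programmed into is expected to carry the HW bit, so that a guest
 * deactivation also deactivates the physical interrupt.
 */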
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);

	BUG_ON(!irq);

	spin_lock(&irq->irq_lock);

	irq->hw = true;
	irq->hwintid = phys_irq;

	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
	struct vgic_irq *irq;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
	BUG_ON(!irq);

	spin_lock(&irq->irq_lock);

	irq->hw = false;
	irq->hwintid = 0;

	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

retry:
	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

		spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				 SINGLE_DEPTH_NESTING);
		spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
		}

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		goto retry;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		if (vgic_irq_is_sgi(irq->intid) && irq->source)
			count += hweight8(irq->source);
		else
			count++;
		spin_unlock(&irq->irq_lock);
	}
	return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
		vgic_sort_ap_list(vcpu);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);

		if (unlikely(vgic_target_oracle(irq) != vcpu))
			goto next;

		/*
		 * If we get an SGI with multiple sources, try to get
		 * them in all at once.
		 */
		do {
			vgic_populate_lr(vcpu, irq, count++);
		} while (irq->source && count < kvm_vgic_global_state.nr_lr);

next:
		spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (vgic_cpu->used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work to do and we can bail out without
	 * taking any lock. There is a potential race with someone injecting
	 * interrupts to the VCPU, but it is a benign race as the VCPU will
	 * either observe the new interrupt before or after doing this check,
	 * and introducing an additional synchronization mechanism doesn't
	 * change this.
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}

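/*
 * Returns true when at least one interrupt on the VCPU's ap_list is
 * both pending and enabled (and the distributor is enabled at all).
 */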
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled;
		spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);

	return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
	bool map_is_active;

	spin_lock(&irq->irq_lock);
	map_is_active = irq->hw && irq->active;
	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}