/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "../trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global __section(.hyp.text) kvm_vgic_global_state;

/*
 * Locking order is always:
 *   vgic_cpu->ap_list_lock
 *     vgic_irq->irq_lock
 *
 * (that is, always take the ap_list_lock before the struct vgic_irq lock).
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */

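/*
 * vgic_get_irq - look up the struct vgic_irq describing a given INTID.
 *
 * SGIs (INTIDs 0-15) and PPIs (INTIDs 16-31) are private to a VCPU and are
 * looked up in that VCPU's private_irqs array; SPIs are global and come from
 * the distributor's spis array.  LPIs are not handled yet, and INTIDs in the
 * reserved range between the SPI and LPI spaces are never valid here.
 */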
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid)
{
        /* SGIs and PPIs */
        if (intid <= VGIC_MAX_PRIVATE)
                return &vcpu->arch.vgic_cpu.private_irqs[intid];

        /* SPIs */
        if (intid <= VGIC_MAX_SPI)
                return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];

        /* LPIs are not yet covered */
        if (intid >= VGIC_MIN_LPI)
                return NULL;

        WARN(1, "Looking up struct vgic_irq for reserved INTID");
        return NULL;
}

/**
 * kvm_vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

        /* If the interrupt is active, it must stay on the current vcpu */
        if (irq->active)
                return irq->vcpu ? : irq->target_vcpu;

        /*
         * If the IRQ is not active but enabled and pending, we should direct
         * it to its configured target VCPU.
         * If the distributor is disabled, pending interrupts shouldn't be
         * forwarded.
         */
        if (irq->enabled && irq->pending) {
                if (unlikely(irq->target_vcpu &&
                             !irq->target_vcpu->kvm->arch.vgic.enabled))
                        return NULL;

                return irq->target_vcpu;
        }

        /*
         * If neither active nor pending and enabled, then this IRQ should not
         * be queued to any VCPU.
         */
        return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
        struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
        bool penda, pendb;
        int ret;

        spin_lock(&irqa->irq_lock);
        spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

        if (irqa->active || irqb->active) {
                ret = (int)irqb->active - (int)irqa->active;
                goto out;
        }

        penda = irqa->enabled && irqa->pending;
        pendb = irqb->enabled && irqb->pending;

        if (!penda || !pendb) {
                ret = (int)pendb - (int)penda;
                goto out;
        }

        /* Both pending and enabled, sort by priority */
        ret = irqa->priority - irqb->priority;
out:
        spin_unlock(&irqb->irq_lock);
        spin_unlock(&irqa->irq_lock);
        return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge.
 */
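/*
 * For example, "raising" a level-sensitive line that is already high
 * (level == true, line_level == true) is not a valid injection, and neither
 * is a falling edge on an edge-triggered line; both are ignored by the
 * caller.
 */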
static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
{
        switch (irq->config) {
        case VGIC_CONFIG_LEVEL:
                return irq->line_level != level;
        case VGIC_CONFIG_EDGE:
                return level;
        }

        return false;
}

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
{
        struct kvm_vcpu *vcpu;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
        vcpu = vgic_target_oracle(irq);
        if (irq->vcpu || !vcpu) {
                /*
                 * If this IRQ is already on a VCPU's ap_list, then it
                 * cannot be moved or modified and there is no more work for
                 * us to do.
                 *
                 * Otherwise, if the irq is not pending and enabled, it does
                 * not need to be inserted into an ap_list and there is also
                 * no more work for us to do.
                 */
                spin_unlock(&irq->irq_lock);
                return false;
        }

        /*
         * We must unlock the irq lock to take the ap_list_lock where
         * we are going to insert this new pending interrupt.
         */
        spin_unlock(&irq->irq_lock);

        /* someone can do stuff here, which we re-check below */

        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
        spin_lock(&irq->irq_lock);

        /*
         * Did something change behind our backs?
         *
         * There are two cases:
         * 1) The irq lost its pending state or was disabled behind our
         *    backs and/or it was queued to another VCPU's ap_list.
         * 2) Someone changed the affinity on this irq behind our
         *    backs and we are now holding the wrong ap_list_lock.
         *
         * In both cases, drop the locks and retry.
         */

        if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
                spin_unlock(&irq->irq_lock);
                spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

                spin_lock(&irq->irq_lock);
                goto retry;
        }

        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
        irq->vcpu = vcpu;

        spin_unlock(&irq->irq_lock);
        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

        kvm_vcpu_kick(vcpu);

        return true;
}

static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
                                   unsigned int intid, bool level,
                                   bool mapped_irq)
{
        struct kvm_vcpu *vcpu;
        struct vgic_irq *irq;
        int ret;

        trace_vgic_update_irq_pending(cpuid, intid, level);

        ret = vgic_lazy_init(kvm);
        if (ret)
                return ret;

        vcpu = kvm_get_vcpu(kvm, cpuid);
        if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
                return -EINVAL;

        irq = vgic_get_irq(kvm, vcpu, intid);
        if (!irq)
                return -EINVAL;

        if (irq->hw != mapped_irq)
                return -EINVAL;

        spin_lock(&irq->irq_lock);

        if (!vgic_validate_injection(irq, level)) {
                /* Nothing to see here, move along... */
                spin_unlock(&irq->irq_lock);
                return 0;
        }

        if (irq->config == VGIC_CONFIG_LEVEL) {
                irq->line_level = level;
                irq->pending = level || irq->soft_pending;
        } else {
                irq->pending = true;
        }

        vgic_queue_irq_unlock(kvm, irq);

        return 0;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *                            false: to ignore the call
 *           Level-sensitive  true:  raise the input signal
 *                            false: lower the input signal
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
                        bool level)
{
        return vgic_update_irq_pending(kvm, cpuid, intid, level, false);
}
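
/*
 * Typical usage (illustrative only): a device model asserting and later
 * deasserting a level-sensitive interrupt line would call
 *
 *   kvm_vgic_inject_irq(kvm, 0, intid, true);    (line goes high)
 *   kvm_vgic_inject_irq(kvm, 0, intid, false);   (line goes low)
 *
 * where cpuid only matters for private (SGI/PPI) interrupts.  For an
 * edge-triggered interrupt only the 'true' call has an effect; the 'false'
 * call is discarded by vgic_validate_injection().
 */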

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;

retry:
        spin_lock(&vgic_cpu->ap_list_lock);

        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

                spin_lock(&irq->irq_lock);

                BUG_ON(vcpu != irq->vcpu);

                target_vcpu = vgic_target_oracle(irq);

                if (!target_vcpu) {
                        /*
                         * We don't need to process this interrupt any
                         * further, move it off the list.
                         */
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        spin_unlock(&irq->irq_lock);
                        continue;
                }

                if (target_vcpu == vcpu) {
                        /* We're on the right CPU */
                        spin_unlock(&irq->irq_lock);
                        continue;
                }

                /* This interrupt looks like it has to be migrated. */

                spin_unlock(&irq->irq_lock);
                spin_unlock(&vgic_cpu->ap_list_lock);

                /*
                 * Ensure locking order by always locking the smallest
                 * ID first.
                 */
                if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
                        vcpuA = vcpu;
                        vcpuB = target_vcpu;
                } else {
                        vcpuA = target_vcpu;
                        vcpuB = vcpu;
                }

                spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
                spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                                 SINGLE_DEPTH_NESTING);
                spin_lock(&irq->irq_lock);

                /*
                 * If the affinity has been preserved, move the
                 * interrupt around. Otherwise, it means things have
                 * changed while the interrupt was unlocked, and we
                 * need to replay this.
                 *
                 * In all cases, we cannot trust the list not to have
                 * changed, so we restart from the beginning.
                 */
                if (target_vcpu == vgic_target_oracle(irq)) {
                        struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

                        list_del(&irq->ap_list);
                        irq->vcpu = target_vcpu;
                        list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
                }

                spin_unlock(&irq->irq_lock);
                spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
                spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
                goto retry;
        }

        spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_process_maintenance(vcpu);
        else
                vgic_v3_process_maintenance(vcpu);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_fold_lr_state(vcpu);
        else
                vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
                                    struct vgic_irq *irq, int lr)
{
        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_populate_lr(vcpu, irq, lr);
        else
                vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_clear_lr(vcpu, lr);
        else
                vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_underflow(vcpu);
        else
                vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count = 0;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);
                /* GICv2 SGIs can count for more than one... */
                if (vgic_irq_is_sgi(irq->intid) && irq->source)
                        count += hweight8(irq->source);
                else
                        count++;
                spin_unlock(&irq->irq_lock);
        }
        return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count = 0;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) {
                vgic_set_underflow(vcpu);
                vgic_sort_ap_list(vcpu);
        }

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);

                if (unlikely(vgic_target_oracle(irq) != vcpu))
                        goto next;

                /*
                 * If we get an SGI with multiple sources, try to get
                 * them in all at once.
                 */
                do {
                        vgic_populate_lr(vcpu, irq, count++);
                } while (irq->source && count < kvm_vgic_global_state.nr_lr);

next:
                spin_unlock(&irq->irq_lock);

                if (count == kvm_vgic_global_state.nr_lr)
                        break;
        }

        vcpu->arch.vgic_cpu.used_lrs = count;

        /* Nuke remaining LRs */
        for ( ; count < kvm_vgic_global_state.nr_lr; count++)
                vgic_clear_lr(vcpu, count);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        vgic_process_maintenance_interrupt(vcpu);
        vgic_fold_lr_state(vcpu);
        vgic_prune_ap_list(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
        vgic_flush_lr_state(vcpu);
        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        bool pending = false;

        if (!vcpu->kvm->arch.vgic.enabled)
                return false;

        spin_lock(&vgic_cpu->ap_list_lock);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);
                pending = irq->pending && irq->enabled;
                spin_unlock(&irq->irq_lock);

                if (pending)
                        break;
        }

        spin_unlock(&vgic_cpu->ap_list_lock);

        return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        int c;

        /*
         * We've injected an interrupt, time to find out who deserves
         * a good kick...
         */
        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (kvm_vgic_vcpu_pending_irq(vcpu))
                        kvm_vcpu_kick(vcpu);
        }
}