/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_psci.h>

#ifdef REQUIRES_VIRT
__asm__(".arch_extension	virt");
#endif

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state;
static unsigned long hyp_default_vectors;

/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

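/*
 * Record which vcpu is currently loaded on this physical CPU, so that
 * kvm_arm_get_running_vcpu() can retrieve it from a non-preemptible context.
 */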
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
	BUG_ON(preemptible());
	__get_cpu_var(kvm_arm_running_vcpu) = vcpu;
}

/**
 * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
 * Must be called from non-preemptible context
 */
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
	BUG_ON(preemptible());
	return __get_cpu_var(kvm_arm_running_vcpu);
}

/**
 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
 */
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void)
{
	return &kvm_arm_running_vcpu;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret = 0;

	if (type)
		return -EINVAL;

	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

	ret = create_hyp_mappings(kvm, kvm + 1);
	if (ret)
		goto out_free_stage2_pgd;

	/* Mark the initial VMID generation invalid */
	kvm->arch.vmid_gen = 0;

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	return ret;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	kvm_free_stage2_pgd(kvm);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;
	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

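/*
 * Allocate a vcpu from the kvm_vcpu_cache, run the generic KVM vcpu init on
 * it and map the structure into Hyp mode so the world-switch code can
 * access it.
 */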
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err;
	struct kvm_vcpu *vcpu;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	err = create_hyp_mappings(vcpu, vcpu + 1);
	if (err)
		goto vcpu_uninit;

	return vcpu;
vcpu_uninit:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_caches(vcpu);
	kvm_timer_vcpu_terminate(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;

	/* Set up VGIC */
	ret = kvm_vgic_vcpu_init(vcpu);
	if (ret)
		return ret;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = cpu;
	vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state);

	/*
	 * Check whether this vcpu requires the cache to be flushed on
	 * this physical CPU. This is a consequence of doing dcache
	 * operations by set/way on this vcpu. We do it here to be in
	 * a non-preemptible section.
	 */
	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */

	kvm_arm_set_running_vcpu(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arm_set_running_vcpu(NULL);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}


int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
}

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @kvm: The VM's VMID to check
 *
 * return true if there is a new generation of VMIDs being used
 *
 * The hardware supports only 256 values with the value zero reserved for the
 * host, so we check if an assigned value belongs to a previous generation,
 * which requires us to assign a new value. If we're the first to use a
 * VMID for the new generation, we must flush necessary caches and TLBs on all
 * CPUs.
 */
static bool need_new_vmid_gen(struct kvm *kvm)
{
	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}

/**
 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
 * @kvm:	The guest that we are about to run
 *
 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
 * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
 * caches and TLBs.
 */
static void update_vttbr(struct kvm *kvm)
{
	phys_addr_t pgd_phys;
	u64 vmid;

	if (!need_new_vmid_gen(kvm))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(kvm)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
	kvm->arch.vmid = kvm_next_vmid;
	kvm_next_vmid++;

	/* update vttbr to be used with the new vmid */
	pgd_phys = virt_to_phys(kvm->arch.pgd);
	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
	kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
	kvm->arch.vttbr |= vmid;

	spin_unlock(&kvm_vmid_lock);
}

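/*
 * Initialization that has to wait until the vcpu first runs: initialize the
 * in-kernel VGIC for this VM if userspace created one, and honour the
 * "start in power-off" feature flag by calling into the PSCI CPU_OFF path.
 */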
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.has_run_once))
		return 0;

	vcpu->arch.has_run_once = true;

	/*
	 * Initialize the VGIC before running a vcpu the first time on
	 * this VM.
	 */
	if (irqchip_in_kernel(vcpu->kvm) &&
	    unlikely(!vgic_initialized(vcpu->kvm))) {
		int ret = kvm_vgic_init(vcpu->kvm);
		if (ret)
			return ret;
	}

	/*
	 * Handle the "start in power-off" case by calling into the
	 * PSCI code.
	 */
	if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
		*vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
		kvm_psci_call(vcpu);
	}

	return 0;
}

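/*
 * Put the calling thread to sleep on the vcpu wait queue until
 * vcpu->arch.pause is cleared again (e.g. by a PSCI CPU_ON request
 * issued by another vcpu).
 */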
static void vcpu_pause(struct kvm_vcpu *vcpu)
{
	wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);

	wait_event_interruptible(*wq, !vcpu->arch.pause);
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 * @run:	The kvm_run structure pointer used for userspace state exchange
 *
 * This function is called through the KVM_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * or some emulation is needed from user space in which case the function will
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;
	sigset_t sigsaved;

	/* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
	if (unlikely(vcpu->arch.target < 0))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
		if (ret)
			return ret;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vttbr(vcpu->kvm);

		if (vcpu->arch.pause)
			vcpu_pause(vcpu);

		kvm_vgic_flush_hwstate(vcpu);
		kvm_timer_flush_hwstate(vcpu);

		local_irq_disable();

		/*
		 * Re-check atomic conditions
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
			local_irq_enable();
			kvm_timer_sync_hwstate(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			continue;
		}

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		kvm_guest_enter();
		vcpu->mode = IN_GUEST_MODE;

		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->arch.last_pcpu = smp_processor_id();
		kvm_guest_exit();
		trace_kvm_exit(*vcpu_pc(vcpu));
		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled.  Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * Back from guest
		 *************************************************************/

		kvm_timer_sync_hwstate(vcpu);
		kvm_vgic_sync_hwstate(vcpu);

		ret = handle_exit(vcpu, run, ret);
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	return ret;
}

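/*
 * Assert or clear the virtual IRQ or FIQ line of a vcpu by toggling the
 * corresponding HCR_VI/HCR_VF bit in vcpu->arch.irq_lines, and kick the vcpu
 * so the new line state is applied on the next world switch.
 */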
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *ptr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	ptr = (unsigned long *)&vcpu->arch.irq_lines;
	if (level)
		set = test_and_set_bit(bit_index, ptr);
	else
		set = test_and_clear_bit(bit_index, ptr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_vcpu_kick(vcpu);

	return 0;
}

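/*
 * Decode the packed KVM_IRQ_LINE argument (irq type, vcpu index, irq number)
 * and route the request either to the core IRQ/FIQ lines or to the in-kernel
 * VGIC, depending on whether an irqchip has been created for this VM.
 */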
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS ||
		    irq_num > KVM_ARM_IRQ_GIC_MAX)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
	}

	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		if (copy_from_user(&init, argp, sizeof(init)))
			return -EFAULT;

		return kvm_vcpu_set_target(vcpu, &init);

	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_arm_set_reg(vcpu, &reg);
		else
			return kvm_arm_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
	}
	default:
		return -EINVAL;
	}
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -EINVAL;
}

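/*
 * Handle KVM_ARM_SET_DEVICE_ADDR: split the id field into a device id and an
 * address type, and forward the guest physical address to the VGIC code when
 * the device is the virtual GICv2.
 */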
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	unsigned long dev_id, type;

	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
		KVM_ARM_DEVICE_ID_SHIFT;
	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
		KVM_ARM_DEVICE_TYPE_SHIFT;

	switch (dev_id) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_vgic_set_addr(kvm, type, dev_addr->addr);
	default:
		return -ENODEV;
	}
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		if (vgic_present)
			return kvm_vgic_create(kvm);
		else
			return -ENXIO;
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	default:
		return -EINVAL;
	}
}

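/*
 * Per-CPU Hyp-mode initialization: switch from the Hyp stub to the KVM init
 * vector, then pass the boot and runtime Hyp page table addresses, the Hyp
 * stack pointer and the Hyp exception vector to the low-level init code.
 */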
static void cpu_init_hyp_mode(void *vector)
{
	unsigned long long boot_pgd_ptr;
	unsigned long long pgd_ptr;
	unsigned long hyp_stack_ptr;
	unsigned long stack_page;
	unsigned long vector_ptr;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors(kvm_get_idmap_vector());

	boot_pgd_ptr = (unsigned long long)kvm_mmu_get_boot_httbr();
	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
	hyp_stack_ptr = stack_page + PAGE_SIZE;
	vector_ptr = (unsigned long)__kvm_hyp_vector;

	__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
}

/**
 * Inits Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	phys_addr_t init_phys_addr;
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * It is probably enough to obtain the default on one
	 * CPU. It's unlikely to be different on the others.
	 */
	hyp_default_vectors = __hyp_get_vectors();

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_free_stack_pages;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Execute the init code on each CPU.
	 *
	 * Note: The stack is not mapped yet, so don't do anything else than
	 * initializing the hypervisor mode on each CPU using a local stack
	 * space for temporary storage.
	 */
	init_phys_addr = virt_to_phys(__kvm_hyp_init);
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, cpu_init_hyp_mode,
					 (void *)(long)init_phys_addr, 1);
	}

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_free_mappings;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_free_mappings;
		}
	}

	/*
	 * Map the host VFP structures
	 */
	kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t);
	if (!kvm_host_vfp_state) {
		err = -ENOMEM;
		kvm_err("Cannot allocate host VFP state\n");
		goto out_free_mappings;
	}

	for_each_possible_cpu(cpu) {
		kvm_kernel_vfp_t *vfp;

		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
		err = create_hyp_mappings(vfp, vfp + 1);

		if (err) {
			kvm_err("Cannot map host VFP state: %d\n", err);
			goto out_free_vfp;
		}
	}

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	if (err)
		goto out_free_vfp;

#ifdef CONFIG_KVM_ARM_VGIC
	vgic_present = true;
#endif

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init();
	if (err)
		goto out_free_mappings;

	kvm_perf_init();

	kvm_info("Hyp mode initialized successfully\n");

	return 0;
out_free_vfp:
	free_percpu(kvm_host_vfp_state);
out_free_mappings:
	free_hyp_pgds();
out_free_stack_pages:
	for_each_possible_cpu(cpu)
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;

	if (!is_hyp_mode_available()) {
		kvm_err("HYP mode not available\n");
		return -ENODEV;
	}

	if (kvm_target_cpu() < 0) {
		kvm_err("Target CPU not supported!\n");
		return -ENODEV;
	}

	err = init_hyp_mode();
	if (err)
		goto out_err;

	kvm_coproc_table_init();
	return 0;
out_err:
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
	kvm_perf_teardown();
}

static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}

module_init(arm_init);