/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/unified.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/cputype.h>
#include <asm/tlbflush.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

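/*
 * When the kernel is built to require the Virtualization Extensions, tell
 * the assembler to accept virt instructions in any inline assembly emitted
 * for this translation unit.
 */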
#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
#endif

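/*
 * Per-CPU state for Hyp mode: the base address of each CPU's Hyp stack page,
 * the host VFP register state saved around world switches, and the exception
 * vectors installed by the Hyp stub before KVM took over.
 */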
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
static unsigned long hyp_default_vectors;

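/*
 * On ARM there is no per-CPU enable/disable step for virtualization support
 * beyond the Hyp-mode setup done once at init time, so most of the generic
 * architecture hooks below are no-ops or trivially succeed.
 */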
int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret = 0;

	if (type)
		return -EINVAL;

	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

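	/*
	 * Map the kvm structure into Hyp mode so the world-switch code can
	 * dereference it directly (e.g. to reach the stage-2 page tables).
	 */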
	ret = create_hyp_mappings(kvm, kvm + 1);
	if (ret)
		goto out_free_stage2_pgd;

	/* Mark the initial VMID generation invalid */
	kvm->arch.vmid_gen = 0;

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	return ret;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	kvm_free_stage2_pgd(kvm);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
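
/*
 * Illustrative only (not part of this file): userspace probes the
 * capabilities reported above with KVM_CHECK_EXTENSION on the /dev/kvm
 * file descriptor, e.g.:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ONE_REG) > 0)
 *		;	// ONE_REG register access is available
 */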

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err;
	struct kvm_vcpu *vcpu;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

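	/*
	 * Map the vcpu structure into Hyp mode so the world-switch code can
	 * access the guest register context directly.
	 */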
	err = create_hyp_mappings(vcpu, vcpu + 1);
	if (err)
		goto vcpu_uninit;

	return vcpu;
vcpu_uninit:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_caches(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

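/*
 * Return the KVM_ARM_TARGET_* id matching the CPU we are running on, or
 * -EINVAL if KVM does not support this CPU (only Cortex-A15 at this point).
 */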
int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	if (implementor != ARM_CPU_IMP_ARM)
		return -EINVAL;

	switch (part_number) {
	case ARM_CPU_PART_CORTEX_A15:
		return KVM_ARM_TARGET_CORTEX_A15;
	default:
		return -EINVAL;
	}
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return -EINVAL;
}

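/*
 * Record a virtual IRQ or FIQ as pending for @vcpu.  The irq_lines field is
 * laid out so that its set bits can be copied straight into the HCR's VI/VF
 * bits on the next world switch, which is what makes the guest observe the
 * interrupt.
 */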
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *ptr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	ptr = (unsigned long *)&vcpu->arch.irq_lines;
	if (level)
		set = test_and_set_bit(bit_index, ptr);
	else
		set = test_and_clear_bit(bit_index, ptr);

	/* If we didn't change anything, no need to wake up or kick other CPUs */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	if (irq_type != KVM_ARM_IRQ_TYPE_CPU)
		return -EINVAL;

	if (vcpu_idx >= nrcpus)
		return -EINVAL;

	vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	if (!vcpu)
		return -EINVAL;

	if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
		return -EINVAL;

	return vcpu_interrupt_line(vcpu, irq_num, level);
}
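
/*
 * Illustrative only (not part of this file): a userspace sketch of how the
 * irq field decoded above is assembled for KVM_IRQ_LINE.  Assumes vm_fd is
 * a VM file descriptor obtained via KVM_CREATE_VM.  Raising the IRQ line of
 * VCPU 0 would look like:
 *
 *	struct kvm_irq_level irq_level = {
 *		.irq = (KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT) |
 *		       (0 << KVM_ARM_IRQ_VCPU_SHIFT) |
 *		       (KVM_ARM_IRQ_CPU_IRQ << KVM_ARM_IRQ_NUM_SHIFT),
 *		.level = 1,
 *	};
 *
 *	ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);
 *
 * Lowering the line is the same call with .level = 0.
 */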

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		if (copy_from_user(&init, argp, sizeof(init)))
			return -EFAULT;

		return kvm_vcpu_set_target(vcpu, &init);
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_arm_set_reg(vcpu, &reg);
		else
			return kvm_arm_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
	}
	default:
		return -EINVAL;
	}
}
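
/*
 * Illustrative only (not part of this file): KVM_GET_REG_LIST is meant to be
 * called twice from userspace, once to learn the register count and once to
 * fetch the indices.  A sketch, assuming vcpu_fd came from KVM_CREATE_VCPU:
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	// fails with E2BIG,
 *							// but fills probe.n
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);		// copies the indices
 */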

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -EINVAL;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

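/*
 * cpu_init_hyp_mode - set up Hyp mode on the calling CPU
 * @vector:	physical address of the Hyp-mode init code
 *
 * Runs on each CPU via smp_call_function_single(): installs the init vector,
 * then traps to Hyp to set up the Hyp page tables, stack and vectors.
 */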
static void cpu_init_hyp_mode(void *vector)
{
	unsigned long long pgd_ptr;
	unsigned long pgd_low, pgd_high;
	unsigned long hyp_stack_ptr;
	unsigned long stack_page;
	unsigned long vector_ptr;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors((unsigned long)vector);

	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
	pgd_high = (pgd_ptr >> 32ULL);
	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
	hyp_stack_ptr = stack_page + PAGE_SIZE;
	vector_ptr = (unsigned long)__kvm_hyp_vector;

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * The init code doesn't need to preserve these registers, as r1-r3
	 * and r12 are caller-save according to the AAPCS.
	 * Note that we slightly misuse the prototype by casting pgd_low to
	 * a void *.
	 */
	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
}

/**
 * init_hyp_mode - initialize Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	phys_addr_t init_phys_addr;
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * It is probably enough to obtain the default on one
	 * CPU. It's unlikely to be different on the others.
	 */
	hyp_default_vectors = __hyp_get_vectors();

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_free_stack_pages;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Execute the init code on each CPU.
	 *
	 * Note: The stack is not mapped yet, so don't do anything else than
	 * initializing the hypervisor mode on each CPU using a local stack
	 * space for temporary storage.
	 */
	init_phys_addr = virt_to_phys(__kvm_hyp_init);
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, cpu_init_hyp_mode,
					 (void *)(long)init_phys_addr, 1);
	}

	/*
	 * Unmap the identity mapping
	 */
	kvm_clear_hyp_idmap();

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_free_mappings;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_free_mappings;
		}
	}

	/*
	 * Map the host VFP structures
	 */
	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
	if (!kvm_host_vfp_state) {
		err = -ENOMEM;
		kvm_err("Cannot allocate host VFP state\n");
		goto out_free_mappings;
	}

	for_each_possible_cpu(cpu) {
		struct vfp_hard_struct *vfp;

		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
		err = create_hyp_mappings(vfp, vfp + 1);

		if (err) {
			kvm_err("Cannot map host VFP state: %d\n", err);
			goto out_free_vfp;
		}
	}

	kvm_info("Hyp mode initialized successfully\n");
	return 0;
out_free_vfp:
	free_percpu(kvm_host_vfp_state);
out_free_mappings:
	free_hyp_pmds();
out_free_stack_pages:
	for_each_possible_cpu(cpu)
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

/**
 * kvm_arch_init - initialize Hyp-mode and memory mappings on all CPUs
 */
int kvm_arch_init(void *opaque)
{
	int err;

	if (!is_hyp_mode_available()) {
		kvm_err("HYP mode not available\n");
		return -ENODEV;
	}

	if (kvm_target_cpu() < 0) {
		kvm_err("Target CPU not supported!\n");
		return -ENODEV;
	}

	err = init_hyp_mode();
	if (err)
		goto out_err;

	return 0;
out_err:
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
}

static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}

module_init(arm_init);