/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/ioctl.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

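/*
 * Descriptive note (a sketch of the behavior, not an authoritative spec):
 * when halt polling is enabled, a vcpu that halts busy-waits up to
 * halt_poll_ns nanoseconds for a new event before actually blocking in
 * kvm_vcpu_block(); 0 disables polling.  The parameter is writable at
 * runtime through sysfs (S_IWUSR below).
 */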
static unsigned int halt_poll_ns;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static void kvm_release_pfn_dirty(pfn_t pfn);
static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

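/*
 * Descriptive note: a pfn is "reserved" if it does not back ordinary
 * pageable memory, e.g. MMIO or firmware ranges; KVM skips page
 * refcounting and dirty/accessed tracking for such pfns.  pfns without
 * a valid struct page are conservatively treated as reserved.
 */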
bool kvm_is_reserved_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));

	return true;
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
int vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	if (mutex_lock_killable(&vcpu->mutex))
		return -EINTR;
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
	return 0;
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

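/*
 * Descriptive note: post @req on every vcpu, then IPI the CPUs currently
 * running a vcpu in guest mode so they notice the request promptly.
 * Returns true if IPIs were sent (when the cpumask allocation fails, all
 * online CPUs are IPIed as a conservative fallback).  Typical use, as in
 * kvm_flush_remote_tlbs() below:
 *
 *	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 *		++kvm->stat.remote_tlb_flush;
 */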
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	me = get_cpu();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		/* Set ->requests bit before we read ->mode */
		smp_mb();

		if (cpus != NULL && cpu != -1 && cpu != me &&
		    kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	long dirty_count = kvm->tlbs_dirty;

	smp_mb();
	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}

void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
}

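/*
 * Descriptive note: first-stage vcpu construction, shared by all
 * architectures.  It initializes the mutex and waitqueue, allocates the
 * vcpu->run page that userspace later mmap()s, then hands off to
 * kvm_arch_vcpu_init().  Undone by kvm_vcpu_uninit().
 */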
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	init_waitqueue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	put_pid(vcpu->pid);
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	/* we have to flush the TLB before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);

	kvm_arch_mmu_notifier_invalidate_page(kvm, address);

	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* we have to flush the TLB before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, start, end);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm_memslots *kvm_alloc_memslots(void)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
	if (!slots)
		return NULL;

	/*
	 * Init kvm generation close to the maximum to easily test the
	 * code that handles generation number wrap-around.
	 */
	slots->generation = -150;
	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = slots->memslots[i].id = i;

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont)
{
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(kvm, free, dont);

	free->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot, NULL);

	kvfree(slots);
}

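/*
 * Descriptive note: allocate and initialize a VM — arch state, hardware
 * virtualization enablement, one memslot array per address space, the
 * I/O buses and the MMU notifier.  The error labels unwind in reverse
 * order of setup, and failures are reported via ERR_PTR().
 */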
static struct kvm *kvm_create_vm(unsigned long type)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_disable;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	r = -ENOMEM;
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		kvm->memslots[i] = kvm_alloc_memslots();
		if (!kvm->memslots[i])
			goto out_err_no_srcu;
	}

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i])
			goto out_err;
	}

	spin_lock_init(&kvm->mmu_lock);
	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	INIT_LIST_HEAD(&kvm->devices);

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);

	preempt_notifier_inc();

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	hardware_disable_all();
out_err_no_disable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, kvm->memslots[i]);
	kvm_arch_free_vm(kvm);
	return ERR_PTR(r);
}

/*
 * Avoid using vmalloc for a small buffer.
 * Should not be used when the size is statically known.
 */
void *kvm_kvzalloc(unsigned long size)
{
	if (size > PAGE_SIZE)
		return vzalloc(size);
	else
		return kzalloc(size, GFP_KERNEL);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct list_head *node, *tmp;

	list_for_each_safe(node, tmp, &kvm->devices) {
		struct kvm_device *dev =
			list_entry(node, struct kvm_device, vm_node);

		list_del(node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, kvm->memslots[i]);
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

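/*
 * Worked example for update_memslots() below (illustrative GFNs only).
 * The array is kept sorted by descending base_gfn with empty slots at
 * the tail: given mslots = { 0x300, 0x200, 0x100 }, moving the 0x200
 * slot to 0x400 shifts 0x300 right one position and stores the changed
 * slot at index 0; deleting it instead shifts 0x100 left and parks the
 * now-empty slot after the last live one.
 */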
/*
 * Insert the memslot and re-sort the memslots based on their GFN,
 * so that binary search can be used to look up a GFN.
 * The sorting algorithm takes advantage of having an initially
 * sorted array and a known changed memslot position.
 */
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *new)
{
	int id = new->id;
	int i = slots->id_to_index[id];
	struct kvm_memory_slot *mslots = slots->memslots;

	WARN_ON(mslots[i].id != id);
	if (!new->npages) {
		WARN_ON(!mslots[i].npages);
		if (mslots[i].npages)
			slots->used_slots--;
	} else {
		if (!mslots[i].npages)
			slots->used_slots++;
	}

	while (i < KVM_MEM_SLOTS_NUM - 1 &&
	       new->base_gfn <= mslots[i + 1].base_gfn) {
		if (!mslots[i + 1].npages)
			break;
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
		i++;
	}

	/*
	 * The ">=" is needed when creating a slot with base_gfn == 0,
	 * so that it moves before all those with base_gfn == npages == 0.
	 *
	 * On the other hand, if new->npages is zero, the above loop has
	 * already left i pointing to the beginning of the empty part of
	 * mslots, and the ">=" would move the hole backwards in this
	 * case---which is wrong.  So skip the loop when deleting a slot.
	 */
	if (new->npages) {
		while (i > 0 &&
		       new->base_gfn >= mslots[i - 1].base_gfn) {
			mslots[i] = mslots[i - 1];
			slots->id_to_index[mslots[i].id] = i;
			i--;
		}
	} else
		WARN_ON_ONCE(i != slots->used_slots);

	mslots[i] = *new;
	slots->id_to_index[mslots[i].id] = i;
}

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

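/*
 * Descriptive note: publish @slots as the active memslot array for
 * address space @as_id and wait for all SRCU readers of the old array
 * to drain.  The old memslots are returned and become the caller's to
 * free.  Callers hold kvm->slots_lock.
 */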
static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		int as_id, struct kvm_memslots *slots)
{
	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);

	/*
	 * Set the low bit in the generation, which disables SPTE caching
	 * until the end of synchronize_srcu_expedited.
	 */
	WARN_ON(old_memslots->generation & 1);
	slots->generation = old_memslots->generation + 1;

	rcu_assign_pointer(kvm->memslots[as_id], slots);
	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time. This prevents
	 * vm exits that race with memslot updates from caching a memslot
	 * generation that will (potentially) be valid forever.
	 */
	slots->generation++;

	kvm_arch_memslots_updated(kvm, slots);

	return old_memslots;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	struct kvm_memory_slot *slot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots = NULL, *old_memslots;
	int as_id, id;
	enum kvm_mr_change change;

	r = check_memory_region_flags(mem);
	if (r)
		goto out;

	r = -EINVAL;
	as_id = mem->slot >> 16;
	id = (u16)mem->slot;

	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((id < KVM_USER_MEM_SLOTS) &&
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok(VERIFY_WRITE,
			(void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
		goto out;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	new = old = *slot;

	new.id = id;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	if (npages) {
		if (!old.npages)
			change = KVM_MR_CREATE;
		else { /* Modify an existing slot. */
			if ((mem->userspace_addr != old.userspace_addr) ||
			    (npages != old.npages) ||
			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
				goto out;

			if (base_gfn != old.base_gfn)
				change = KVM_MR_MOVE;
			else if (new.flags != old.flags)
				change = KVM_MR_FLAGS_ONLY;
			else { /* Nothing to change. */
				r = 0;
				goto out;
			}
		}
	} else {
		if (!old.npages)
			goto out;

		change = KVM_MR_DELETE;
		new.base_gfn = 0;
		new.flags = 0;
	}

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		r = -EEXIST;
		kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
			    (slot->id == id))
				continue;
			if (!((base_gfn + npages <= slot->base_gfn) ||
			      (base_gfn >= slot->base_gfn + slot->npages)))
				goto out;
		}
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;
	if (change == KVM_MR_CREATE) {
		new.userspace_addr = mem->userspace_addr;

		if (kvm_arch_create_memslot(kvm, &new, npages))
			goto out_free;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out_free;
	}

	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
	if (!slots)
		goto out_free;
	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));

	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
		slot = id_to_memslot(slots, id);
		slot->flags |= KVM_MEMSLOT_INVALID;

		old_memslots = install_new_memslots(kvm, as_id, slots);

		/* slot was deleted or moved, clear iommu mapping */
		kvm_iommu_unmap_pages(kvm, &old);
		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);

		/*
		 * We can re-use the old_memslots from above: the only difference
		 * from the currently installed memslots is the invalid flag. This
		 * will get overwritten by update_memslots anyway.
		 */
		slots = old_memslots;
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
	if (r)
		goto out_slots;

	/* actual memory is freed via old in kvm_free_memslot below */
	if (change == KVM_MR_DELETE) {
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	}

	update_memslots(slots, &new);
	old_memslots = install_new_memslots(kvm, as_id, slots);

	kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);

	kvm_free_memslot(kvm, &old, &new);
	kvfree(old_memslots);

	/*
	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
	 * un-mapped and re-mapped if their base changes.  Since base change
	 * unmapping is handled above with slot deletion, mapping alone is
	 * needed here.  Anything else the iommu might care about for existing
	 * slots (size changes, userspace addr changes and read-only flag
	 * changes) is disallowed above, so any other attribute changes getting
	 * here can be skipped.
	 */
	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		r = kvm_iommu_map_pages(kvm, &new);
		return r;
	}

	return 0;

out_slots:
	kvfree(slots);
out_free:
	kvm_free_memslot(kvm, &new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

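/*
 * Minimal usage sketch for kvm_set_memory_region() below (hypothetical
 * caller, field values illustrative only).  Note that mem->slot packs
 * the address space id in the high 16 bits and the slot id in the low
 * 16 bits:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = (as_id << 16) | id,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size     = 2 * 1024 * 1024,
 *		.userspace_addr  = uaddr,	(page-aligned)
 *	};
 *	r = kvm_set_memory_region(kvm, &mem);
 */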
int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region *mem)
{
	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	return kvm_set_memory_region(kvm, mem);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int r, i, as_id, id;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
 *	are dirty, write protect them for the next write.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @is_dirty:	flag set if any page is dirty
 *
 * We need to keep in mind that VCPU threads can write to the bitmap
 * concurrently.  So, to avoid losing track of dirty pages we keep the
 * following order:
 *
 * 1. Take a snapshot of the bit and clear it if needed.
 * 2. Write protect the corresponding page.
 * 3. Copy the snapshot to userspace.
 * 4. Upon return, the caller flushes TLBs if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before, and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 *
 */
1038int kvm_get_dirty_log_protect(struct kvm *kvm,
1039 struct kvm_dirty_log *log, bool *is_dirty)
1040{
Paolo Bonzini9f6b8022015-05-17 16:20:07 +02001041 struct kvm_memslots *slots;
Mario Smarduchba0513b2015-01-15 15:58:53 -08001042 struct kvm_memory_slot *memslot;
Paolo Bonzinif481b062015-05-17 17:30:37 +02001043 int r, i, as_id, id;
Mario Smarduchba0513b2015-01-15 15:58:53 -08001044 unsigned long n;
1045 unsigned long *dirty_bitmap;
1046 unsigned long *dirty_bitmap_buffer;
1047
1048 r = -EINVAL;
Paolo Bonzinif481b062015-05-17 17:30:37 +02001049 as_id = log->slot >> 16;
1050 id = (u16)log->slot;
1051 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
Mario Smarduchba0513b2015-01-15 15:58:53 -08001052 goto out;
1053
Paolo Bonzinif481b062015-05-17 17:30:37 +02001054 slots = __kvm_memslots(kvm, as_id);
1055 memslot = id_to_memslot(slots, id);
Mario Smarduchba0513b2015-01-15 15:58:53 -08001056
1057 dirty_bitmap = memslot->dirty_bitmap;
1058 r = -ENOENT;
1059 if (!dirty_bitmap)
1060 goto out;
1061
1062 n = kvm_dirty_bitmap_bytes(memslot);
1063
1064 dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
1065 memset(dirty_bitmap_buffer, 0, n);
1066
1067 spin_lock(&kvm->mmu_lock);
1068 *is_dirty = false;
1069 for (i = 0; i < n / sizeof(long); i++) {
1070 unsigned long mask;
1071 gfn_t offset;
1072
1073 if (!dirty_bitmap[i])
1074 continue;
1075
1076 *is_dirty = true;
1077
1078 mask = xchg(&dirty_bitmap[i], 0);
1079 dirty_bitmap_buffer[i] = mask;
1080
Takuya Yoshikawa58d29302015-03-17 16:19:58 +09001081 if (mask) {
1082 offset = i * BITS_PER_LONG;
1083 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
1084 offset, mask);
1085 }
Mario Smarduchba0513b2015-01-15 15:58:53 -08001086 }
1087
1088 spin_unlock(&kvm->mmu_lock);
1089
1090 r = -EFAULT;
1091 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
1092 goto out;
1093
1094 r = 0;
1095out:
1096 return r;
1097}
1098EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
1099#endif
1100
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09001101bool kvm_largepages_enabled(void)
1102{
1103 return largepages_enabled;
1104}
1105
Marcelo Tosatti54dee992009-06-11 12:07:44 -03001106void kvm_disable_largepages(void)
1107{
1108 largepages_enabled = false;
1109}
1110EXPORT_SYMBOL_GPL(kvm_disable_largepages);
1111
Gleb Natapov49c77542010-10-18 15:22:23 +02001112struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
1113{
1114 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
1115}
Avi Kivitya1f4d3952010-06-21 11:44:20 +03001116EXPORT_SYMBOL_GPL(gfn_to_memslot);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001117
Paolo Bonzini8e734852015-05-17 13:58:53 +02001118struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
1119{
1120 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
1121}
1122
Izik Eiduse0d62c72007-10-24 23:57:46 +02001123int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
1124{
Xiao Guangrongbf3e05b2011-11-24 17:40:57 +08001125 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
Izik Eiduse0d62c72007-10-24 23:57:46 +02001126
Alex Williamsonbbacc0c2012-12-10 10:33:09 -07001127 if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
Xiao Guangrongbf3e05b2011-11-24 17:40:57 +08001128 memslot->flags & KVM_MEMSLOT_INVALID)
1129 return 0;
Izik Eiduse0d62c72007-10-24 23:57:46 +02001130
Xiao Guangrongbf3e05b2011-11-24 17:40:57 +08001131 return 1;
Izik Eiduse0d62c72007-10-24 23:57:46 +02001132}
1133EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1134
Joerg Roedel8f0b1ab2010-01-28 12:37:56 +01001135unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
1136{
1137 struct vm_area_struct *vma;
1138 unsigned long addr, size;
1139
1140 size = PAGE_SIZE;
1141
1142 addr = gfn_to_hva(kvm, gfn);
1143 if (kvm_is_error_hva(addr))
1144 return PAGE_SIZE;
1145
1146 down_read(&current->mm->mmap_sem);
1147 vma = find_vma(current->mm, addr);
1148 if (!vma)
1149 goto out;
1150
1151 size = vma_kernel_pagesize(vma);
1152
1153out:
1154 up_read(&current->mm->mmap_sem);
1155
1156 return size;
1157}
1158
static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
				 gfn_t gfn)
{
	return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);

/*
 * If *writable comes back false, the hva returned by this function may
 * only be read, not written.
 */
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
				      gfn_t gfn, bool *writable)
{
	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

	if (!kvm_is_error_hva(hva) && writable)
		*writable = !memslot_is_readonly(slot);

	return hva;
}

unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

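/*
 * Editor's usage sketch (hypothetical helper): callers pass &writable
 * to the _prot variants above to learn whether the resolved hva may
 * also be written.
 */
static inline bool kvm_example_hva_writable(struct kvm *kvm, gfn_t gfn)
{
	bool writable = false;

	return !kvm_is_error_hva(gfn_to_hva_prot(kvm, gfn, &writable)) &&
	       writable;
}
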
static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
				unsigned long start, int write, struct page **page)
{
	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;

	if (write)
		flags |= FOLL_WRITE;

	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;

	rc = __get_user_pages(current, current->mm, addr, 1,
			      flags, NULL, NULL, NULL);
	return rc == -EHWPOISON;
}

/*
 * The atomic path for grabbing a writable pfn: on success the pfn is
 * stored in @pfn and true is returned; otherwise false is returned.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
			    bool write_fault, bool *writable, pfn_t *pfn)
{
	struct page *page[1];
	int npages;

	if (!(async || atomic))
		return false;

	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows mapping a writable pfn for a read fault
	 * request.
	 */
	if (!(write_fault || writable))
		return false;

	npages = __get_user_pages_fast(addr, 1, 1, page);
	if (npages == 1) {
		*pfn = page_to_pfn(page[0]);

		if (writable)
			*writable = true;
		return true;
	}

	return false;
}

/*
 * The slow path for resolving the pfn of the specified host virtual
 * address: 1 indicates success, -errno is returned if an error is
 * detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, pfn_t *pfn)
{
	struct page *page[1];
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (async) {
		down_read(&current->mm->mmap_sem);
		npages = get_user_page_nowait(current, current->mm,
					      addr, write_fault, page);
		up_read(&current->mm->mmap_sem);
	} else
		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
						   write_fault, 0, page,
						   FOLL_TOUCH|FOLL_HWPOISON);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage[1];

		npages = __get_user_pages_fast(addr, 1, 1, wpage);
		if (npages == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
		}

		npages = 1;
	}
	*pfn = page_to_pfn(page[0]);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: if true, the function must not sleep; only the fast path
 *          is tried
 * @async: if non-NULL, the function does not wait for I/O when the
 *         host page is not in memory; *async tells the caller to
 *         retry asynchronously instead
 * @write_fault: whether we should get a writable host page
 * @writable: whether mapping a writable host page for a !@write_fault
 *            request is allowed
 *
 * The function maps a writable host page in two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable; @writable then tells the
 *     caller whether the mapping is writable.
 */
static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	pfn_t pfn = 0;
	int npages;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
		return pfn;

	if (atomic)
		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
	if (npages == 1)
		return pfn;

	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	      (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}

	vma = find_vma_intersection(current->mm, addr, addr + 1);

	if (vma == NULL)
		pfn = KVM_PFN_ERR_FAULT;
	else if ((vma->vm_flags & VM_PFNMAP)) {
		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
		BUG_ON(!kvm_is_reserved_pfn(pfn));
	} else {
		if (async && vma_is_valid(vma, write_fault))
			*async = true;
		pfn = KVM_PFN_ERR_FAULT;
	}
exit:
	up_read(&current->mm->mmap_sem);
	return pfn;
}

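/*
 * Editor's sketch (hypothetical helper): the fast/slow split above in
 * code form.  With atomic=true only the lockless gup path is tried and
 * the call fails with KVM_PFN_ERR_FAULT instead of sleeping; this
 * mirrors how gfn_to_pfn_memslot_atomic() drives hva_to_pfn() below.
 */
static inline pfn_t kvm_example_pin_atomic(unsigned long addr)
{
	/* A write pin that must never sleep. */
	return hva_to_pfn(addr, true, NULL, true, NULL);
}
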
pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
			   bool *async, bool write_fault, bool *writable)
{
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

	if (addr == KVM_HVA_ERR_RO_BAD)
		return KVM_PFN_ERR_RO_FAULT;

	if (kvm_is_error_hva(addr))
		return KVM_PFN_NOSLOT;

	/* Do not map writable pfn in the readonly memslot. */
	if (writable && memslot_is_readonly(slot)) {
		*writable = false;
		writable = NULL;
	}

	return hva_to_pfn(addr, atomic, async, write_fault,
			  writable);
}
EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);

pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
				    write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);

pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

	addr = gfn_to_hva_many(slot, gfn, &entry);
	if (kvm_is_error_hva(addr))
		return -1;

	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

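/*
 * Editor's usage sketch (hypothetical helper): a caller of
 * gfn_to_page_many_atomic() owns one reference per pinned page and
 * must drop them all when done.
 */
static inline void kvm_example_release_pages(struct page **pages, int n)
{
	while (n-- > 0)
		kvm_release_page_clean(pages[n]);
}
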
static struct page *kvm_pfn_to_page(pfn_t pfn)
{
	if (is_error_noslot_pfn(pfn))
		return KVM_ERR_PTR_BAD_PAGE;

	if (kvm_is_reserved_pfn(pfn)) {
		WARN_ON(1);
		return KVM_ERR_PTR_BAD_PAGE;
	}

	return pfn_to_page(pfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);

	return kvm_pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(gfn_to_page);

struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	pfn_t pfn;

	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);

	return kvm_pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	WARN_ON(is_error_page(page));

	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

static void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

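/*
 * Editor's sketch (hypothetical helper): the canonical loop that
 * kvm_read_guest(), kvm_write_guest() and kvm_clear_guest() all share,
 * shown once in isolation.  With a 4096-byte page, offset 4000 and
 * len 1000, next_segment() yields 96 for the first chunk and 904 for
 * the second.
 */
static inline void kvm_example_for_each_segment(gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);
	int seg;

	while ((seg = next_segment(len, offset)) != 0) {
		/* operate on (gfn, offset, seg) here */
		offset = 0;
		len -= seg;
		++gfn;
	}
}
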
static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
				 void *data, int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
			     int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

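/*
 * Editor's usage sketch (hypothetical caller and struct): reading a
 * guest-physical structure with kvm_read_guest().  The copy may cross
 * page and memslot boundaries; the segment loop above hides the split.
 */
struct kvm_example_desc {
	u64 base;
	u32 limit;
};

static inline int kvm_example_read_desc(struct kvm *kvm, gpa_t gpa,
					struct kvm_example_desc *desc)
{
	return kvm_read_guest(kvm, gpa, desc, sizeof(*desc));
}
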
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);

static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
				   void *data, int offset, unsigned long len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);

int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
			       void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);

static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
				  const void *data, int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva_memslot(memslot, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(memslot, gfn);
	return 0;
}

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
			 const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_write_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			      const void *data, int offset, int len)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_write_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);

int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int offset = offset_in_page(gpa);
	gfn_t start_gfn = gpa >> PAGE_SHIFT;
	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
	gfn_t nr_pages_avail;

	ghc->gpa = gpa;
	ghc->generation = slots->generation;
	ghc->len = len;
	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
	if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
		ghc->hva += offset;
	} else {
		/*
		 * If the requested region crosses two memslots, we still
		 * verify that the entire region is valid here.
		 */
		while (start_gfn <= end_gfn) {
			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
						   &nr_pages_avail);
			if (kvm_is_error_hva(ghc->hva))
				return -EFAULT;
			start_gfn += nr_pages_avail;
		}
		/* Use the slow path for cross page reads and writes. */
		ghc->memslot = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

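/*
 * Editor's usage sketch (hypothetical helper): the gfn_to_hva_cache
 * pattern -- initialize once against a fixed guest address, then use
 * the *_cached accessors on the hot path; they revalidate
 * automatically when the memslot generation changes.
 */
static inline int kvm_example_cached_write(struct kvm *kvm,
					   struct gfn_to_hva_cache *ghc,
					   gpa_t gpa, u32 *val)
{
	int r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(*val));

	if (r)
		return r;
	return kvm_write_guest_cached(kvm, ghc, val, sizeof(*val));
}
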
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_write_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_to_user((void __user *)ghc->hva, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_read_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_from_user(data, (void __user *)ghc->hva, len);
	if (r)
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));

	return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
				    gfn_t gfn)
{
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		set_bit_le(rel_gfn, memslot->dirty_bitmap);
	}
}

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);

void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	mark_page_dirty_in_slot(memslot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);

static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
	if (kvm_arch_vcpu_runnable(vcpu)) {
		kvm_make_request(KVM_REQ_UNHALT, vcpu);
		return -EINTR;
	}
	if (kvm_cpu_has_pending_timer(vcpu))
		return -EINTR;
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	ktime_t start, cur;
	DEFINE_WAIT(wait);
	bool waited = false;

	start = cur = ktime_get();
	if (halt_poll_ns) {
		ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);

		do {
			/*
			 * This sets KVM_REQ_UNHALT if an interrupt
			 * arrives.
			 */
			if (kvm_vcpu_check_block(vcpu) < 0) {
				++vcpu->stat.halt_successful_poll;
				goto out;
			}
			cur = ktime_get();
		} while (single_task_running() && ktime_before(cur, stop));
	}

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_vcpu_check_block(vcpu) < 0)
			break;

		waited = true;
		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
	cur = ktime_get();

out:
	trace_kvm_vcpu_wakeup(ktime_to_ns(cur) - ktime_to_ns(start), waited);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);

#ifndef CONFIG_S390
/*
 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (kvm_arch_vcpu_should_kick(vcpu))
			smp_send_reschedule(cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
#endif /* !CONFIG_S390 */

int kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct pid *pid;
	struct task_struct *task = NULL;
	int ret = 0;

	rcu_read_lock();
	pid = rcu_dereference(target->pid);
	if (pid)
		task = get_pid_task(pid, PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return ret;
	ret = yield_to(task, 1);
	put_task_struct(task);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);

/*
 * Helper that checks whether a VCPU is eligible for directed yield.
 * The most eligible candidate to yield to is chosen by the following
 * heuristics:
 *
 * (a) a VCPU which has not done a pl-exit or had a cpu relax intercept
 * recently (a preempted lock holder), indicated by @in_spin_loop.
 * Set at the beginning and cleared at the end of the interception/PLE
 * handler.
 *
 * (b) a VCPU which has done a pl-exit/cpu relax intercept but did not
 * get a chance last time (it has mostly become eligible now, since we
 * have probably yielded to the lock holder in the last iteration).
 * This is done by toggling @dy_eligible each time a VCPU is checked
 * for eligibility.
 *
 * Yielding to a recently pl-exited/cpu-relax-intercepted VCPU before
 * yielding to the preempted lock holder could result in wrong VCPU
 * selection and CPU burning.  Giving priority to a potential lock
 * holder increases lock progress.
 *
 * Since the algorithm is based on heuristics, accessing another VCPU's
 * data without locking does not harm.  It may result in trying to
 * yield to the same VCPU, failing, and continuing with the next VCPU,
 * and so on.
 */
static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	bool eligible;

	eligible = !vcpu->spin_loop.in_spin_loop ||
		    vcpu->spin_loop.dy_eligible;

	if (vcpu->spin_loop.in_spin_loop)
		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);

	return eligible;
#else
	return true;
#endif
}

void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
	int yielded = 0;
	int try = 3;
	int pass;
	int i;

	kvm_vcpu_set_in_spin_loop(me, true);
	/*
	 * We boost the priority of a VCPU that is runnable but not
	 * currently running, because it got preempted by something
	 * else and called schedule in __vcpu_run.  Hopefully that
	 * VCPU is holding the lock that we need and will release it.
	 * We approximate round-robin by starting at the last boosted VCPU.
	 */
	for (pass = 0; pass < 2 && !yielded && try; pass++) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!pass && i <= last_boosted_vcpu) {
				i = last_boosted_vcpu;
				continue;
			} else if (pass && i > last_boosted_vcpu)
				break;
			if (!ACCESS_ONCE(vcpu->preempted))
				continue;
			if (vcpu == me)
				continue;
			if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
				continue;
			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
				continue;

			yielded = kvm_vcpu_yield_to(vcpu);
			if (yielded > 0) {
				kvm->last_boosted_vcpu = i;
				break;
			} else if (yielded < 0) {
				try--;
				if (!try)
					break;
			}
		}
	}
	kvm_vcpu_set_in_spin_loop(me, false);

	/* Ensure vcpu is not eligible during next spinloop */
	kvm_vcpu_set_dy_eligible(me, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return kvm_arch_vcpu_fault(vcpu, vmf);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

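/*
 * Editor's userspace-side sketch (illustration only, not kernel code):
 * the fault handler above is what backs an mmap of a vcpu fd; a VMM
 * typically maps the shared kvm_run page like this.
 */
#if 0
	int mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);
#endif
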
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
#ifdef CONFIG_KVM_COMPAT
	.compat_ioctl   = kvm_vcpu_compat_ioctl,
#endif
	.mmap           = kvm_vcpu_mmap,
	.llseek		= noop_llseek,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	if (id >= KVM_MAX_VCPUS)
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (!kvm_vcpu_compatible(vcpu)) {
		r = -EINVAL;
		goto unlock_vcpu_destroy;
	}
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto unlock_vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto unlock_vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto unlock_vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_postcreate(vcpu);
	return r;

unlock_vcpu_destroy:
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
		return -EINVAL;

#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
	/*
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break it.
	 */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif

	r = vcpu_load(vcpu);
	if (r)
		return r;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
			/* The thread running this VCPU changed. */
			struct pid *oldpid = vcpu->pid;
			struct pid *newpid = get_task_pid(current, PIDTYPE_PID);

			rcu_assign_pointer(vcpu->pid, newpid);
			if (oldpid)
				synchronize_rcu();
			put_pid(oldpid);
		}
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
		if (IS_ERR(kvm_regs)) {
			r = PTR_ERR(kvm_regs);
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
		if (IS_ERR(kvm_sregs)) {
			r = PTR_ERR(kvm_sregs);
			kvm_sregs = NULL;
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof(tr)))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof(tr)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof(dbg)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof(kvm_sigmask)))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof(sigset))
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof(sigset)))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = memdup_user(argp, sizeof(*fpu));
		if (IS_ERR(fpu)) {
			r = PTR_ERR(fpu);
			fpu = NULL;
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	vcpu_put(vcpu);
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
				  unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = compat_ptr(arg);
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

	switch (ioctl) {
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		compat_sigset_t csigset;
		sigset_t sigset;

		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof(kvm_sigmask)))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof(csigset))
				goto out;
			r = -EFAULT;
			if (copy_from_user(&csigset, sigmask_arg->sigset,
					   sizeof(csigset)))
				goto out;
			sigset_from_compat(&sigset, &csigset);
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		} else
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
		break;
	}
	default:
		r = kvm_vcpu_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static int kvm_device_ioctl_attr(struct kvm_device *dev,
				 int (*accessor)(struct kvm_device *dev,
						 struct kvm_device_attr *attr),
				 unsigned long arg)
{
	struct kvm_device_attr attr;

	if (!accessor)
		return -EPERM;

	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
		return -EFAULT;

	return accessor(dev, &attr);
}

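/*
 * Editor's userspace-side sketch (illustration only, not kernel code):
 * how the attr ioctls above are driven from a VMM.  The group/attr
 * values are device-specific and purely illustrative here.
 */
#if 0
	struct kvm_device_attr attr = {
		.group = 0,				/* device-defined */
		.attr  = 0,				/* device-defined */
		.addr  = (__u64)(unsigned long)&value,
	};

	if (ioctl(device_fd, KVM_SET_DEVICE_ATTR, &attr))
		perror("KVM_SET_DEVICE_ATTR");
#endif
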
static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
			     unsigned long arg)
{
	struct kvm_device *dev = filp->private_data;

	switch (ioctl) {
	case KVM_SET_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
	case KVM_GET_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
	case KVM_HAS_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
	default:
		if (dev->ops->ioctl)
			return dev->ops->ioctl(dev, ioctl, arg);

		return -ENOTTY;
	}
}

static int kvm_device_release(struct inode *inode, struct file *filp)
{
	struct kvm_device *dev = filp->private_data;
	struct kvm *kvm = dev->kvm;

	kvm_put_kvm(kvm);
	return 0;
}

2527static const struct file_operations kvm_device_fops = {
2528 .unlocked_ioctl = kvm_device_ioctl,
Christian Borntraegerde8e5d72015-02-03 09:35:15 +01002529#ifdef CONFIG_KVM_COMPAT
Scott Wooddb6ae612013-04-30 20:00:45 -05002530 .compat_ioctl = kvm_device_ioctl,
2531#endif
Scott Wood852b6d52013-04-12 14:08:42 +00002532 .release = kvm_device_release,
2533};
2534
2535struct kvm_device *kvm_device_from_filp(struct file *filp)
2536{
2537 if (filp->f_op != &kvm_device_fops)
2538 return NULL;
2539
2540 return filp->private_data;
2541}
2542
Will Deacond60eacb2014-09-02 10:27:33 +01002543static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
2544#ifdef CONFIG_KVM_MPIC
2545 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
2546 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
2547#endif
2548
2549#ifdef CONFIG_KVM_XICS
2550 [KVM_DEV_TYPE_XICS] = &kvm_xics_ops,
2551#endif
Will Deacond60eacb2014-09-02 10:27:33 +01002552};
2553
2554int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
2555{
2556 if (type >= ARRAY_SIZE(kvm_device_ops_table))
2557 return -ENOSPC;
2558
2559 if (kvm_device_ops_table[type] != NULL)
2560 return -EEXIST;
2561
2562 kvm_device_ops_table[type] = ops;
2563 return 0;
2564}
2565
Wanpeng Li571ee1b2014-10-09 18:30:08 +08002566void kvm_unregister_device_ops(u32 type)
2567{
2568 if (kvm_device_ops_table[type] != NULL)
2569 kvm_device_ops_table[type] = NULL;
2570}
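/*
 * Device types that are not listed statically in the table above register
 * themselves at runtime.  A rough sketch of how a caller might use this,
 * with a hypothetical ops structure and type constant:
 *
 *	static struct kvm_device_ops example_ops = { ... };
 *
 *	r = kvm_register_device_ops(&example_ops, KVM_DEV_TYPE_EXAMPLE);
 *	...
 *	kvm_unregister_device_ops(KVM_DEV_TYPE_EXAMPLE);  // on teardown
 *
 * Registration fails with -EEXIST if the slot is already taken and
 * -ENOSPC if the type index lies beyond the table.
 */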

static int kvm_ioctl_create_device(struct kvm *kvm,
				   struct kvm_create_device *cd)
{
	struct kvm_device_ops *ops = NULL;
	struct kvm_device *dev;
	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
	int ret;

	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
		return -ENODEV;

	ops = kvm_device_ops_table[cd->type];
	if (ops == NULL)
		return -ENODEV;

	if (test)
		return 0;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->ops = ops;
	dev->kvm = kvm;

	ret = ops->create(dev, cd->type);
	if (ret < 0) {
		kfree(dev);
		return ret;
	}

	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
	if (ret < 0) {
		ops->destroy(dev);
		return ret;
	}

	list_add(&dev->vm_node, &kvm->devices);
	kvm_get_kvm(kvm);
	cd->fd = ret;
	return 0;
}
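/*
 * Userspace view of the above, as a rough sketch: KVM_CREATE_DEVICE_TEST
 * probes for support without instantiating anything, and the real call
 * returns a new device fd in cd.fd (KVM_DEV_TYPE_VFIO used purely as an
 * example type):
 *
 *	struct kvm_create_device cd = {
 *		.type  = KVM_DEV_TYPE_VFIO,
 *		.flags = KVM_CREATE_DEVICE_TEST,
 *	};
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0) {
 *		cd.flags = 0;
 *		ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *		// cd.fd now accepts KVM_{GET,SET,HAS}_DEVICE_ATTR
 *	}
 *
 * The device fd holds a reference on the VM (kvm_get_kvm above), dropped
 * in kvm_device_release().
 */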

static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
#ifdef CONFIG_HAVE_KVM_MSI
	case KVM_CAP_SIGNAL_MSI:
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	case KVM_CAP_IRQFD:
	case KVM_CAP_IRQFD_RESAMPLE:
#endif
	case KVM_CAP_CHECK_EXTENSION_VM:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
#if KVM_ADDRESS_SPACE_NUM > 1
	case KVM_CAP_MULTI_ADDRESS_SPACE:
		return KVM_ADDRESS_SPACE_NUM;
#endif
	default:
		break;
	}
	return kvm_vm_ioctl_check_extension(kvm, arg);
}
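/*
 * KVM_CHECK_EXTENSION returns 0 for "absent" and a positive,
 * capability-specific value for "present".  With KVM_CAP_CHECK_EXTENSION_VM
 * it can be issued on a VM fd as well as on /dev/kvm; a sketch:
 *
 *	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQ_ROUTING) > 0)
 *		setup_gsi_routing(vm_fd);  // value is KVM_MAX_IRQ_ROUTES here
 *
 * setup_gsi_routing() is a hypothetical userspace helper, not part of this
 * file.
 */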

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof(kvm_userspace_mem)))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof(log)))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof(zone)))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof(zone)))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof(data)))
			goto out;
		r = kvm_irqfd(kvm, &data);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof(data)))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
#ifdef CONFIG_HAVE_KVM_MSI
	case KVM_SIGNAL_MSI: {
		struct kvm_msi msi;

		r = -EFAULT;
		if (copy_from_user(&msi, argp, sizeof(msi)))
			goto out;
		r = kvm_send_userspace_msi(kvm, &msi);
		break;
	}
#endif
#ifdef __KVM_HAVE_IRQ_LINE
	case KVM_IRQ_LINE_STATUS:
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
			goto out;

		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
					  ioctl == KVM_IRQ_LINE_STATUS);
		if (r)
			goto out;

		r = -EFAULT;
		if (ioctl == KVM_IRQ_LINE_STATUS) {
			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
				goto out;
		}

		r = 0;
		break;
	}
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
	case KVM_CREATE_DEVICE: {
		struct kvm_create_device cd;

		r = -EFAULT;
		if (copy_from_user(&cd, argp, sizeof(cd)))
			goto out;

		r = kvm_ioctl_create_device(kvm, &cd);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(argp, &cd, sizeof(cd)))
			goto out;

		r = 0;
		break;
	}
	case KVM_CHECK_EXTENSION:
		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
		break;
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
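/*
 * The workhorse ioctl above is KVM_SET_USER_MEMORY_REGION, which maps a
 * range of the caller's address space into guest physical memory.  A
 * minimal sketch, assuming buf came from an earlier mmap():
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0x100000,   // example GPA
 *		.memory_size     = 0x200000,   // example size, 2 MiB
 *		.userspace_addr  = (__u64)(uintptr_t)buf,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 *
 * Passing memory_size == 0 deletes the slot; the addresses and sizes here
 * are illustrative only.
 */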

#ifdef CONFIG_KVM_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot = compat_log.slot;
		log.padding1 = compat_log.padding1;
		log.padding2 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_KVM_COMPAT
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
	.llseek         = noop_llseek,
};

static int kvm_dev_ioctl_create_vm(unsigned long type)
{
	int r;
	struct kvm *kvm;

	kvm = kvm_create_vm(type);
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0) {
		kvm_put_kvm(kvm);
		return r;
	}
#endif
	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
	if (r < 0)
		kvm_put_kvm(kvm);

	return r;
}
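/*
 * The corresponding userspace bootstrap, sketched for illustration: the
 * system fd comes from /dev/kvm, the VM fd from KVM_CREATE_VM (the type
 * argument is 0 on most architectures), and vcpus are then created via
 * KVM_CREATE_VCPU on the VM fd:
 *
 *	int kvm_fd  = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *	int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *
 * Error handling omitted; each fd has its own ioctl namespace, dispatched
 * by kvm_dev_ioctl(), kvm_vm_ioctl() and kvm_vcpu_ioctl() respectively.
 */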

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = kvm_dev_ioctl_create_vm(arg);
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
	.llseek         = noop_llseek,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable();

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
	}
}

static void hardware_enable(void)
{
	raw_spin_lock(&kvm_count_lock);
	if (kvm_usage_count)
		hardware_enable_nolock(NULL);
	raw_spin_unlock(&kvm_count_lock);
}

static void hardware_disable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable();
}

static void hardware_disable(void)
{
	raw_spin_lock(&kvm_count_lock);
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	raw_spin_unlock(&kvm_count_lock);
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable_nolock, NULL, 1);
}

static void hardware_disable_all(void)
{
	raw_spin_lock(&kvm_count_lock);
	hardware_disable_all_nolock();
	raw_spin_unlock(&kvm_count_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	raw_spin_lock(&kvm_count_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable_nolock, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	raw_spin_unlock(&kvm_count_lock);

	return r;
}
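/*
 * kvm_usage_count is a reference count of live VMs: the first
 * hardware_enable_all() turns virtualization on across all online CPUs,
 * and the last hardware_disable_all() turns it off again.  kvm_count_lock
 * is a raw spinlock because hardware_enable()/hardware_disable() also run
 * from the CPU-hotplug notifier below, where sleeping locks are not an
 * option.  Per-CPU state is tracked in cpus_hardware_enabled so the
 * enable/disable callbacks stay idempotent.
 */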

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		hardware_disable();
		break;
	case CPU_STARTING:
		hardware_enable();
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all CPUs when the
	 * system shuts down.
	 */
	pr_info("kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->range[i].dev;

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
				 const struct kvm_io_range *r2)
{
	if (r1->addr < r2->addr)
		return -1;
	if (r1->addr + r1->len > r2->addr + r2->len)
		return 1;
	return 0;
}

static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
{
	return kvm_io_bus_cmp(p1, p2);
}

static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
				 gpa_t addr, int len)
{
	bus->range[bus->dev_count++] = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
		.dev = dev,
	};

	sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
	     kvm_io_bus_sort_cmp, NULL);

	return 0;
}

static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
				    gpa_t addr, int len)
{
	struct kvm_io_range *range, key;
	int off;

	key = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	range = bsearch(&key, bus->range, bus->dev_count,
			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
	if (range == NULL)
		return -ENOENT;

	off = range - bus->range;

	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
		off--;

	return off;
}
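/*
 * bus->range[] is kept sorted by kvm_io_bus_sort_cmp(), so lookups are a
 * bsearch() over the array.  kvm_io_bus_cmp() deliberately reports 0
 * (equal) for overlapping ranges; bsearch() may therefore land on any one
 * of several matching entries, and the backward walk above rewinds to the
 * first of them so that __kvm_io_bus_write()/__kvm_io_bus_read() can try
 * each overlapping device in order until one accepts the access.
 */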

static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			      struct kvm_io_range *range, const void *val)
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
					range->len, val))
			return idx;
		idx++;
	}

	return -EOPNOTSUPP;
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;
	int r;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	r = __kvm_io_bus_write(vcpu, bus, &range, val);
	return r < 0 ? r : 0;
}

/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);

	/* First try the device referenced by cookie. */
	if ((cookie >= 0) && (cookie < bus->dev_count) &&
	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
					val))
			return cookie;

	/*
	 * cookie contained garbage; fall back to search and return the
	 * correct cookie value.
	 */
	return __kvm_io_bus_write(vcpu, bus, &range, val);
}
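/*
 * The cookie variant is an optimization for hot paths: a caller that
 * repeatedly hits the same device (a successful write returns the matching
 * index) can pass that index back in and skip the binary search entirely.
 * Because the bus array can be rebuilt between calls, the cookie is
 * validated against the current array before being trusted.
 */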

static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			     struct kvm_io_range *range, void *val)
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
				       range->len, val))
			return idx;
		idx++;
	}

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_write);

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;
	int r;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	r = __kvm_io_bus_read(vcpu, bus, &range, val);
	return r < 0 ? r : 0;
}


/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	/* exclude ioeventfd which is limited by maximum fd */
	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
		return -ENOSPC;

	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
			  sizeof(struct kvm_io_range)), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
	       sizeof(struct kvm_io_range)));
	kvm_io_bus_insert_dev(new_bus, dev, addr, len);
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}
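/*
 * Bus updates follow a read-copy-update scheme: readers such as
 * kvm_io_bus_write()/kvm_io_bus_read() run under SRCU (srcu_dereference
 * above), while writers, holding slots_lock, build a complete replacement
 * array, publish it with rcu_assign_pointer(), wait for in-flight readers
 * with synchronize_srcu_expedited(), and only then free the old bus.
 * Readers therefore never see a half-updated array and take no locks on
 * the MMIO/PIO fast path.
 */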

/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	r = -ENOENT;
	for (i = 0; i < bus->dev_count; i++)
		if (bus->range[i].dev == dev) {
			r = 0;
			break;
		}

	if (r)
		return r;

	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
			  sizeof(struct kvm_io_range)), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
	new_bus->dev_count--;
	memcpy(new_bus->range + i, bus->range + i + 1,
	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
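/*
 * Each debugfs statistics file sums one u32 counter over every live VM (or
 * every vcpu of every VM), locating the counter by its byte offset within
 * struct kvm or struct kvm_vcpu.  The offset travels through the file's
 * private data pointer, and the debugfs_entries[] table naming the
 * counters is supplied by the architecture code.
 */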

static int kvm_init_debug(void)
{
	int r = -EEXIST;
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	if (kvm_debugfs_dir == NULL)
		goto out;

	for (p = debugfs_entries; p->name; ++p) {
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
		if (p->dentry == NULL)
			goto out_dir;
	}

	return 0;

out_dir:
	debugfs_remove_recursive(kvm_debugfs_dir);
out:
	return r;
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(void)
{
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	return 0;
}

static void kvm_resume(void)
{
	if (kvm_usage_count) {
		WARN_ON(raw_spin_is_locked(&kvm_count_lock));
		hardware_enable_nolock(NULL);
	}
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	if (vcpu->preempted)
		vcpu->preempted = false;

	kvm_arch_sched_in(vcpu, cpu);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	if (current->state == TASK_RUNNING)
		vcpu->preempted = true;
	kvm_arch_vcpu_put(vcpu);
}
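/*
 * The preempt notifier hooks above are registered per vcpu thread and fire
 * whenever the scheduler switches that thread out or back in: arch state
 * is saved in kvm_sched_out() and restored in kvm_sched_in(), so a vcpu
 * can migrate between host CPUs transparently.  vcpu->preempted is only
 * set when the task was still runnable (involuntary preemption); consumers
 * such as the directed-yield logic can use it to pick a good boost
 * candidate.
 */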

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	/*
	 * kvm_arch_init makes sure there's at most one caller
	 * for architectures that support multiple implementations,
	 * like Intel and AMD on x86.
	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
	 * conflicts in case kvm is already set up for another implementation.
	 */
	r = kvm_irqfd_init();
	if (r)
		goto out_irqfd;

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_3;
	}

	r = kvm_async_pf_init();
	if (r)
		goto out_free;

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		pr_err("kvm: misc device register failed\n");
		goto out_unreg;
	}

	register_syscore_ops(&kvm_syscore_ops);

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	r = kvm_init_debug();
	if (r) {
		pr_err("kvm: create debugfs files failed\n");
		goto out_undebugfs;
	}

	r = kvm_vfio_ops_init();
	WARN_ON(r);

	return 0;

out_undebugfs:
	unregister_syscore_ops(&kvm_syscore_ops);
	misc_deregister(&kvm_dev);
out_unreg:
	kvm_async_pf_deinit();
out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	kvm_irqfd_exit();
out_irqfd:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	unregister_syscore_ops(&kvm_syscore_ops);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_irqfd_exit();
	free_cpumask_var(cpus_hardware_enabled);
	kvm_vfio_ops_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);