/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/xcr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
			  | X86_CR4_OSXSAVE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P

/* EFER defaults:
 * - enable syscall by default because it is emulated by KVM
 * - enable LME and LMA by default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

int ignore_msrs = 0;
module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);

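/*
 * "Shared" MSRs are MSRs that are allowed to keep their guest values while
 * the CPU stays in the kernel: the host values are only restored lazily,
 * from a user-return notifier, right before the CPU returns to userspace.
 * Vendor code declares the MSRs it wants switched this way with
 * kvm_define_shared_msr() and loads guest values with kvm_set_shared_msr().
 */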
#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

u64 __read_mostly host_xcr0;

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;

	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
	locals->registered = false;
	user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
{
	struct kvm_shared_msrs *smsr;
	u64 value;

	smsr = &__get_cpu_var(shared_msrs);
	/* only read; nobody should be modifying it at this time,
	 * so we don't need a lock */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
	shared_msrs_global.msrs[slot] = msr;
	/* make sure shared_msrs_global has been updated before it is read */
	smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return;
	smsr->values[slot].curr = value;
	wrmsrl(shared_msrs_global.msrs[slot], value);
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void *ignore)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

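/*
 * Merge the exception being queued with any exception that is already
 * pending.  Following the classification above (benign, contributory,
 * page fault), two contributory exceptions, or a page fault followed by
 * anything other than a benign exception, escalate to a double fault; a
 * further fault while a double fault is pending escalates to a triple
 * fault, i.e. a VM shutdown request.
 */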
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	if (!vcpu->arch.exception.pending) {
	queue:
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.reinject = reinject;
		return;
	}

	/* there is already a pending exception; see how the two combine */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace previous exception with a new one in the hope
		   that instruction re-execution will regenerate the lost
		   exception */
		goto queue;
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

/*
 * Load the PAE pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_gpte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
				    X86_CR0_CD | X86_CR0_NW;

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu))
				return 1;
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3))
			return 1;
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);

	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	xcr0 = xcr;
	if (kvm_x86_ops->get_cpl(vcpu) != 0)
		return 1;
	if (!(xcr0 & XSTATE_FP))
		return 1;
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return 1;
	if (xcr0 & ~host_xcr0)
		return 1;
	vcpu->arch.xcr0 = xcr0;
	vcpu->guest_xcr0_loaded = 0;
	return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (__kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static void update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return;

	/* Update OSXSAVE bit */
	if (cpu_has_xsave && best->function == 0x1) {
		best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= bit(X86_FEATURE_OSXSAVE);
	}
}

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	if (cr4 & CR4_RESERVED_BITS)
		return 1;

	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3))
		return 1;

	if (cr4 & X86_CR4_VMXE)
		return 1;

	kvm_x86_ops->set_cr4(vcpu, cr4);

	if ((cr4 ^ old_cr4) & pdptr_bits)
		kvm_mmu_reset_context(vcpu);

	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
		update_cpuid(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

static int __kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return 0;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS)
			return 1;
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS)
				return 1;
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3))
				return 1;
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		return 1;
	vcpu->arch.cr3 = cr3;
	vcpu->arch.mmu.new_cr3(vcpu);
	return 0;
}

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (__kvm_set_cr3(vcpu, cr3))
		kvm_inject_gp(vcpu, 0);
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (__kvm_set_cr8(vcpu, cr8))
		kvm_inject_gp(vcpu, 0);
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

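/*
 * Debug register accessors.  __kvm_set_dr()/_kvm_get_dr() return 0 on
 * success, 1 when the access must raise #UD (DR4/DR5 with CR4.DE set)
 * and, on the set path, -1 when it must raise #GP (reserved high bits
 * set).  The exception itself is injected by the kvm_set_dr()/kvm_get_dr()
 * wrappers below.
 */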
static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
			kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
			vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
		}
		break;
	}

	return 0;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	int res;

	res = __kvm_set_dr(vcpu, dr, val);
	if (res > 0)
		kvm_queue_exception(vcpu, UD_VECTOR);
	else if (res < 0)
		kvm_inject_gp(vcpu, 0);

	return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}

	return 0;
}

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	if (_kvm_get_dr(vcpu, dr, val)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu. This capabilities test skips MSRs that are
 * kvm-specific. Those are put in the beginning of the list.
 */

#define KVM_SAVE_MSRS_BEGIN	7
static u32 msrs_to_save[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_APIC_ASSIST_PAGE,
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

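/*
 * Validate and load a new EFER value.  The update is refused (non-zero
 * return) if reserved bits are set, if EFER.LME would change while paging
 * is enabled, or if FFXSR/SVME are requested without the matching CPUID
 * feature bits being exposed to the guest.
 */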
static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (efer & efer_reserved_bits)
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
			return 1;
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
			return 1;
	}

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);


/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

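/*
 * The wall clock structure is versioned like a seqlock: the version is
 * bumped to an odd value before the payload is written and to an even
 * value afterwards, so a guest that observes an odd (or changing) version
 * knows it raced with an update and should retry its read.
 */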
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	struct timespec boot;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime(&boot);

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}

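/*
 * Compute the tsc_shift/tsc_to_system_mul pair for the pvclock ABI.  The
 * guest side is expected to convert a TSC delta to nanoseconds roughly as
 *
 *	ns = ((delta << tsc_shift) * tsc_to_system_mul) >> 32
 *
 * (a negative tsc_shift meaning a right shift), so the loops below pick a
 * shift that keeps the multiplier within 32 bits without losing precision.
 */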
static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;
	unsigned long this_tsc_khz;

	if ((!vcpu->time_page))
		return;

	this_tsc_khz = get_cpu_var(cpu_tsc_khz);
	if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = this_tsc_khz;
	}
	put_cpu_var(cpu_tsc_khz);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	monotonic_to_bootbased(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
		(NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;

	vcpu->hv_clock.flags = 0;

	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;

	if (!vcpu->time_page)
		return 0;
	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
	return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}

static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	return valid_mtrr_type(data & 0xff);
}

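/*
 * MSRs 0x200-0x2ff hold the variable-range MTRRs as PHYSBASE/PHYSMASK
 * pairs: variable range n is described by MSR 0x200 + 2n (base) and
 * 0x200 + 2n + 1 (mask), which is what the idx/is_mtrr_mask arithmetic
 * below decodes.
 */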
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!mtrr_valid(vcpu, msr, data))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}

static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return -1;
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			/* only 0 or all 1s can be written to IA32_MCi_CTL;
			 * some Linux kernels though clear bit 10 in bank 4 to
			 * work around a BIOS/GART TBL issue on AMD K8s, so
			 * ignore this to avoid an uncaught #GP in the guest
			 */
			if ((offset & 0x3) == 0 &&
			    data != 0 && (data | (1 << 10)) != ~(u64)0)
				return -1;
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}

static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
	page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!page)
		goto out;
	r = -EFAULT;
	if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
		goto out_free;
	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}

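/*
 * Hyper-V emulation splits its MSRs into two groups: partition-wide MSRs,
 * which live in struct kvm and are written under kvm->lock (see
 * kvm_set_msr_common() below), and per-vcpu MSRs, which are handled
 * without taking the VM-wide lock.
 */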
static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;
	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
		r = true;
		break;
	}

	return r;
}

static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm *kvm = vcpu->kvm;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		kvm->arch.hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!kvm->arch.hv_guest_os_id)
			kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!kvm->arch.hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			kvm->arch.hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		kvm->arch.hv_hypercall = data;
		break;
	}
	default:
		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
			  "data 0x%llx\n", msr, data);
		return 1;
	}
	return 0;
}

static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			vcpu->arch.hv_vapic = data;
			break;
		}
		addr = gfn_to_hva(vcpu->kvm, data >>
				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
		if (kvm_is_error_hva(addr))
			return 1;
		if (clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		vcpu->arch.hv_vapic = data;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	default:
		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
			  "data 0x%llx\n", msr, data);
		return 1;
	}

	return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		return set_efer(vcpu, data);
	case MSR_K7_HWCR:
		data &= ~(u64)0x40;	/* ignore flush filter disable */
		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
		if (data != 0) {
			pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
				data);
			return 1;
		}
		break;
	case MSR_FAM10H_MMIO_CONF_BASE:
		if (data != 0) {
			pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
				"0x%llx\n", data);
			return 1;
		}
		break;
	case MSR_AMD64_NB_CFG:
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
	case MSR_AMD64_PATCH_LOADER:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_write(vcpu, msr, data);
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK_NEW:
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME_NEW:
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_request_guest_time_update(vcpu);
		break;
	}
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
		return set_msr_mce(vcpu, msr, data);

	/* Performance counters are not protected by a CPUID bit,
	 * so we should check all of them in the generic path for the sake of
	 * cross vendor migration.
	 * Writing a zero into the event select MSRs disables them,
	 * which we perfectly emulate ;-). Any other value should be at least
	 * reported, some guests depend on them.
	 */
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_EVNTSEL3:
		if (data != 0)
			pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
				"0x%x data 0x%llx\n", msr, data);
		break;
	/* at least RHEL 4 unconditionally writes to the perfctr registers,
	 * so we ignore writes to make it happy.
	 */
	case MSR_P6_PERFCTR0:
	case MSR_P6_PERFCTR1:
	case MSR_K7_PERFCTR0:
	case MSR_K7_PERFCTR1:
	case MSR_K7_PERFCTR2:
	case MSR_K7_PERFCTR3:
		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
			"0x%x data 0x%llx\n", msr, data);
		break;
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
		if (kvm_hv_msr_partition_wide(msr)) {
			int r;
			mutex_lock(&vcpu->kvm->lock);
			r = set_msr_hyperv_pw(vcpu, msr, data);
			mutex_unlock(&vcpu->kvm->lock);
			return r;
		} else
			return set_msr_hyperv(vcpu, msr, data);
		break;
	default:
		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
			return xen_hvm_config(vcpu, data);
		if (!ignore_msrs) {
			pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
				msr, data);
			return 1;
		} else {
			pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
				msr, data);
			break;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}

Huang Ying890ca9a2009-05-11 16:48:15 +08001411static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1412{
1413 u64 data;
1414 u64 mcg_cap = vcpu->arch.mcg_cap;
1415 unsigned bank_num = mcg_cap & 0xff;
1416
1417 switch (msr) {
1418 case MSR_IA32_P5_MC_ADDR:
1419 case MSR_IA32_P5_MC_TYPE:
1420 data = 0;
1421 break;
1422 case MSR_IA32_MCG_CAP:
1423 data = vcpu->arch.mcg_cap;
1424 break;
1425 case MSR_IA32_MCG_CTL:
1426 if (!(mcg_cap & MCG_CTL_P))
1427 return 1;
1428 data = vcpu->arch.mcg_ctl;
1429 break;
1430 case MSR_IA32_MCG_STATUS:
1431 data = vcpu->arch.mcg_status;
1432 break;
1433 default:
1434 if (msr >= MSR_IA32_MC0_CTL &&
1435 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1436 u32 offset = msr - MSR_IA32_MC0_CTL;
1437 data = vcpu->arch.mce_banks[offset];
1438 break;
1439 }
1440 return 1;
1441 }
1442 *pdata = data;
1443 return 0;
1444}
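
/*
 * The offset arithmetic above relies on the architectural bank layout:
 * each MCE bank owns four consecutive MSRs (CTL, STATUS, ADDR, MISC)
 * starting at MSR_IA32_MC0_CTL, mirrored one-to-one in
 * vcpu->arch.mce_banks[]. A small worked example (illustrative):
 *
 *	msr    = MSR_IA32_MC1_STATUS;		// == MSR_IA32_MC0_CTL + 5
 *	offset = msr - MSR_IA32_MC0_CTL;	// 4 * bank(1) + 1 = 5
 *	data   = vcpu->arch.mce_banks[5];	// bank 1, STATUS word
 */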
1445
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001446static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1447{
1448 u64 data = 0;
1449 struct kvm *kvm = vcpu->kvm;
1450
1451 switch (msr) {
1452 case HV_X64_MSR_GUEST_OS_ID:
1453 data = kvm->arch.hv_guest_os_id;
1454 break;
1455 case HV_X64_MSR_HYPERCALL:
1456 data = kvm->arch.hv_hypercall;
1457 break;
1458 default:
1459 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1460 return 1;
1461 }
1462
1463 *pdata = data;
1464 return 0;
1465}
1466
1467static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1468{
1469 u64 data = 0;
1470
1471 switch (msr) {
1472 case HV_X64_MSR_VP_INDEX: {
1473 int r;
1474 struct kvm_vcpu *v;
1475 kvm_for_each_vcpu(r, v, vcpu->kvm)
1476 if (v == vcpu)
1477 data = r;
1478 break;
1479 }
Gleb Natapov10388a02010-01-17 15:51:23 +02001480 case HV_X64_MSR_EOI:
1481 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1482 case HV_X64_MSR_ICR:
1483 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1484 case HV_X64_MSR_TPR:
1485 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001486 default:
1487 pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1488 return 1;
1489 }
1490 *pdata = data;
1491 return 0;
1492}
1493
Carsten Otte15c4a642007-10-30 18:44:17 +01001494int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1495{
1496 u64 data;
1497
1498 switch (msr) {
Carsten Otte15c4a642007-10-30 18:44:17 +01001499 case MSR_IA32_PLATFORM_ID:
Carsten Otte15c4a642007-10-30 18:44:17 +01001500 case MSR_IA32_UCODE_REV:
Carsten Otte15c4a642007-10-30 18:44:17 +01001501 case MSR_IA32_EBL_CR_POWERON:
Alexander Grafb5e2fec2008-07-22 08:00:45 +02001502 case MSR_IA32_DEBUGCTLMSR:
1503 case MSR_IA32_LASTBRANCHFROMIP:
1504 case MSR_IA32_LASTBRANCHTOIP:
1505 case MSR_IA32_LASTINTFROMIP:
1506 case MSR_IA32_LASTINTTOIP:
Jaswinder Singh Rajput60af2ec2009-05-14 11:00:10 +05301507 case MSR_K8_SYSCFG:
1508 case MSR_K7_HWCR:
Avi Kivity61a6bd62008-12-29 17:32:28 +02001509 case MSR_VM_HSAVE_PA:
Amit Shah1f3ee612009-06-30 16:24:28 +05301510 case MSR_P6_PERFCTR0:
1511 case MSR_P6_PERFCTR1:
Amit Shah7fe29e02009-03-20 12:39:00 +05301512 case MSR_P6_EVNTSEL0:
1513 case MSR_P6_EVNTSEL1:
Amit Shah9e699622009-06-15 13:25:34 +05301514 case MSR_K7_EVNTSEL0:
Amit Shah1f3ee612009-06-30 16:24:28 +05301515 case MSR_K7_PERFCTR0:
Andre Przywara1fdbd482009-06-24 12:44:34 +02001516 case MSR_K8_INT_PENDING_MSG:
Andre Przywarac323c0e2009-06-24 15:37:05 +02001517 case MSR_AMD64_NB_CFG:
Andre Przywaraf7c6d142009-07-02 15:04:14 +02001518 case MSR_FAM10H_MMIO_CONF_BASE:
Carsten Otte15c4a642007-10-30 18:44:17 +01001519 data = 0;
1520 break;
Avi Kivity9ba075a2008-05-26 20:06:35 +03001521 case MSR_MTRRcap:
1522 data = 0x500 | KVM_NR_VAR_MTRR;
1523 break;
1524 case 0x200 ... 0x2ff:
1525 return get_msr_mtrr(vcpu, msr, pdata);
Carsten Otte15c4a642007-10-30 18:44:17 +01001526 case 0xcd: /* fsb frequency */
1527 data = 3;
1528 break;
1529 case MSR_IA32_APICBASE:
1530 data = kvm_get_apic_base(vcpu);
1531 break;
Gleb Natapov0105d1a2009-07-05 17:39:36 +03001532 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1533 return kvm_x2apic_msr_read(vcpu, msr, pdata);
1534 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001535 case MSR_IA32_MISC_ENABLE:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001536 data = vcpu->arch.ia32_misc_enable_msr;
Carsten Otte15c4a642007-10-30 18:44:17 +01001537 break;
Alexander Graf847f0ad2008-02-21 12:11:01 +01001538 case MSR_IA32_PERF_STATUS:
1539 /* TSC increment by tick */
1540 data = 1000ULL;
1541 /* CPU multiplier */
1542 data |= (((uint64_t)4ULL) << 40);
1543 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001544 case MSR_EFER:
Avi Kivityf6801df2010-01-21 15:31:50 +02001545 data = vcpu->arch.efer;
Carsten Otte15c4a642007-10-30 18:44:17 +01001546 break;
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001547 case MSR_KVM_WALL_CLOCK:
Glauber Costa11c6bff2010-05-11 12:17:41 -04001548 case MSR_KVM_WALL_CLOCK_NEW:
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001549 data = vcpu->kvm->arch.wall_clock;
1550 break;
1551 case MSR_KVM_SYSTEM_TIME:
Glauber Costa11c6bff2010-05-11 12:17:41 -04001552 case MSR_KVM_SYSTEM_TIME_NEW:
Glauber de Oliveira Costa18068522008-02-15 17:52:47 -02001553 data = vcpu->arch.time;
1554 break;
Huang Ying890ca9a2009-05-11 16:48:15 +08001555 case MSR_IA32_P5_MC_ADDR:
1556 case MSR_IA32_P5_MC_TYPE:
1557 case MSR_IA32_MCG_CAP:
1558 case MSR_IA32_MCG_CTL:
1559 case MSR_IA32_MCG_STATUS:
1560 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1561 return get_msr_mce(vcpu, msr, pdata);
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001562 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1563 if (kvm_hv_msr_partition_wide(msr)) {
1564 int r;
1565 mutex_lock(&vcpu->kvm->lock);
1566 r = get_msr_hyperv_pw(vcpu, msr, pdata);
1567 mutex_unlock(&vcpu->kvm->lock);
1568 return r;
1569 } else
1570 return get_msr_hyperv(vcpu, msr, pdata);
1571 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001572 default:
Andre Przywaraed85c062009-06-25 12:36:49 +02001573 if (!ignore_msrs) {
1574 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
1575 return 1;
1576 } else {
1577 pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
1578 data = 0;
1579 }
1580 break;
Carsten Otte15c4a642007-10-30 18:44:17 +01001581 }
1582 *pdata = data;
1583 return 0;
1584}
1585EXPORT_SYMBOL_GPL(kvm_get_msr_common);
1586
Carsten Otte313a3dc2007-10-11 19:16:52 +02001587/*
1588 * Read or write a bunch of msrs. All parameters are kernel addresses.
1589 *
1590 * @return number of msrs processed successfully.
1591 */
1592static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
1593 struct kvm_msr_entry *entries,
1594 int (*do_msr)(struct kvm_vcpu *vcpu,
1595 unsigned index, u64 *data))
1596{
Marcelo Tosattif656ce02009-12-23 14:35:25 -02001597 int i, idx;
Carsten Otte313a3dc2007-10-11 19:16:52 +02001598
Marcelo Tosattif656ce02009-12-23 14:35:25 -02001599 idx = srcu_read_lock(&vcpu->kvm->srcu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001600 for (i = 0; i < msrs->nmsrs; ++i)
1601 if (do_msr(vcpu, entries[i].index, &entries[i].data))
1602 break;
Marcelo Tosattif656ce02009-12-23 14:35:25 -02001603 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001604
Carsten Otte313a3dc2007-10-11 19:16:52 +02001605 return i;
1606}
1607
1608/*
1609 * Read or write a bunch of msrs. Parameters are user addresses.
1610 *
1611 * @return number of msrs processed successfully.
1612 */
1613static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
1614 int (*do_msr)(struct kvm_vcpu *vcpu,
1615 unsigned index, u64 *data),
1616 int writeback)
1617{
1618 struct kvm_msrs msrs;
1619 struct kvm_msr_entry *entries;
1620 int r, n;
1621 unsigned size;
1622
1623 r = -EFAULT;
1624 if (copy_from_user(&msrs, user_msrs, sizeof msrs))
1625 goto out;
1626
1627 r = -E2BIG;
1628 if (msrs.nmsrs >= MAX_IO_MSRS)
1629 goto out;
1630
1631 r = -ENOMEM;
1632 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
Avi Kivity7a73c022010-07-22 23:24:52 +03001633 entries = kmalloc(size, GFP_KERNEL);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001634 if (!entries)
1635 goto out;
1636
1637 r = -EFAULT;
1638 if (copy_from_user(entries, user_msrs->entries, size))
1639 goto out_free;
1640
1641 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
1642 if (r < 0)
1643 goto out_free;
1644
1645 r = -EFAULT;
1646 if (writeback && copy_to_user(user_msrs->entries, entries, size))
1647 goto out_free;
1648
1649 r = n;
1650
1651out_free:
Avi Kivity7a73c022010-07-22 23:24:52 +03001652 kfree(entries);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001653out:
1654 return r;
1655}
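
/*
 * For reference, the userspace side of the msr_io() path is a single vcpu
 * ioctl carrying a struct kvm_msrs header followed by the entries. A
 * minimal sketch (assumes an open vcpu fd and <linux/kvm.h>; the MSR
 * chosen and the lack of error handling are illustrative only):
 *
 *	struct {
 *		struct kvm_msrs header;
 *		struct kvm_msr_entry entry[1];
 *	} req = {
 *		.header.nmsrs   = 1,
 *		.entry[0].index = MSR_EFER,
 *	};
 *	int done = ioctl(vcpu_fd, KVM_GET_MSRS, &req);
 *	if (done == 1)
 *		printf("EFER = %#llx\n", (unsigned long long)req.entry[0].data);
 *
 * The ioctl return value is the number of entries processed, matching
 * what __msr_io() returns above.
 */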
1656
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001657int kvm_dev_ioctl_check_extension(long ext)
1658{
1659 int r;
1660
1661 switch (ext) {
1662 case KVM_CAP_IRQCHIP:
1663 case KVM_CAP_HLT:
1664 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001665 case KVM_CAP_SET_TSS_ADDR:
Dan Kenigsberg07716712007-11-21 17:10:04 +02001666 case KVM_CAP_EXT_CPUID:
Gerd Hoffmannc8076602009-02-04 17:52:04 +01001667 case KVM_CAP_CLOCKSOURCE:
Sheng Yang78376992008-01-28 05:10:22 +08001668 case KVM_CAP_PIT:
Marcelo Tosattia28e4f52008-02-22 12:21:36 -05001669 case KVM_CAP_NOP_IO_DELAY:
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001670 case KVM_CAP_MP_STATE:
Avi Kivityed848622008-07-29 11:30:57 +03001671 case KVM_CAP_SYNC_MMU:
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02001672 case KVM_CAP_REINJECT_CONTROL:
Gleb Natapov49256632009-02-04 17:28:14 +02001673 case KVM_CAP_IRQ_INJECT_STATUS:
Sheng Yange56d5322009-03-12 21:45:39 +08001674 case KVM_CAP_ASSIGN_DEV_IRQ:
Gregory Haskins721eecbf2009-05-20 10:30:49 -04001675 case KVM_CAP_IRQFD:
Gregory Haskinsd34e6b12009-07-07 17:08:49 -04001676 case KVM_CAP_IOEVENTFD:
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02001677 case KVM_CAP_PIT2:
Beth Kone9f42752009-07-07 11:50:38 -04001678 case KVM_CAP_PIT_STATE2:
Sheng Yangb927a3c2009-07-21 10:42:48 +08001679 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
Ed Swierkffde22a2009-10-15 15:21:43 -07001680 case KVM_CAP_XEN_HVM:
Glauber Costaafbcf7a2009-10-16 15:28:36 -04001681 case KVM_CAP_ADJUST_CLOCK:
Jan Kiszka3cfc3092009-11-12 01:04:25 +01001682 case KVM_CAP_VCPU_EVENTS:
Gleb Natapov55cd8e52010-01-17 15:51:22 +02001683 case KVM_CAP_HYPERV:
Gleb Natapov10388a02010-01-17 15:51:23 +02001684 case KVM_CAP_HYPERV_VAPIC:
Gleb Natapovc25bc162010-01-17 15:51:24 +02001685 case KVM_CAP_HYPERV_SPIN:
Zhai, Edwinab9f4ec2010-01-29 14:38:44 +08001686 case KVM_CAP_PCI_SEGMENT:
Jan Kiszkaa1efbe72010-02-15 10:45:43 +01001687 case KVM_CAP_DEBUGREGS:
Jan Kiszkad2be1652010-02-23 17:47:57 +01001688 case KVM_CAP_X86_ROBUST_SINGLESTEP:
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001689 r = 1;
1690 break;
Laurent Vivier542472b2008-05-30 16:05:55 +02001691 case KVM_CAP_COALESCED_MMIO:
1692 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1693 break;
Avi Kivity774ead32007-12-26 13:57:04 +02001694 case KVM_CAP_VAPIC:
1695 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
1696 break;
Avi Kivityf7252302008-02-20 11:53:16 +02001697 case KVM_CAP_NR_VCPUS:
1698 r = KVM_MAX_VCPUS;
1699 break;
Avi Kivitya988b912008-02-20 11:59:20 +02001700 case KVM_CAP_NR_MEMSLOTS:
1701 r = KVM_MEMORY_SLOTS;
1702 break;
Marcelo Tosattia68a6a72009-10-01 19:28:39 -03001703 case KVM_CAP_PV_MMU: /* obsolete */
1704 r = 0;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05001705 break;
Ben-Ami Yassour62c476c2008-09-14 03:48:28 +03001706 case KVM_CAP_IOMMU:
Joerg Roedel19de40a2008-12-03 14:43:34 +01001707 r = iommu_found();
Ben-Ami Yassour62c476c2008-09-14 03:48:28 +03001708 break;
Huang Ying890ca9a2009-05-11 16:48:15 +08001709 case KVM_CAP_MCE:
1710 r = KVM_MAX_MCE_BANKS;
1711 break;
Zhang Xiantao018d00d2007-11-15 23:07:47 +08001712 default:
1713 r = 0;
1714 break;
1715 }
1716 return r;
1717
1718}
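
/*
 * kvm_dev_ioctl_check_extension() backs the KVM_CHECK_EXTENSION system
 * ioctl, so userspace sees the values returned above directly. A small
 * sketch (assumes an open /dev/kvm fd; illustrative only):
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VCPU_EVENTS) > 0)
 *		;	// KVM_{GET,SET}_VCPU_EVENTS are available
 *	int max_vcpus = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
 *
 * Boolean capabilities report 0/1, while capabilities such as
 * KVM_CAP_NR_VCPUS and KVM_CAP_NR_MEMSLOTS return a count.
 */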
1719
Carsten Otte043405e2007-10-10 17:16:19 +02001720long kvm_arch_dev_ioctl(struct file *filp,
1721 unsigned int ioctl, unsigned long arg)
1722{
1723 void __user *argp = (void __user *)arg;
1724 long r;
1725
1726 switch (ioctl) {
1727 case KVM_GET_MSR_INDEX_LIST: {
1728 struct kvm_msr_list __user *user_msr_list = argp;
1729 struct kvm_msr_list msr_list;
1730 unsigned n;
1731
1732 r = -EFAULT;
1733 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
1734 goto out;
1735 n = msr_list.nmsrs;
1736 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
1737 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
1738 goto out;
1739 r = -E2BIG;
Jan Kiszkae125e7b2009-07-02 21:45:47 +02001740 if (n < msr_list.nmsrs)
Carsten Otte043405e2007-10-10 17:16:19 +02001741 goto out;
1742 r = -EFAULT;
1743 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
1744 num_msrs_to_save * sizeof(u32)))
1745 goto out;
Jan Kiszkae125e7b2009-07-02 21:45:47 +02001746 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
Carsten Otte043405e2007-10-10 17:16:19 +02001747 &emulated_msrs,
1748 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
1749 goto out;
1750 r = 0;
1751 break;
1752 }
Avi Kivity674eea02008-02-11 18:37:23 +02001753 case KVM_GET_SUPPORTED_CPUID: {
1754 struct kvm_cpuid2 __user *cpuid_arg = argp;
1755 struct kvm_cpuid2 cpuid;
1756
1757 r = -EFAULT;
1758 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1759 goto out;
1760 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
Amit Shah19355472009-01-14 16:56:00 +00001761 cpuid_arg->entries);
Avi Kivity674eea02008-02-11 18:37:23 +02001762 if (r)
1763 goto out;
1764
1765 r = -EFAULT;
1766 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1767 goto out;
1768 r = 0;
1769 break;
1770 }
Huang Ying890ca9a2009-05-11 16:48:15 +08001771 case KVM_X86_GET_MCE_CAP_SUPPORTED: {
1772 u64 mce_cap;
1773
1774 mce_cap = KVM_MCE_CAP_SUPPORTED;
1775 r = -EFAULT;
1776 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
1777 goto out;
1778 r = 0;
1779 break;
1780 }
Carsten Otte043405e2007-10-10 17:16:19 +02001781 default:
1782 r = -EINVAL;
1783 }
1784out:
1785 return r;
1786}
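
/*
 * The KVM_GET_MSR_INDEX_LIST handling above implements the usual
 * two-step query: a call with too small an nmsrs fails with -E2BIG but
 * writes back the required count, and a second call retrieves the
 * indices. Userspace sketch (illustrative, no error handling):
 *
 *	struct kvm_msr_list probe = { .nmsrs = 0 };
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe);	// fails with E2BIG
 *	struct kvm_msr_list *list =
 *		malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
 *	list->nmsrs = probe.nmsrs;
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);	// fills list->indices[]
 */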
1787
Carsten Otte313a3dc2007-10-11 19:16:52 +02001788void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1789{
1790 kvm_x86_ops->vcpu_load(vcpu, cpu);
Zachary Amsden6b7d7e72009-10-09 16:26:08 -10001791 if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
1792 unsigned long khz = cpufreq_quick_get(cpu);
1793 if (!khz)
1794 khz = tsc_khz;
1795 per_cpu(cpu_tsc_khz, cpu) = khz;
1796 }
Gerd Hoffmannc8076602009-02-04 17:52:04 +01001797 kvm_request_guest_time_update(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001798}
1799
1800void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1801{
Avi Kivity02daab22009-12-30 12:40:26 +02001802 kvm_x86_ops->vcpu_put(vcpu);
Avi Kivity1c11e712010-05-03 16:05:44 +03001803 kvm_put_guest_fpu(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001804}
1805
Dan Kenigsberg07716712007-11-21 17:10:04 +02001806static int is_efer_nx(void)
Carsten Otte313a3dc2007-10-11 19:16:52 +02001807{
Avi Kivitye286e862009-05-03 18:50:55 +03001808 unsigned long long efer = 0;
Carsten Otte313a3dc2007-10-11 19:16:52 +02001809
Avi Kivitye286e862009-05-03 18:50:55 +03001810 rdmsrl_safe(MSR_EFER, &efer);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001811 return efer & EFER_NX;
1812}
1813
1814static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
1815{
1816 int i;
1817 struct kvm_cpuid_entry2 *e, *entry;
1818
Carsten Otte313a3dc2007-10-11 19:16:52 +02001819 entry = NULL;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001820 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
1821 e = &vcpu->arch.cpuid_entries[i];
Carsten Otte313a3dc2007-10-11 19:16:52 +02001822 if (e->function == 0x80000001) {
1823 entry = e;
1824 break;
1825 }
1826 }
Dan Kenigsberg07716712007-11-21 17:10:04 +02001827 if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
Carsten Otte313a3dc2007-10-11 19:16:52 +02001828 entry->edx &= ~(1 << 20);
1829 printk(KERN_INFO "kvm: guest NX capability removed\n");
1830 }
1831}
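
/*
 * Bit 20 of CPUID.80000001H:EDX is the NX flag, so the fixup above boils
 * down to (sketch):
 *
 *	if (!is_efer_nx())
 *		entry->edx &= ~(1u << 20);	// hide NX from the guest
 *
 * which keeps a guest from relying on no-execute page protection that
 * the host has disabled via EFER.NX.
 */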
1832
Dan Kenigsberg07716712007-11-21 17:10:04 +02001833/* when an old userspace process (legacy KVM_SET_CPUID) fills a new kernel module's cpuid table */
Carsten Otte313a3dc2007-10-11 19:16:52 +02001834static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
1835 struct kvm_cpuid *cpuid,
1836 struct kvm_cpuid_entry __user *entries)
1837{
Dan Kenigsberg07716712007-11-21 17:10:04 +02001838 int r, i;
1839 struct kvm_cpuid_entry *cpuid_entries;
1840
1841 r = -E2BIG;
1842 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1843 goto out;
1844 r = -ENOMEM;
1845 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
1846 if (!cpuid_entries)
1847 goto out;
1848 r = -EFAULT;
1849 if (copy_from_user(cpuid_entries, entries,
1850 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
1851 goto out_free;
1852 for (i = 0; i < cpuid->nent; i++) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001853 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
1854 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
1855 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
1856 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
1857 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
1858 vcpu->arch.cpuid_entries[i].index = 0;
1859 vcpu->arch.cpuid_entries[i].flags = 0;
1860 vcpu->arch.cpuid_entries[i].padding[0] = 0;
1861 vcpu->arch.cpuid_entries[i].padding[1] = 0;
1862 vcpu->arch.cpuid_entries[i].padding[2] = 0;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001863 }
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001864 vcpu->arch.cpuid_nent = cpuid->nent;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001865 cpuid_fix_nx_cap(vcpu);
1866 r = 0;
Gleb Natapovfc61b802009-07-05 17:39:35 +03001867 kvm_apic_set_version(vcpu);
Sheng Yang0e851882009-12-18 16:48:46 +08001868 kvm_x86_ops->cpuid_update(vcpu);
Dexuan Cui2acf9232010-06-10 11:27:12 +08001869 update_cpuid(vcpu);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001870
1871out_free:
1872 vfree(cpuid_entries);
1873out:
1874 return r;
1875}
1876
1877static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
Amit Shah19355472009-01-14 16:56:00 +00001878 struct kvm_cpuid2 *cpuid,
1879 struct kvm_cpuid_entry2 __user *entries)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001880{
Carsten Otte313a3dc2007-10-11 19:16:52 +02001881 int r;
1882
1883 r = -E2BIG;
1884 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1885 goto out;
1886 r = -EFAULT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001887 if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
Dan Kenigsberg07716712007-11-21 17:10:04 +02001888 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
Carsten Otte313a3dc2007-10-11 19:16:52 +02001889 goto out;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001890 vcpu->arch.cpuid_nent = cpuid->nent;
Gleb Natapovfc61b802009-07-05 17:39:35 +03001891 kvm_apic_set_version(vcpu);
Sheng Yang0e851882009-12-18 16:48:46 +08001892 kvm_x86_ops->cpuid_update(vcpu);
Dexuan Cui2acf9232010-06-10 11:27:12 +08001893 update_cpuid(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02001894 return 0;
1895
1896out:
1897 return r;
1898}
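
/*
 * Typical userspace flow for the two handlers above: fetch the supported
 * leaves from the system fd and hand them (possibly filtered) to each
 * vcpu. A minimal sketch (fixed-size buffer, no error handling,
 * illustrative only):
 *
 *	struct {
 *		struct kvm_cpuid2 hdr;
 *		struct kvm_cpuid_entry2 ent[100];
 *	} cpuid = { .hdr.nent = 100 };
 *
 *	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, &cpuid);	// system ioctl
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, &cpuid);		// per-vcpu ioctl
 */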
1899
Dan Kenigsberg07716712007-11-21 17:10:04 +02001900static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
Amit Shah19355472009-01-14 16:56:00 +00001901 struct kvm_cpuid2 *cpuid,
1902 struct kvm_cpuid_entry2 __user *entries)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001903{
1904 int r;
1905
1906 r = -E2BIG;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001907 if (cpuid->nent < vcpu->arch.cpuid_nent)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001908 goto out;
1909 r = -EFAULT;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001910 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
Amit Shah19355472009-01-14 16:56:00 +00001911 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
Dan Kenigsberg07716712007-11-21 17:10:04 +02001912 goto out;
1913 return 0;
1914
1915out:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001916 cpuid->nent = vcpu->arch.cpuid_nent;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001917 return r;
1918}
1919
Dan Kenigsberg07716712007-11-21 17:10:04 +02001920static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
Amit Shah19355472009-01-14 16:56:00 +00001921 u32 index)
Dan Kenigsberg07716712007-11-21 17:10:04 +02001922{
1923 entry->function = function;
1924 entry->index = index;
1925 cpuid_count(entry->function, entry->index,
Amit Shah19355472009-01-14 16:56:00 +00001926 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001927 entry->flags = 0;
1928}
1929
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001930#define F(x) bit(X86_FEATURE_##x)
1931
Dan Kenigsberg07716712007-11-21 17:10:04 +02001932static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1933 u32 index, int *nent, int maxnent)
1934{
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001935 unsigned f_nx = is_efer_nx() ? F(NX) : 0;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001936#ifdef CONFIG_X86_64
Sheng Yang17cc3932010-01-05 19:02:27 +08001937 unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
1938 ? F(GBPAGES) : 0;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001939 unsigned f_lm = F(LM);
1940#else
Sheng Yang17cc3932010-01-05 19:02:27 +08001941 unsigned f_gbpages = 0;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001942 unsigned f_lm = 0;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001943#endif
Sheng Yang4e47c7a2009-12-18 16:48:47 +08001944 unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001945
1946 /* cpuid 1.edx */
1947 const u32 kvm_supported_word0_x86_features =
1948 F(FPU) | F(VME) | F(DE) | F(PSE) |
1949 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1950 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
1951 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1952 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
1953 0 /* Reserved, DS, ACPI */ | F(MMX) |
1954 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
1955 0 /* HTT, TM, Reserved, PBE */;
1956 /* cpuid 0x80000001.edx */
1957 const u32 kvm_supported_word1_x86_features =
1958 F(FPU) | F(VME) | F(DE) | F(PSE) |
1959 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1960 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
1961 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1962 F(PAT) | F(PSE36) | 0 /* Reserved */ |
1963 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
Sheng Yang4e47c7a2009-12-18 16:48:47 +08001964 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001965 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
1966 /* cpuid 1.ecx */
1967 const u32 kvm_supported_word4_x86_features =
Avi Kivityd149c732009-05-10 14:41:56 +03001968 F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
1969 0 /* DS-CPL, VMX, SMX, EST */ |
1970 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
1971 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
1972 0 /* Reserved, DCA */ | F(XMM4_1) |
Gleb Natapov0105d1a2009-07-05 17:39:36 +03001973 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
Dexuan Cui2acf9232010-06-10 11:27:12 +08001974 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001975 /* cpuid 0x80000001.ecx */
Dan Kenigsberg07716712007-11-21 17:10:04 +02001976 const u32 kvm_supported_word6_x86_features =
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001977 F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
1978 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
1979 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
1980 0 /* SKINIT */ | 0 /* WDT */;
Dan Kenigsberg07716712007-11-21 17:10:04 +02001981
Amit Shah19355472009-01-14 16:56:00 +00001982 /* all calls to cpuid_count() should be made on the same cpu */
Dan Kenigsberg07716712007-11-21 17:10:04 +02001983 get_cpu();
1984 do_cpuid_1_ent(entry, function, index);
1985 ++*nent;
1986
1987 switch (function) {
1988 case 0:
Dexuan Cui2acf9232010-06-10 11:27:12 +08001989 entry->eax = min(entry->eax, (u32)0xd);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001990 break;
1991 case 1:
1992 entry->edx &= kvm_supported_word0_x86_features;
Avi Kivity7faa4ee2009-05-10 13:55:35 +03001993 entry->ecx &= kvm_supported_word4_x86_features;
Gleb Natapov0d1de2d92009-07-12 16:10:55 +03001994 /* we support x2apic emulation even if host does not support
1995 * it since we emulate x2apic in software */
1996 entry->ecx |= F(X2APIC);
Dan Kenigsberg07716712007-11-21 17:10:04 +02001997 break;
1998 /* function 2 entries are STATEFUL. That is, repeated cpuid commands
1999 * may return different values. This forces us to get_cpu() before
2000 * issuing the first command, and also to emulate this annoying behavior
2001 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
2002 case 2: {
2003 int t, times = entry->eax & 0xff;
2004
2005 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
Nitin A Kamble0fdf8e52008-11-05 15:56:21 -08002006 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
Dan Kenigsberg07716712007-11-21 17:10:04 +02002007 for (t = 1; t < times && *nent < maxnent; ++t) {
2008 do_cpuid_1_ent(&entry[t], function, 0);
2009 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2010 ++*nent;
2011 }
2012 break;
2013 }
2014 /* function 4 and 0xb have additional index. */
2015 case 4: {
Harvey Harrison14af3f32008-02-19 10:25:50 -08002016 int i, cache_type;
Dan Kenigsberg07716712007-11-21 17:10:04 +02002017
2018 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2019 /* read more entries until cache_type is zero */
Harvey Harrison14af3f32008-02-19 10:25:50 -08002020 for (i = 1; *nent < maxnent; ++i) {
2021 cache_type = entry[i - 1].eax & 0x1f;
Dan Kenigsberg07716712007-11-21 17:10:04 +02002022 if (!cache_type)
2023 break;
Harvey Harrison14af3f32008-02-19 10:25:50 -08002024 do_cpuid_1_ent(&entry[i], function, i);
2025 entry[i].flags |=
Dan Kenigsberg07716712007-11-21 17:10:04 +02002026 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2027 ++*nent;
2028 }
2029 break;
2030 }
2031 case 0xb: {
Harvey Harrison14af3f32008-02-19 10:25:50 -08002032 int i, level_type;
Dan Kenigsberg07716712007-11-21 17:10:04 +02002033
2034 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2035 /* read more entries until level_type is zero */
Harvey Harrison14af3f32008-02-19 10:25:50 -08002036 for (i = 1; *nent < maxnent; ++i) {
Nitin A Kamble0853d2c2008-11-05 15:37:36 -08002037 level_type = entry[i - 1].ecx & 0xff00;
Dan Kenigsberg07716712007-11-21 17:10:04 +02002038 if (!level_type)
2039 break;
Harvey Harrison14af3f32008-02-19 10:25:50 -08002040 do_cpuid_1_ent(&entry[i], function, i);
2041 entry[i].flags |=
Dan Kenigsberg07716712007-11-21 17:10:04 +02002042 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2043 ++*nent;
2044 }
2045 break;
2046 }
Dexuan Cui2acf9232010-06-10 11:27:12 +08002047 case 0xd: {
2048 int i;
2049
2050 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2051 for (i = 1; *nent < maxnent; ++i) {
2052 if (entry[i - 1].eax == 0 && i != 2)
2053 break;
2054 do_cpuid_1_ent(&entry[i], function, i);
2055 entry[i].flags |=
2056 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2057 ++*nent;
2058 }
2059 break;
2060 }
Glauber Costa84478c82010-05-11 12:17:43 -04002061 case KVM_CPUID_SIGNATURE: {
2062 char signature[12] = "KVMKVMKVM\0\0";
2063 u32 *sigptr = (u32 *)signature;
2064 entry->eax = 0;
2065 entry->ebx = sigptr[0];
2066 entry->ecx = sigptr[1];
2067 entry->edx = sigptr[2];
2068 break;
2069 }
2070 case KVM_CPUID_FEATURES:
2071 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
2072 (1 << KVM_FEATURE_NOP_IO_DELAY) |
Glauber Costa371bcf62010-05-11 12:17:46 -04002073 (1 << KVM_FEATURE_CLOCKSOURCE2) |
2074 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
Glauber Costa84478c82010-05-11 12:17:43 -04002075 entry->ebx = 0;
2076 entry->ecx = 0;
2077 entry->edx = 0;
2078 break;
Dan Kenigsberg07716712007-11-21 17:10:04 +02002079 case 0x80000000:
2080 entry->eax = min(entry->eax, 0x8000001a);
2081 break;
2082 case 0x80000001:
2083 entry->edx &= kvm_supported_word1_x86_features;
2084 entry->ecx &= kvm_supported_word6_x86_features;
2085 break;
2086 }
Joerg Roedeld4330ef2010-04-22 12:33:11 +02002087
2088 kvm_x86_ops->set_supported_cpuid(function, entry);
2089
Dan Kenigsberg07716712007-11-21 17:10:04 +02002090 put_cpu();
2091}
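
/*
 * Once userspace exposes the KVM_CPUID_SIGNATURE leaf built above to the
 * guest, the guest can detect KVM by comparing the signature registers.
 * A guest-kernel-side sketch using the cpuid() helper from
 * <asm/processor.h> (assumes KVM_CPUID_SIGNATURE == 0x40000000 as in
 * <asm/kvm_para.h>; illustrative only):
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	char sig[13];
 *	int running_on_kvm;
 *
 *	cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
 *	memcpy(sig + 0, &ebx, 4);
 *	memcpy(sig + 4, &ecx, 4);
 *	memcpy(sig + 8, &edx, 4);
 *	sig[12] = '\0';
 *	running_on_kvm = !strcmp(sig, "KVMKVMKVM");
 */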
2092
Avi Kivity7faa4ee2009-05-10 13:55:35 +03002093#undef F
2094
Avi Kivity674eea02008-02-11 18:37:23 +02002095static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
Amit Shah19355472009-01-14 16:56:00 +00002096 struct kvm_cpuid_entry2 __user *entries)
Dan Kenigsberg07716712007-11-21 17:10:04 +02002097{
2098 struct kvm_cpuid_entry2 *cpuid_entries;
2099 int limit, nent = 0, r = -E2BIG;
2100 u32 func;
2101
2102 if (cpuid->nent < 1)
2103 goto out;
Avi Kivity6a544352009-10-04 16:45:13 +02002104 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2105 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
Dan Kenigsberg07716712007-11-21 17:10:04 +02002106 r = -ENOMEM;
2107 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
2108 if (!cpuid_entries)
2109 goto out;
2110
2111 do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
2112 limit = cpuid_entries[0].eax;
2113 for (func = 1; func <= limit && nent < cpuid->nent; ++func)
2114 do_cpuid_ent(&cpuid_entries[nent], func, 0,
Amit Shah19355472009-01-14 16:56:00 +00002115 &nent, cpuid->nent);
Dan Kenigsberg07716712007-11-21 17:10:04 +02002116 r = -E2BIG;
2117 if (nent >= cpuid->nent)
2118 goto out_free;
2119
2120 do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
2121 limit = cpuid_entries[nent - 1].eax;
2122 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
2123 do_cpuid_ent(&cpuid_entries[nent], func, 0,
Amit Shah19355472009-01-14 16:56:00 +00002124 &nent, cpuid->nent);
Glauber Costa84478c82010-05-11 12:17:43 -04002125
2126
2127
2128 r = -E2BIG;
2129 if (nent >= cpuid->nent)
2130 goto out_free;
2131
2132 do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
2133 cpuid->nent);
2134
2135 r = -E2BIG;
2136 if (nent >= cpuid->nent)
2137 goto out_free;
2138
2139 do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
2140 cpuid->nent);
2141
Mark McLoughlincb007642009-05-12 12:36:44 +01002142 r = -E2BIG;
2143 if (nent >= cpuid->nent)
2144 goto out_free;
2145
Dan Kenigsberg07716712007-11-21 17:10:04 +02002146 r = -EFAULT;
2147 if (copy_to_user(entries, cpuid_entries,
Amit Shah19355472009-01-14 16:56:00 +00002148 nent * sizeof(struct kvm_cpuid_entry2)))
Dan Kenigsberg07716712007-11-21 17:10:04 +02002149 goto out_free;
2150 cpuid->nent = nent;
2151 r = 0;
2152
2153out_free:
2154 vfree(cpuid_entries);
2155out:
2156 return r;
2157}
2158
Carsten Otte313a3dc2007-10-11 19:16:52 +02002159static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2160 struct kvm_lapic_state *s)
2161{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002162 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002163
2164 return 0;
2165}
2166
2167static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2168 struct kvm_lapic_state *s)
2169{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002170 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002171 kvm_apic_post_state_restore(vcpu);
Gleb Natapovcb142eb2009-08-09 15:17:40 +03002172 update_cr8_intercept(vcpu);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002173
2174 return 0;
2175}
2176
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08002177static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2178 struct kvm_interrupt *irq)
2179{
2180 if (irq->irq < 0 || irq->irq >= 256)
2181 return -EINVAL;
2182 if (irqchip_in_kernel(vcpu->kvm))
2183 return -ENXIO;
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08002184
Gleb Natapov66fd3f72009-05-11 13:35:50 +03002185 kvm_queue_interrupt(vcpu, irq->irq, false);
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08002186
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08002187 return 0;
2188}
2189
Jan Kiszkac4abb7c2008-09-26 09:30:55 +02002190static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2191{
Jan Kiszkac4abb7c2008-09-26 09:30:55 +02002192 kvm_inject_nmi(vcpu);
Jan Kiszkac4abb7c2008-09-26 09:30:55 +02002193
2194 return 0;
2195}
2196
Avi Kivityb209749f2007-10-22 16:50:39 +02002197static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2198 struct kvm_tpr_access_ctl *tac)
2199{
2200 if (tac->flags)
2201 return -EINVAL;
2202 vcpu->arch.tpr_access_reporting = !!tac->enabled;
2203 return 0;
2204}
2205
Huang Ying890ca9a2009-05-11 16:48:15 +08002206static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2207 u64 mcg_cap)
2208{
2209 int r;
2210 unsigned bank_num = mcg_cap & 0xff, bank;
2211
2212 r = -EINVAL;
Jan Kiszkaa9e38c3e2009-10-23 09:37:00 +02002213 if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
Huang Ying890ca9a2009-05-11 16:48:15 +08002214 goto out;
2215 if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2216 goto out;
2217 r = 0;
2218 vcpu->arch.mcg_cap = mcg_cap;
2219 /* Init IA32_MCG_CTL to all 1s */
2220 if (mcg_cap & MCG_CTL_P)
2221 vcpu->arch.mcg_ctl = ~(u64)0;
2222 /* Init IA32_MCi_CTL to all 1s */
2223 for (bank = 0; bank < bank_num; bank++)
2224 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2225out:
2226 return r;
2227}
2228
2229static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2230 struct kvm_x86_mce *mce)
2231{
2232 u64 mcg_cap = vcpu->arch.mcg_cap;
2233 unsigned bank_num = mcg_cap & 0xff;
2234 u64 *banks = vcpu->arch.mce_banks;
2235
2236 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2237 return -EINVAL;
2238 /*
2239 * if IA32_MCG_CTL is not all 1s, the uncorrected error
2240 * reporting is disabled
2241 */
2242 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2243 vcpu->arch.mcg_ctl != ~(u64)0)
2244 return 0;
2245 banks += 4 * mce->bank;
2246 /*
2247 * if IA32_MCi_CTL is not all 1s, the uncorrected error
2248 * reporting is disabled for the bank
2249 */
2250 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2251 return 0;
2252 if (mce->status & MCI_STATUS_UC) {
2253 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
Avi Kivityfc78f512009-12-07 12:16:48 +02002254 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
Huang Ying890ca9a2009-05-11 16:48:15 +08002255 printk(KERN_DEBUG "kvm: set_mce: "
2256 "injects mce exception while "
2257 "previous one is in progress!\n");
2258 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
2259 return 0;
2260 }
2261 if (banks[1] & MCI_STATUS_VAL)
2262 mce->status |= MCI_STATUS_OVER;
2263 banks[2] = mce->addr;
2264 banks[3] = mce->misc;
2265 vcpu->arch.mcg_status = mce->mcg_status;
2266 banks[1] = mce->status;
2267 kvm_queue_exception(vcpu, MC_VECTOR);
2268 } else if (!(banks[1] & MCI_STATUS_VAL)
2269 || !(banks[1] & MCI_STATUS_UC)) {
2270 if (banks[1] & MCI_STATUS_VAL)
2271 mce->status |= MCI_STATUS_OVER;
2272 banks[2] = mce->addr;
2273 banks[3] = mce->misc;
2274 banks[1] = mce->status;
2275 } else
2276 banks[1] |= MCI_STATUS_OVER;
2277 return 0;
2278}
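
/*
 * These two handlers back the KVM_X86_SETUP_MCE and KVM_X86_SET_MCE vcpu
 * ioctls. A minimal userspace sketch of injecting an uncorrected error
 * into bank 0 (values are illustrative, and the guest must have CR4.MCE
 * set or the injection degrades to a triple fault as coded above):
 *
 *	__u64 mcg_cap;
 *
 *	ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mcg_cap);
 *	mcg_cap |= 8;				// advertise 8 banks
 *	ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap);
 *
 *	struct kvm_x86_mce mce = {
 *		.bank       = 0,
 *		.status     = MCI_STATUS_VAL | MCI_STATUS_UC,
 *		.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
 *	};
 *	ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
 */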
2279
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002280static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2281 struct kvm_vcpu_events *events)
2282{
Jan Kiszka03b82a32010-02-15 10:45:41 +01002283 events->exception.injected =
2284 vcpu->arch.exception.pending &&
2285 !kvm_exception_is_soft(vcpu->arch.exception.nr);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002286 events->exception.nr = vcpu->arch.exception.nr;
2287 events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2288 events->exception.error_code = vcpu->arch.exception.error_code;
2289
Jan Kiszka03b82a32010-02-15 10:45:41 +01002290 events->interrupt.injected =
2291 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002292 events->interrupt.nr = vcpu->arch.interrupt.nr;
Jan Kiszka03b82a32010-02-15 10:45:41 +01002293 events->interrupt.soft = 0;
Jan Kiszka48005f62010-02-19 19:38:07 +01002294 events->interrupt.shadow =
2295 kvm_x86_ops->get_interrupt_shadow(vcpu,
2296 KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002297
2298 events->nmi.injected = vcpu->arch.nmi_injected;
2299 events->nmi.pending = vcpu->arch.nmi_pending;
2300 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2301
2302 events->sipi_vector = vcpu->arch.sipi_vector;
2303
Jan Kiszkadab4b912009-12-06 18:24:15 +01002304 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
Jan Kiszka48005f62010-02-19 19:38:07 +01002305 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2306 | KVM_VCPUEVENT_VALID_SHADOW);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002307}
2308
2309static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2310 struct kvm_vcpu_events *events)
2311{
Jan Kiszkadab4b912009-12-06 18:24:15 +01002312 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
Jan Kiszka48005f62010-02-19 19:38:07 +01002313 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2314 | KVM_VCPUEVENT_VALID_SHADOW))
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002315 return -EINVAL;
2316
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002317 vcpu->arch.exception.pending = events->exception.injected;
2318 vcpu->arch.exception.nr = events->exception.nr;
2319 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2320 vcpu->arch.exception.error_code = events->exception.error_code;
2321
2322 vcpu->arch.interrupt.pending = events->interrupt.injected;
2323 vcpu->arch.interrupt.nr = events->interrupt.nr;
2324 vcpu->arch.interrupt.soft = events->interrupt.soft;
2325 if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
2326 kvm_pic_clear_isr_ack(vcpu->kvm);
Jan Kiszka48005f62010-02-19 19:38:07 +01002327 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
2328 kvm_x86_ops->set_interrupt_shadow(vcpu,
2329 events->interrupt.shadow);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002330
2331 vcpu->arch.nmi_injected = events->nmi.injected;
Jan Kiszkadab4b912009-12-06 18:24:15 +01002332 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2333 vcpu->arch.nmi_pending = events->nmi.pending;
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002334 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2335
Jan Kiszkadab4b912009-12-06 18:24:15 +01002336 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2337 vcpu->arch.sipi_vector = events->sipi_vector;
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002338
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002339 return 0;
2340}
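
/*
 * A userspace save/restore sketch for the pair above (e.g. around reset
 * or migration; illustrative only). Exception, interrupt and NMI
 * injection state is always applied on the set side, while events.flags
 * selects whether the NMI-pending, SIPI-vector and interrupt-shadow
 * fields are honoured:
 *
 *	struct kvm_vcpu_events events;
 *
 *	ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events);
 *	events.flags = KVM_VCPUEVENT_VALID_NMI_PENDING |
 *		       KVM_VCPUEVENT_VALID_SIPI_VECTOR |
 *		       KVM_VCPUEVENT_VALID_SHADOW;
 *	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
 */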
2341
Jan Kiszkaa1efbe72010-02-15 10:45:43 +01002342static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2343 struct kvm_debugregs *dbgregs)
2344{
Jan Kiszkaa1efbe72010-02-15 10:45:43 +01002345 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
2346 dbgregs->dr6 = vcpu->arch.dr6;
2347 dbgregs->dr7 = vcpu->arch.dr7;
2348 dbgregs->flags = 0;
Jan Kiszkaa1efbe72010-02-15 10:45:43 +01002349}
2350
2351static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2352 struct kvm_debugregs *dbgregs)
2353{
2354 if (dbgregs->flags)
2355 return -EINVAL;
2356
Jan Kiszkaa1efbe72010-02-15 10:45:43 +01002357 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
2358 vcpu->arch.dr6 = dbgregs->dr6;
2359 vcpu->arch.dr7 = dbgregs->dr7;
2360
Jan Kiszkaa1efbe72010-02-15 10:45:43 +01002361 return 0;
2362}
2363
Carsten Otte313a3dc2007-10-11 19:16:52 +02002364long kvm_arch_vcpu_ioctl(struct file *filp,
2365 unsigned int ioctl, unsigned long arg)
2366{
2367 struct kvm_vcpu *vcpu = filp->private_data;
2368 void __user *argp = (void __user *)arg;
2369 int r;
Dave Hansenb772ff32008-08-11 10:01:47 -07002370 struct kvm_lapic_state *lapic = NULL;
Carsten Otte313a3dc2007-10-11 19:16:52 +02002371
2372 switch (ioctl) {
2373 case KVM_GET_LAPIC: {
Marcelo Tosatti2204ae32009-10-29 13:44:16 -02002374 r = -EINVAL;
2375 if (!vcpu->arch.apic)
2376 goto out;
Dave Hansenb772ff32008-08-11 10:01:47 -07002377 lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002378
Dave Hansenb772ff32008-08-11 10:01:47 -07002379 r = -ENOMEM;
2380 if (!lapic)
2381 goto out;
2382 r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002383 if (r)
2384 goto out;
2385 r = -EFAULT;
Dave Hansenb772ff32008-08-11 10:01:47 -07002386 if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
Carsten Otte313a3dc2007-10-11 19:16:52 +02002387 goto out;
2388 r = 0;
2389 break;
2390 }
2391 case KVM_SET_LAPIC: {
Marcelo Tosatti2204ae32009-10-29 13:44:16 -02002392 r = -EINVAL;
2393 if (!vcpu->arch.apic)
2394 goto out;
Dave Hansenb772ff32008-08-11 10:01:47 -07002395 lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2396 r = -ENOMEM;
2397 if (!lapic)
Carsten Otte313a3dc2007-10-11 19:16:52 +02002398 goto out;
Dave Hansenb772ff32008-08-11 10:01:47 -07002399 r = -EFAULT;
2400 if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
2401 goto out;
2402 r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002403 if (r)
2404 goto out;
2405 r = 0;
2406 break;
2407 }
Zhang Xiantaof77bc6a2007-11-21 04:36:41 +08002408 case KVM_INTERRUPT: {
2409 struct kvm_interrupt irq;
2410
2411 r = -EFAULT;
2412 if (copy_from_user(&irq, argp, sizeof irq))
2413 goto out;
2414 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2415 if (r)
2416 goto out;
2417 r = 0;
2418 break;
2419 }
Jan Kiszkac4abb7c2008-09-26 09:30:55 +02002420 case KVM_NMI: {
2421 r = kvm_vcpu_ioctl_nmi(vcpu);
2422 if (r)
2423 goto out;
2424 r = 0;
2425 break;
2426 }
Carsten Otte313a3dc2007-10-11 19:16:52 +02002427 case KVM_SET_CPUID: {
2428 struct kvm_cpuid __user *cpuid_arg = argp;
2429 struct kvm_cpuid cpuid;
2430
2431 r = -EFAULT;
2432 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2433 goto out;
2434 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2435 if (r)
2436 goto out;
2437 break;
2438 }
Dan Kenigsberg07716712007-11-21 17:10:04 +02002439 case KVM_SET_CPUID2: {
2440 struct kvm_cpuid2 __user *cpuid_arg = argp;
2441 struct kvm_cpuid2 cpuid;
2442
2443 r = -EFAULT;
2444 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2445 goto out;
2446 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
Amit Shah19355472009-01-14 16:56:00 +00002447 cpuid_arg->entries);
Dan Kenigsberg07716712007-11-21 17:10:04 +02002448 if (r)
2449 goto out;
2450 break;
2451 }
2452 case KVM_GET_CPUID2: {
2453 struct kvm_cpuid2 __user *cpuid_arg = argp;
2454 struct kvm_cpuid2 cpuid;
2455
2456 r = -EFAULT;
2457 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2458 goto out;
2459 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
Amit Shah19355472009-01-14 16:56:00 +00002460 cpuid_arg->entries);
Dan Kenigsberg07716712007-11-21 17:10:04 +02002461 if (r)
2462 goto out;
2463 r = -EFAULT;
2464 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2465 goto out;
2466 r = 0;
2467 break;
2468 }
Carsten Otte313a3dc2007-10-11 19:16:52 +02002469 case KVM_GET_MSRS:
2470 r = msr_io(vcpu, argp, kvm_get_msr, 1);
2471 break;
2472 case KVM_SET_MSRS:
2473 r = msr_io(vcpu, argp, do_set_msr, 0);
2474 break;
Avi Kivityb209749f2007-10-22 16:50:39 +02002475 case KVM_TPR_ACCESS_REPORTING: {
2476 struct kvm_tpr_access_ctl tac;
2477
2478 r = -EFAULT;
2479 if (copy_from_user(&tac, argp, sizeof tac))
2480 goto out;
2481 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
2482 if (r)
2483 goto out;
2484 r = -EFAULT;
2485 if (copy_to_user(argp, &tac, sizeof tac))
2486 goto out;
2487 r = 0;
2488 break;
2489 };
Avi Kivityb93463a2007-10-25 16:52:32 +02002490 case KVM_SET_VAPIC_ADDR: {
2491 struct kvm_vapic_addr va;
2492
2493 r = -EINVAL;
2494 if (!irqchip_in_kernel(vcpu->kvm))
2495 goto out;
2496 r = -EFAULT;
2497 if (copy_from_user(&va, argp, sizeof va))
2498 goto out;
2499 r = 0;
2500 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
2501 break;
2502 }
Huang Ying890ca9a2009-05-11 16:48:15 +08002503 case KVM_X86_SETUP_MCE: {
2504 u64 mcg_cap;
2505
2506 r = -EFAULT;
2507 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
2508 goto out;
2509 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
2510 break;
2511 }
2512 case KVM_X86_SET_MCE: {
2513 struct kvm_x86_mce mce;
2514
2515 r = -EFAULT;
2516 if (copy_from_user(&mce, argp, sizeof mce))
2517 goto out;
2518 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
2519 break;
2520 }
Jan Kiszka3cfc3092009-11-12 01:04:25 +01002521 case KVM_GET_VCPU_EVENTS: {
2522 struct kvm_vcpu_events events;
2523
2524 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
2525
2526 r = -EFAULT;
2527 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
2528 break;
2529 r = 0;
2530 break;
2531 }
2532 case KVM_SET_VCPU_EVENTS: {
2533 struct kvm_vcpu_events events;
2534
2535 r = -EFAULT;
2536 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
2537 break;
2538
2539 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
2540 break;
2541 }
Jan Kiszkaa1efbe72010-02-15 10:45:43 +01002542 case KVM_GET_DEBUGREGS: {
2543 struct kvm_debugregs dbgregs;
2544
2545 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
2546
2547 r = -EFAULT;
2548 if (copy_to_user(argp, &dbgregs,
2549 sizeof(struct kvm_debugregs)))
2550 break;
2551 r = 0;
2552 break;
2553 }
2554 case KVM_SET_DEBUGREGS: {
2555 struct kvm_debugregs dbgregs;
2556
2557 r = -EFAULT;
2558 if (copy_from_user(&dbgregs, argp,
2559 sizeof(struct kvm_debugregs)))
2560 break;
2561
2562 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
2563 break;
2564 }
Carsten Otte313a3dc2007-10-11 19:16:52 +02002565 default:
2566 r = -EINVAL;
2567 }
2568out:
Wei Yongjun7a6ce842009-03-31 16:47:44 +08002569 kfree(lapic);
Carsten Otte313a3dc2007-10-11 19:16:52 +02002570 return r;
2571}
2572
Carsten Otte1fe779f2007-10-29 16:08:35 +01002573static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
2574{
2575 int ret;
2576
2577 if (addr > (unsigned int)(-3 * PAGE_SIZE))
2578 return -1;
2579 ret = kvm_x86_ops->set_tss_addr(kvm, addr);
2580 return ret;
2581}
2582
Sheng Yangb927a3c2009-07-21 10:42:48 +08002583static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
2584 u64 ident_addr)
2585{
2586 kvm->arch.ept_identity_map_addr = ident_addr;
2587 return 0;
2588}
2589
Carsten Otte1fe779f2007-10-29 16:08:35 +01002590static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2591 u32 kvm_nr_mmu_pages)
2592{
2593 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
2594 return -EINVAL;
2595
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002596 mutex_lock(&kvm->slots_lock);
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002597 spin_lock(&kvm->mmu_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002598
2599 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002600 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002601
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002602 spin_unlock(&kvm->mmu_lock);
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002603 mutex_unlock(&kvm->slots_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002604 return 0;
2605}
2606
2607static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2608{
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002609 return kvm->arch.n_alloc_mmu_pages;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002610}
2611
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002612gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
2613{
2614 int i;
2615 struct kvm_mem_alias *alias;
2616 struct kvm_mem_aliases *aliases;
2617
Lai Jiangshan90d83dc2010-04-19 17:41:23 +08002618 aliases = kvm_aliases(kvm);
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002619
2620 for (i = 0; i < aliases->naliases; ++i) {
2621 alias = &aliases->aliases[i];
2622 if (alias->flags & KVM_ALIAS_INVALID)
2623 continue;
2624 if (gfn >= alias->base_gfn
2625 && gfn < alias->base_gfn + alias->npages)
2626 return alias->target_gfn + gfn - alias->base_gfn;
2627 }
2628 return gfn;
2629}
2630
Zhang Xiantaoe9f85cd2007-11-22 11:20:33 +08002631gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
2632{
2633 int i;
2634 struct kvm_mem_alias *alias;
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002635 struct kvm_mem_aliases *aliases;
2636
Lai Jiangshan90d83dc2010-04-19 17:41:23 +08002637 aliases = kvm_aliases(kvm);
Zhang Xiantaoe9f85cd2007-11-22 11:20:33 +08002638
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02002639 for (i = 0; i < aliases->naliases; ++i) {
2640 alias = &aliases->aliases[i];
Zhang Xiantaoe9f85cd2007-11-22 11:20:33 +08002641 if (gfn >= alias->base_gfn
2642 && gfn < alias->base_gfn + alias->npages)
2643 return alias->target_gfn + gfn - alias->base_gfn;
2644 }
2645 return gfn;
2646}
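
/*
 * A worked example of the translation above (numbers are illustrative):
 * with an alias of base_gfn = 0xa0, npages = 0x20, target_gfn = 0x100,
 * a lookup of gfn 0xb3 hits the range and returns
 *
 *	0x100 + 0xb3 - 0xa0 = 0x113
 *
 * while a gfn outside every alias range is returned unchanged.
 */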
2647
Carsten Otte1fe779f2007-10-29 16:08:35 +01002648/*
2649 * Set a new alias region. Aliases map a portion of physical memory into
2650 * another portion. This is useful for memory windows, for example the PC
2651 * VGA region.
2652 */
2653static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
2654 struct kvm_memory_alias *alias)
2655{
2656 int r, n;
2657 struct kvm_mem_alias *p;
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002658 struct kvm_mem_aliases *aliases, *old_aliases;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002659
2660 r = -EINVAL;
2661 /* General sanity checks */
2662 if (alias->memory_size & (PAGE_SIZE - 1))
2663 goto out;
2664 if (alias->guest_phys_addr & (PAGE_SIZE - 1))
2665 goto out;
2666 if (alias->slot >= KVM_ALIAS_SLOTS)
2667 goto out;
2668 if (alias->guest_phys_addr + alias->memory_size
2669 < alias->guest_phys_addr)
2670 goto out;
2671 if (alias->target_phys_addr + alias->memory_size
2672 < alias->target_phys_addr)
2673 goto out;
2674
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002675 r = -ENOMEM;
2676 aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
2677 if (!aliases)
2678 goto out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002679
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002680 mutex_lock(&kvm->slots_lock);
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002681
2682 /* invalidate any gfn reference in case of deletion/shrinking */
2683 memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
2684 aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
2685 old_aliases = kvm->arch.aliases;
2686 rcu_assign_pointer(kvm->arch.aliases, aliases);
2687 synchronize_srcu_expedited(&kvm->srcu);
2688 kvm_mmu_zap_all(kvm);
2689 kfree(old_aliases);
2690
2691 r = -ENOMEM;
2692 aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
2693 if (!aliases)
2694 goto out_unlock;
2695
2696 memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02002697
2698 p = &aliases->aliases[alias->slot];
Carsten Otte1fe779f2007-10-29 16:08:35 +01002699 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
2700 p->npages = alias->memory_size >> PAGE_SHIFT;
2701 p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002702 p->flags &= ~(KVM_ALIAS_INVALID);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002703
2704 for (n = KVM_ALIAS_SLOTS; n > 0; --n)
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02002705 if (aliases->aliases[n - 1].npages)
Carsten Otte1fe779f2007-10-29 16:08:35 +01002706 break;
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02002707 aliases->naliases = n;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002708
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002709 old_aliases = kvm->arch.aliases;
2710 rcu_assign_pointer(kvm->arch.aliases, aliases);
2711 synchronize_srcu_expedited(&kvm->srcu);
2712 kfree(old_aliases);
2713 r = 0;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002714
Marcelo Tosattia983fb22009-12-23 14:35:23 -02002715out_unlock:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002716 mutex_unlock(&kvm->slots_lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002717out:
2718 return r;
2719}
2720
2721static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2722{
2723 int r;
2724
2725 r = 0;
2726 switch (chip->chip_id) {
2727 case KVM_IRQCHIP_PIC_MASTER:
2728 memcpy(&chip->chip.pic,
2729 &pic_irqchip(kvm)->pics[0],
2730 sizeof(struct kvm_pic_state));
2731 break;
2732 case KVM_IRQCHIP_PIC_SLAVE:
2733 memcpy(&chip->chip.pic,
2734 &pic_irqchip(kvm)->pics[1],
2735 sizeof(struct kvm_pic_state));
2736 break;
2737 case KVM_IRQCHIP_IOAPIC:
Gleb Natapoveba02262009-08-24 11:54:25 +03002738 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002739 break;
2740 default:
2741 r = -EINVAL;
2742 break;
2743 }
2744 return r;
2745}
2746
2747static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2748{
2749 int r;
2750
2751 r = 0;
2752 switch (chip->chip_id) {
2753 case KVM_IRQCHIP_PIC_MASTER:
Thomas Gleixnerfa8273e2010-02-17 14:00:41 +00002754 raw_spin_lock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002755 memcpy(&pic_irqchip(kvm)->pics[0],
2756 &chip->chip.pic,
2757 sizeof(struct kvm_pic_state));
Thomas Gleixnerfa8273e2010-02-17 14:00:41 +00002758 raw_spin_unlock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002759 break;
2760 case KVM_IRQCHIP_PIC_SLAVE:
Thomas Gleixnerfa8273e2010-02-17 14:00:41 +00002761 raw_spin_lock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002762 memcpy(&pic_irqchip(kvm)->pics[1],
2763 &chip->chip.pic,
2764 sizeof(struct kvm_pic_state));
Thomas Gleixnerfa8273e2010-02-17 14:00:41 +00002765 raw_spin_unlock(&pic_irqchip(kvm)->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002766 break;
2767 case KVM_IRQCHIP_IOAPIC:
Gleb Natapoveba02262009-08-24 11:54:25 +03002768 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002769 break;
2770 default:
2771 r = -EINVAL;
2772 break;
2773 }
2774 kvm_pic_update_irq(pic_irqchip(kvm));
2775 return r;
2776}
2777
Sheng Yange0f63cb2008-03-04 00:50:59 +08002778static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2779{
2780 int r = 0;
2781
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002782 mutex_lock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002783 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002784 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002785 return r;
2786}
2787
2788static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2789{
2790 int r = 0;
2791
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002792 mutex_lock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002793 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
Beth Kone9f42752009-07-07 11:50:38 -04002794 kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
2795 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2796 return r;
2797}
2798
2799static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2800{
2801 int r = 0;
2802
2803 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2804 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
2805 sizeof(ps->channels));
2806 ps->flags = kvm->arch.vpit->pit_state.flags;
2807 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2808 return r;
2809}
2810
2811static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2812{
2813 int r = 0, start = 0;
2814 u32 prev_legacy, cur_legacy;
2815 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2816 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
2817 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
2818 if (!prev_legacy && cur_legacy)
2819 start = 1;
2820 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
2821 sizeof(kvm->arch.vpit->pit_state.channels));
2822 kvm->arch.vpit->pit_state.flags = ps->flags;
2823 kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002824 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
Sheng Yange0f63cb2008-03-04 00:50:59 +08002825 return r;
2826}
2827
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002828static int kvm_vm_ioctl_reinject(struct kvm *kvm,
2829 struct kvm_reinject_control *control)
2830{
2831 if (!kvm->arch.vpit)
2832 return -ENXIO;
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002833 mutex_lock(&kvm->arch.vpit->pit_state.lock);
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002834 kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
Marcelo Tosatti894a9c52009-06-23 15:05:14 -03002835 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02002836 return 0;
2837}
2838
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002839/*
2840 * Get (and clear) the dirty memory log for a memory slot.
2841 */
2842int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2843 struct kvm_dirty_log *log)
2844{
Takuya Yoshikawa87bf6e72010-04-12 19:35:35 +09002845 int r, i;
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002846 struct kvm_memory_slot *memslot;
Takuya Yoshikawa87bf6e72010-04-12 19:35:35 +09002847 unsigned long n;
Marcelo Tosattib050b012009-12-23 14:35:22 -02002848 unsigned long is_dirty = 0;
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002849
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002850 mutex_lock(&kvm->slots_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002851
Marcelo Tosattib050b012009-12-23 14:35:22 -02002852 r = -EINVAL;
2853 if (log->slot >= KVM_MEMORY_SLOTS)
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002854 goto out;
2855
Marcelo Tosattib050b012009-12-23 14:35:22 -02002856 memslot = &kvm->memslots->memslots[log->slot];
2857 r = -ENOENT;
2858 if (!memslot->dirty_bitmap)
2859 goto out;
2860
Takuya Yoshikawa87bf6e72010-04-12 19:35:35 +09002861 n = kvm_dirty_bitmap_bytes(memslot);
Marcelo Tosattib050b012009-12-23 14:35:22 -02002862
Marcelo Tosattib050b012009-12-23 14:35:22 -02002863 for (i = 0; !is_dirty && i < n/sizeof(long); i++)
2864 is_dirty = memslot->dirty_bitmap[i];
2865
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002866 /* If nothing is dirty, don't bother messing with page tables. */
2867 if (is_dirty) {
Marcelo Tosattib050b012009-12-23 14:35:22 -02002868 struct kvm_memslots *slots, *old_slots;
Takuya Yoshikawa914ebcc2010-04-28 18:50:36 +09002869 unsigned long *dirty_bitmap;
Marcelo Tosattib050b012009-12-23 14:35:22 -02002870
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002871 spin_lock(&kvm->mmu_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002872 kvm_mmu_slot_remove_write_access(kvm, log->slot);
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03002873 spin_unlock(&kvm->mmu_lock);
Marcelo Tosattib050b012009-12-23 14:35:22 -02002874
Takuya Yoshikawa914ebcc2010-04-28 18:50:36 +09002875 r = -ENOMEM;
2876 dirty_bitmap = vmalloc(n);
2877 if (!dirty_bitmap)
2878 goto out;
2879 memset(dirty_bitmap, 0, n);
Marcelo Tosattib050b012009-12-23 14:35:22 -02002880
Takuya Yoshikawa914ebcc2010-04-28 18:50:36 +09002881 r = -ENOMEM;
2882 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
2883 if (!slots) {
2884 vfree(dirty_bitmap);
2885 goto out;
2886 }
Marcelo Tosattib050b012009-12-23 14:35:22 -02002887 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
2888 slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
2889
2890 old_slots = kvm->memslots;
2891 rcu_assign_pointer(kvm->memslots, slots);
2892 synchronize_srcu_expedited(&kvm->srcu);
2893 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
2894 kfree(old_slots);
Takuya Yoshikawa914ebcc2010-04-28 18:50:36 +09002895
2896 r = -EFAULT;
2897 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
2898 vfree(dirty_bitmap);
2899 goto out;
2900 }
2901 vfree(dirty_bitmap);
2902 } else {
2903 r = -EFAULT;
2904 if (clear_user(log->dirty_bitmap, n))
2905 goto out;
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002906 }
Marcelo Tosattib050b012009-12-23 14:35:22 -02002907
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002908 r = 0;
2909out:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02002910 mutex_unlock(&kvm->slots_lock);
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08002911 return r;
2912}
2913
Carsten Otte1fe779f2007-10-29 16:08:35 +01002914long kvm_arch_vm_ioctl(struct file *filp,
2915 unsigned int ioctl, unsigned long arg)
2916{
2917 struct kvm *kvm = filp->private_data;
2918 void __user *argp = (void __user *)arg;
Avi Kivity367e1312009-08-26 14:57:07 +03002919 int r = -ENOTTY;
Dave Hansenf0d66272008-08-11 10:01:45 -07002920 /*
2921 * This union makes it completely explicit to gcc-3.x
2922 * that these two variables' stack usage should be
2923 * combined, not added together.
2924 */
2925 union {
2926 struct kvm_pit_state ps;
Beth Kone9f42752009-07-07 11:50:38 -04002927 struct kvm_pit_state2 ps2;
Dave Hansenf0d66272008-08-11 10:01:45 -07002928 struct kvm_memory_alias alias;
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02002929 struct kvm_pit_config pit_config;
Dave Hansenf0d66272008-08-11 10:01:45 -07002930 } u;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002931
2932 switch (ioctl) {
2933 case KVM_SET_TSS_ADDR:
2934 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
2935 if (r < 0)
2936 goto out;
2937 break;
Sheng Yangb927a3c2009-07-21 10:42:48 +08002938 case KVM_SET_IDENTITY_MAP_ADDR: {
2939 u64 ident_addr;
2940
2941 r = -EFAULT;
2942 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
2943 goto out;
2944 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
2945 if (r < 0)
2946 goto out;
2947 break;
2948 }
Carsten Otte1fe779f2007-10-29 16:08:35 +01002949 case KVM_SET_MEMORY_REGION: {
2950 struct kvm_memory_region kvm_mem;
2951 struct kvm_userspace_memory_region kvm_userspace_mem;
2952
2953 r = -EFAULT;
2954 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
2955 goto out;
2956 kvm_userspace_mem.slot = kvm_mem.slot;
2957 kvm_userspace_mem.flags = kvm_mem.flags;
2958 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
2959 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
2960 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
2961 if (r)
2962 goto out;
2963 break;
2964 }
2965 case KVM_SET_NR_MMU_PAGES:
2966 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
2967 if (r)
2968 goto out;
2969 break;
2970 case KVM_GET_NR_MMU_PAGES:
2971 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
2972 break;
Dave Hansenf0d66272008-08-11 10:01:45 -07002973 case KVM_SET_MEMORY_ALIAS:
Carsten Otte1fe779f2007-10-29 16:08:35 +01002974 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07002975 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
Carsten Otte1fe779f2007-10-29 16:08:35 +01002976 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07002977 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
Carsten Otte1fe779f2007-10-29 16:08:35 +01002978 if (r)
2979 goto out;
2980 break;
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002981 case KVM_CREATE_IRQCHIP: {
2982 struct kvm_pic *vpic;
2983
2984 mutex_lock(&kvm->lock);
2985 r = -EEXIST;
2986 if (kvm->arch.vpic)
2987 goto create_irqchip_unlock;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002988 r = -ENOMEM;
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002989 vpic = kvm_create_pic(kvm);
2990 if (vpic) {
Carsten Otte1fe779f2007-10-29 16:08:35 +01002991 r = kvm_ioapic_init(kvm);
2992 if (r) {
Wei Yongjun72bb2fc2010-02-09 10:33:03 +08002993 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
2994 &vpic->dev);
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002995 kfree(vpic);
2996 goto create_irqchip_unlock;
Carsten Otte1fe779f2007-10-29 16:08:35 +01002997 }
2998 } else
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02002999 goto create_irqchip_unlock;
3000 smp_wmb();
3001 kvm->arch.vpic = vpic;
3002 smp_wmb();
Avi Kivity399ec802008-11-19 13:58:46 +02003003 r = kvm_setup_default_irq_routing(kvm);
3004 if (r) {
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02003005 mutex_lock(&kvm->irq_lock);
Wei Yongjun72bb2fc2010-02-09 10:33:03 +08003006 kvm_ioapic_destroy(kvm);
3007 kvm_destroy_pic(kvm);
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02003008 mutex_unlock(&kvm->irq_lock);
Avi Kivity399ec802008-11-19 13:58:46 +02003009 }
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02003010 create_irqchip_unlock:
3011 mutex_unlock(&kvm->lock);
Carsten Otte1fe779f2007-10-29 16:08:35 +01003012 break;
Marcelo Tosatti3ddea122009-10-29 13:44:15 -02003013 }
Sheng Yang78376992008-01-28 05:10:22 +08003014 case KVM_CREATE_PIT:
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02003015 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3016 goto create_pit;
3017 case KVM_CREATE_PIT2:
3018 r = -EFAULT;
3019 if (copy_from_user(&u.pit_config, argp,
3020 sizeof(struct kvm_pit_config)))
3021 goto out;
3022 create_pit:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02003023 mutex_lock(&kvm->slots_lock);
Avi Kivity269e05e2009-01-05 15:21:42 +02003024 r = -EEXIST;
3025 if (kvm->arch.vpit)
3026 goto create_pit_unlock;
Sheng Yang78376992008-01-28 05:10:22 +08003027 r = -ENOMEM;
Jan Kiszkac5ff41c2009-05-14 22:42:53 +02003028 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
Sheng Yang78376992008-01-28 05:10:22 +08003029 if (kvm->arch.vpit)
3030 r = 0;
Avi Kivity269e05e2009-01-05 15:21:42 +02003031 create_pit_unlock:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02003032 mutex_unlock(&kvm->slots_lock);
Sheng Yang78376992008-01-28 05:10:22 +08003033 break;
Gleb Natapov49256632009-02-04 17:28:14 +02003034 case KVM_IRQ_LINE_STATUS:
Carsten Otte1fe779f2007-10-29 16:08:35 +01003035 case KVM_IRQ_LINE: {
3036 struct kvm_irq_level irq_event;
3037
3038 r = -EFAULT;
3039 if (copy_from_user(&irq_event, argp, sizeof irq_event))
3040 goto out;
Wei Yongjun160d2f62010-03-12 10:09:45 +08003041 r = -ENXIO;
Carsten Otte1fe779f2007-10-29 16:08:35 +01003042 if (irqchip_in_kernel(kvm)) {
Gleb Natapov49256632009-02-04 17:28:14 +02003043 __s32 status;
Gleb Natapov49256632009-02-04 17:28:14 +02003044 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3045 irq_event.irq, irq_event.level);
Gleb Natapov49256632009-02-04 17:28:14 +02003046 if (ioctl == KVM_IRQ_LINE_STATUS) {
Wei Yongjun160d2f62010-03-12 10:09:45 +08003047 r = -EFAULT;
Gleb Natapov49256632009-02-04 17:28:14 +02003048 irq_event.status = status;
3049 if (copy_to_user(argp, &irq_event,
3050 sizeof irq_event))
3051 goto out;
3052 }
Carsten Otte1fe779f2007-10-29 16:08:35 +01003053 r = 0;
3054 }
3055 break;
3056 }
3057 case KVM_GET_IRQCHIP: {
3058 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
Dave Hansenf0d66272008-08-11 10:01:45 -07003059 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
Carsten Otte1fe779f2007-10-29 16:08:35 +01003060
Dave Hansenf0d66272008-08-11 10:01:45 -07003061 r = -ENOMEM;
3062 if (!chip)
Carsten Otte1fe779f2007-10-29 16:08:35 +01003063 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07003064 r = -EFAULT;
3065 if (copy_from_user(chip, argp, sizeof *chip))
3066 goto get_irqchip_out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01003067 r = -ENXIO;
3068 if (!irqchip_in_kernel(kvm))
Dave Hansenf0d66272008-08-11 10:01:45 -07003069 goto get_irqchip_out;
3070 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3071 if (r)
3072 goto get_irqchip_out;
3073 r = -EFAULT;
3074 if (copy_to_user(argp, chip, sizeof *chip))
3075 goto get_irqchip_out;
3076 r = 0;
3077 get_irqchip_out:
3078 kfree(chip);
Carsten Otte1fe779f2007-10-29 16:08:35 +01003079 if (r)
3080 goto out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01003081 break;
3082 }
3083 case KVM_SET_IRQCHIP: {
3084 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
Dave Hansenf0d66272008-08-11 10:01:45 -07003085 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
Carsten Otte1fe779f2007-10-29 16:08:35 +01003086
Dave Hansenf0d66272008-08-11 10:01:45 -07003087 r = -ENOMEM;
3088 if (!chip)
Carsten Otte1fe779f2007-10-29 16:08:35 +01003089 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07003090 r = -EFAULT;
3091 if (copy_from_user(chip, argp, sizeof *chip))
3092 goto set_irqchip_out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01003093 r = -ENXIO;
3094 if (!irqchip_in_kernel(kvm))
Dave Hansenf0d66272008-08-11 10:01:45 -07003095 goto set_irqchip_out;
3096 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3097 if (r)
3098 goto set_irqchip_out;
3099 r = 0;
3100 set_irqchip_out:
3101 kfree(chip);
Carsten Otte1fe779f2007-10-29 16:08:35 +01003102 if (r)
3103 goto out;
Carsten Otte1fe779f2007-10-29 16:08:35 +01003104 break;
3105 }
Sheng Yange0f63cb2008-03-04 00:50:59 +08003106 case KVM_GET_PIT: {
Sheng Yange0f63cb2008-03-04 00:50:59 +08003107 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07003108 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
Sheng Yange0f63cb2008-03-04 00:50:59 +08003109 goto out;
3110 r = -ENXIO;
3111 if (!kvm->arch.vpit)
3112 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07003113 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
Sheng Yange0f63cb2008-03-04 00:50:59 +08003114 if (r)
3115 goto out;
3116 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07003117 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
Sheng Yange0f63cb2008-03-04 00:50:59 +08003118 goto out;
3119 r = 0;
3120 break;
3121 }
3122 case KVM_SET_PIT: {
Sheng Yange0f63cb2008-03-04 00:50:59 +08003123 r = -EFAULT;
Dave Hansenf0d66272008-08-11 10:01:45 -07003124 if (copy_from_user(&u.ps, argp, sizeof u.ps))
Sheng Yange0f63cb2008-03-04 00:50:59 +08003125 goto out;
3126 r = -ENXIO;
3127 if (!kvm->arch.vpit)
3128 goto out;
Dave Hansenf0d66272008-08-11 10:01:45 -07003129 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
Sheng Yange0f63cb2008-03-04 00:50:59 +08003130 if (r)
3131 goto out;
3132 r = 0;
3133 break;
3134 }
Beth Kone9f42752009-07-07 11:50:38 -04003135 case KVM_GET_PIT2: {
3136 r = -ENXIO;
3137 if (!kvm->arch.vpit)
3138 goto out;
3139 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
3140 if (r)
3141 goto out;
3142 r = -EFAULT;
3143 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
3144 goto out;
3145 r = 0;
3146 break;
3147 }
3148 case KVM_SET_PIT2: {
3149 r = -EFAULT;
3150 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
3151 goto out;
3152 r = -ENXIO;
3153 if (!kvm->arch.vpit)
3154 goto out;
3155 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
3156 if (r)
3157 goto out;
3158 r = 0;
3159 break;
3160 }
Marcelo Tosatti52d939a2008-12-30 15:55:06 -02003161 case KVM_REINJECT_CONTROL: {
3162 struct kvm_reinject_control control;
3163 r = -EFAULT;
3164 if (copy_from_user(&control, argp, sizeof(control)))
3165 goto out;
3166 r = kvm_vm_ioctl_reinject(kvm, &control);
3167 if (r)
3168 goto out;
3169 r = 0;
3170 break;
3171 }
Ed Swierkffde22a2009-10-15 15:21:43 -07003172 case KVM_XEN_HVM_CONFIG: {
3173 r = -EFAULT;
3174 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
3175 sizeof(struct kvm_xen_hvm_config)))
3176 goto out;
3177 r = -EINVAL;
3178 if (kvm->arch.xen_hvm_config.flags)
3179 goto out;
3180 r = 0;
3181 break;
3182 }
Glauber Costaafbcf7a2009-10-16 15:28:36 -04003183 case KVM_SET_CLOCK: {
3184 struct timespec now;
3185 struct kvm_clock_data user_ns;
3186 u64 now_ns;
3187 s64 delta;
3188
3189 r = -EFAULT;
3190 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
3191 goto out;
3192
3193 r = -EINVAL;
3194 if (user_ns.flags)
3195 goto out;
3196
3197 r = 0;
3198 ktime_get_ts(&now);
3199 now_ns = timespec_to_ns(&now);
3200 delta = user_ns.clock - now_ns;
3201 kvm->arch.kvmclock_offset = delta;
3202 break;
3203 }
3204 case KVM_GET_CLOCK: {
3205 struct timespec now;
3206 struct kvm_clock_data user_ns;
3207 u64 now_ns;
3208
3209 ktime_get_ts(&now);
3210 now_ns = timespec_to_ns(&now);
3211 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3212 user_ns.flags = 0;
3213
3214 r = -EFAULT;
3215 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
3216 goto out;
3217 r = 0;
3218 break;
3219 }
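	/*
	 * A minimal sketch of how the two clock cases above are driven from
	 * userspace, e.g. to save and restore kvmclock across migration;
	 * vm_fd is an assumed VM file descriptor, not something defined here:
	 *
	 *	struct kvm_clock_data data;
	 *	ioctl(vm_fd, KVM_GET_CLOCK, &data);	/. data.clock = guest ns ./
	 *	...
	 *	data.flags = 0;
	 *	ioctl(vm_fd, KVM_SET_CLOCK, &data);	/. re-bases kvmclock_offset ./
	 */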
3220
Carsten Otte1fe779f2007-10-29 16:08:35 +01003221 default:
3222 ;
3223 }
3224out:
3225 return r;
3226}
3227
Zhang Xiantaoa16b0432007-11-16 14:38:21 +08003228static void kvm_init_msr_list(void)
Carsten Otte043405e2007-10-10 17:16:19 +02003229{
3230 u32 dummy[2];
3231 unsigned i, j;
3232
Glauber Costae3267cb2009-10-06 13:24:50 -04003233 /* skip the first msrs in the list. KVM-specific */
3234 for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
Carsten Otte043405e2007-10-10 17:16:19 +02003235 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
3236 continue;
3237 if (j < i)
3238 msrs_to_save[j] = msrs_to_save[i];
3239 j++;
3240 }
3241 num_msrs_to_save = j;
3242}
3243
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003244static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3245 const void *v)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003246{
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003247 if (vcpu->arch.apic &&
3248 !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
3249 return 0;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003250
Marcelo Tosattie93f8a02009-12-23 14:35:24 -02003251 return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003252}
3253
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003254static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003255{
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003256 if (vcpu->arch.apic &&
3257 !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
3258 return 0;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003259
Marcelo Tosattie93f8a02009-12-23 14:35:24 -02003260 return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003261}
3262
Gleb Natapov2dafc6c2010-03-18 15:20:16 +02003263static void kvm_set_segment(struct kvm_vcpu *vcpu,
3264 struct kvm_segment *var, int seg)
3265{
3266 kvm_x86_ops->set_segment(vcpu, var, seg);
3267}
3268
3269void kvm_get_segment(struct kvm_vcpu *vcpu,
3270 struct kvm_segment *var, int seg)
3271{
3272 kvm_x86_ops->get_segment(vcpu, var, seg);
3273}
3274
Gleb Natapov1871c602010-02-10 14:21:32 +02003275gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3276{
3277 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3278 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3279}
3280
3281gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3282{
3283 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3284 access |= PFERR_FETCH_MASK;
3285 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3286}
3287
3288gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3289{
3290 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3291 access |= PFERR_WRITE_MASK;
3292 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3293}
3294
3295/* used to access any guest's mapped memory without checking CPL */
3296gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3297{
3298 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
3299}
3300
3301static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3302 struct kvm_vcpu *vcpu, u32 access,
3303 u32 *error)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003304{
3305 void *data = val;
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003306 int r = X86EMUL_CONTINUE;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003307
3308 while (bytes) {
Gleb Natapov1871c602010-02-10 14:21:32 +02003309 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003310 unsigned offset = addr & (PAGE_SIZE-1);
Izik Eidus77c20022008-12-29 01:42:19 +02003311 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003312 int ret;
3313
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003314 if (gpa == UNMAPPED_GVA) {
3315 r = X86EMUL_PROPAGATE_FAULT;
3316 goto out;
3317 }
Izik Eidus77c20022008-12-29 01:42:19 +02003318 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003319 if (ret < 0) {
Gleb Natapovc3cd7ff2010-04-28 19:15:35 +03003320 r = X86EMUL_IO_NEEDED;
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003321 goto out;
3322 }
Carsten Ottebbd9b642007-10-30 18:44:21 +01003323
Izik Eidus77c20022008-12-29 01:42:19 +02003324 bytes -= toread;
3325 data += toread;
3326 addr += toread;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003327 }
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003328out:
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003329 return r;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003330}
Izik Eidus77c20022008-12-29 01:42:19 +02003331
Gleb Natapov1871c602010-02-10 14:21:32 +02003332/* used for instruction fetching */
3333static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
3334 struct kvm_vcpu *vcpu, u32 *error)
3335{
3336 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3337 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
3338 access | PFERR_FETCH_MASK, error);
3339}
3340
3341static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
3342 struct kvm_vcpu *vcpu, u32 *error)
3343{
3344 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3345 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
3346 error);
3347}
3348
3349static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
3350 struct kvm_vcpu *vcpu, u32 *error)
3351{
3352 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
3353}
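/*
 * The three wrappers above differ only in the PFERR_* access mask handed
 * to gva_to_gpa(): for example, an instruction fetch performed at CPL 3
 * is translated with PFERR_USER_MASK | PFERR_FETCH_MASK, while the
 * _system variant passes 0 and therefore skips the user/supervisor check.
 */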
3354
Gleb Natapov79729952010-03-18 15:20:24 +02003355static int kvm_write_guest_virt_system(gva_t addr, void *val,
Gleb Natapov2dafc6c2010-03-18 15:20:16 +02003356 unsigned int bytes,
Gleb Natapov79729952010-03-18 15:20:24 +02003357 struct kvm_vcpu *vcpu,
Gleb Natapov2dafc6c2010-03-18 15:20:16 +02003358 u32 *error)
Izik Eidus77c20022008-12-29 01:42:19 +02003359{
3360 void *data = val;
3361 int r = X86EMUL_CONTINUE;
3362
3363 while (bytes) {
Gleb Natapov79729952010-03-18 15:20:24 +02003364 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
3365 PFERR_WRITE_MASK, error);
Izik Eidus77c20022008-12-29 01:42:19 +02003366 unsigned offset = addr & (PAGE_SIZE-1);
3367 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
3368 int ret;
3369
3370 if (gpa == UNMAPPED_GVA) {
3371 r = X86EMUL_PROPAGATE_FAULT;
3372 goto out;
3373 }
3374 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
3375 if (ret < 0) {
Gleb Natapovc3cd7ff2010-04-28 19:15:35 +03003376 r = X86EMUL_IO_NEEDED;
Izik Eidus77c20022008-12-29 01:42:19 +02003377 goto out;
3378 }
3379
3380 bytes -= towrite;
3381 data += towrite;
3382 addr += towrite;
3383 }
3384out:
3385 return r;
3386}
3387
Carsten Ottebbd9b642007-10-30 18:44:21 +01003388static int emulator_read_emulated(unsigned long addr,
3389 void *val,
3390 unsigned int bytes,
Gleb Natapov8fe681e2010-04-28 19:15:37 +03003391 unsigned int *error_code,
Carsten Ottebbd9b642007-10-30 18:44:21 +01003392 struct kvm_vcpu *vcpu)
3393{
Carsten Ottebbd9b642007-10-30 18:44:21 +01003394 gpa_t gpa;
3395
3396 if (vcpu->mmio_read_completed) {
3397 memcpy(val, vcpu->mmio_data, bytes);
Avi Kivityaec51dc2009-07-01 16:01:02 +03003398 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
3399 vcpu->mmio_phys_addr, *(u64 *)val);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003400 vcpu->mmio_read_completed = 0;
3401 return X86EMUL_CONTINUE;
3402 }
3403
Gleb Natapov8fe681e2010-04-28 19:15:37 +03003404 gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
Gleb Natapov1871c602010-02-10 14:21:32 +02003405
Gleb Natapov8fe681e2010-04-28 19:15:37 +03003406 if (gpa == UNMAPPED_GVA)
Gleb Natapov1871c602010-02-10 14:21:32 +02003407 return X86EMUL_PROPAGATE_FAULT;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003408
3409 /* For APIC access vmexit */
3410 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3411 goto mmio;
3412
Gleb Natapov1871c602010-02-10 14:21:32 +02003413 if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
Izik Eidus77c20022008-12-29 01:42:19 +02003414 == X86EMUL_CONTINUE)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003415 return X86EMUL_CONTINUE;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003416
3417mmio:
3418 /*
3419 * Is this MMIO handled locally?
3420 */
Avi Kivityaec51dc2009-07-01 16:01:02 +03003421 if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
3422 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003423 return X86EMUL_CONTINUE;
3424 }
Avi Kivityaec51dc2009-07-01 16:01:02 +03003425
3426 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003427
3428 vcpu->mmio_needed = 1;
Gleb Natapov411c35b2010-04-28 19:15:34 +03003429 vcpu->run->exit_reason = KVM_EXIT_MMIO;
3430 vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3431 vcpu->run->mmio.len = vcpu->mmio_size = bytes;
3432 vcpu->run->mmio.is_write = vcpu->mmio_is_write = 0;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003433
Gleb Natapovc3cd7ff2010-04-28 19:15:35 +03003434 return X86EMUL_IO_NEEDED;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003435}
3436
Marcelo Tosatti3200f402008-03-29 20:17:59 -03003437int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
Avi Kivity9f811282008-03-02 14:06:05 +02003438 const void *val, int bytes)
3439{
3440 int ret;
3441
3442 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
3443 if (ret < 0)
3444 return 0;
Marcelo Tosattiad218f82008-12-01 22:32:05 -02003445 kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
Avi Kivity9f811282008-03-02 14:06:05 +02003446 return 1;
3447}
3448
Carsten Ottebbd9b642007-10-30 18:44:21 +01003449static int emulator_write_emulated_onepage(unsigned long addr,
3450 const void *val,
3451 unsigned int bytes,
Gleb Natapov8fe681e2010-04-28 19:15:37 +03003452 unsigned int *error_code,
Carsten Ottebbd9b642007-10-30 18:44:21 +01003453 struct kvm_vcpu *vcpu)
3454{
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003455 gpa_t gpa;
3456
Gleb Natapov8fe681e2010-04-28 19:15:37 +03003457 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003458
Gleb Natapov8fe681e2010-04-28 19:15:37 +03003459 if (gpa == UNMAPPED_GVA)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003460 return X86EMUL_PROPAGATE_FAULT;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003461
3462 /* For APIC access vmexit */
3463 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3464 goto mmio;
3465
3466 if (emulator_write_phys(vcpu, gpa, val, bytes))
3467 return X86EMUL_CONTINUE;
3468
3469mmio:
Avi Kivityaec51dc2009-07-01 16:01:02 +03003470 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003471 /*
3472 * Is this MMIO handled locally?
3473 */
Michael S. Tsirkinbda90202009-06-29 22:24:32 +03003474 if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
Carsten Ottebbd9b642007-10-30 18:44:21 +01003475 return X86EMUL_CONTINUE;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003476
3477 vcpu->mmio_needed = 1;
Gleb Natapov411c35b2010-04-28 19:15:34 +03003478 vcpu->run->exit_reason = KVM_EXIT_MMIO;
3479 vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3480 vcpu->run->mmio.len = vcpu->mmio_size = bytes;
3481 vcpu->run->mmio.is_write = vcpu->mmio_is_write = 1;
3482 memcpy(vcpu->run->mmio.data, val, bytes);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003483
3484 return X86EMUL_CONTINUE;
3485}
3486
3487int emulator_write_emulated(unsigned long addr,
Gleb Natapov8f6abd02010-04-13 10:21:56 +03003488 const void *val,
3489 unsigned int bytes,
Gleb Natapov8fe681e2010-04-28 19:15:37 +03003490 unsigned int *error_code,
Gleb Natapov8f6abd02010-04-13 10:21:56 +03003491 struct kvm_vcpu *vcpu)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003492{
3493 /* Crossing a page boundary? */
3494 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3495 int rc, now;
3496
3497 now = -addr & ~PAGE_MASK;
Gleb Natapov8fe681e2010-04-28 19:15:37 +03003498 rc = emulator_write_emulated_onepage(addr, val, now, error_code,
3499 vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003500 if (rc != X86EMUL_CONTINUE)
3501 return rc;
3502 addr += now;
3503 val += now;
3504 bytes -= now;
3505 }
Gleb Natapov8fe681e2010-04-28 19:15:37 +03003506 return emulator_write_emulated_onepage(addr, val, bytes, error_code,
3507 vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003508}
Carsten Ottebbd9b642007-10-30 18:44:21 +01003509
Avi Kivitydaea3e72010-03-15 13:59:54 +02003510#define CMPXCHG_TYPE(t, ptr, old, new) \
3511 (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
3512
3513#ifdef CONFIG_X86_64
3514# define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
3515#else
3516# define CMPXCHG64(ptr, old, new) \
Jan Kiszka9749a6c2010-03-20 10:14:13 +01003517 (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
Avi Kivitydaea3e72010-03-15 13:59:54 +02003518#endif
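/*
 * As an illustration of the helpers above: for a 4-byte operand,
 * CMPXCHG_TYPE(u32, kaddr, old, new) expands to
 *
 *	(cmpxchg((u32 *)(kaddr), *(u32 *)(old), *(u32 *)(new)) == *(u32 *)(old))
 *
 * i.e. it attempts the atomic compare-and-exchange and reports whether
 * the old value matched, which emulator_cmpxchg_emulated() below maps to
 * X86EMUL_CONTINUE or X86EMUL_CMPXCHG_FAILED.
 */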
3519
Carsten Ottebbd9b642007-10-30 18:44:21 +01003520static int emulator_cmpxchg_emulated(unsigned long addr,
3521 const void *old,
3522 const void *new,
3523 unsigned int bytes,
Gleb Natapov8fe681e2010-04-28 19:15:37 +03003524 unsigned int *error_code,
Carsten Ottebbd9b642007-10-30 18:44:21 +01003525 struct kvm_vcpu *vcpu)
3526{
Avi Kivitydaea3e72010-03-15 13:59:54 +02003527 gpa_t gpa;
3528 struct page *page;
3529 char *kaddr;
3530 bool exchanged;
3531
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003532 /* a guest's cmpxchg8b has to be emulated atomically */
Avi Kivitydaea3e72010-03-15 13:59:54 +02003533 if (bytes > 8 || (bytes & (bytes - 1)))
3534 goto emul_write;
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003535
Avi Kivitydaea3e72010-03-15 13:59:54 +02003536 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
Marcelo Tosatti10589a42007-12-20 19:18:22 -05003537
Avi Kivitydaea3e72010-03-15 13:59:54 +02003538 if (gpa == UNMAPPED_GVA ||
3539 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3540 goto emul_write;
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003541
Avi Kivitydaea3e72010-03-15 13:59:54 +02003542 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
3543 goto emul_write;
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003544
Avi Kivitydaea3e72010-03-15 13:59:54 +02003545 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
Izik Eidus72dc67a2008-02-10 18:04:15 +02003546
Avi Kivitydaea3e72010-03-15 13:59:54 +02003547 kaddr = kmap_atomic(page, KM_USER0);
3548 kaddr += offset_in_page(gpa);
3549 switch (bytes) {
3550 case 1:
3551 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
3552 break;
3553 case 2:
3554 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
3555 break;
3556 case 4:
3557 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
3558 break;
3559 case 8:
3560 exchanged = CMPXCHG64(kaddr, old, new);
3561 break;
3562 default:
3563 BUG();
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003564 }
Avi Kivitydaea3e72010-03-15 13:59:54 +02003565 kunmap_atomic(kaddr, KM_USER0);
3566 kvm_release_page_dirty(page);
3567
3568 if (!exchanged)
3569 return X86EMUL_CMPXCHG_FAILED;
3570
Gleb Natapov8f6abd02010-04-13 10:21:56 +03003571 kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
3572
3573 return X86EMUL_CONTINUE;
Avi Kivity4a5f48f2010-03-15 13:59:55 +02003574
Marcelo Tosatti3200f402008-03-29 20:17:59 -03003575emul_write:
Avi Kivitydaea3e72010-03-15 13:59:54 +02003576 printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
Marcelo Tosatti2bacc552007-12-12 10:46:12 -05003577
Gleb Natapov8fe681e2010-04-28 19:15:37 +03003578 return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003579}
3580
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02003581static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
3582{
3583 /* TODO: String I/O for in-kernel device */
3584 int r;
3585
3586 if (vcpu->arch.pio.in)
3587 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
3588 vcpu->arch.pio.size, pd);
3589 else
3590 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
3591 vcpu->arch.pio.port, vcpu->arch.pio.size,
3592 pd);
3593 return r;
3594}
3595
3596
3597static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
3598 unsigned int count, struct kvm_vcpu *vcpu)
3599{
Gleb Natapov79729952010-03-18 15:20:24 +02003600 if (vcpu->arch.pio.count)
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02003601 goto data_avail;
3602
3603 trace_kvm_pio(1, port, size, 1);
3604
3605 vcpu->arch.pio.port = port;
3606 vcpu->arch.pio.in = 1;
Gleb Natapov79729952010-03-18 15:20:24 +02003607 vcpu->arch.pio.count = count;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02003608 vcpu->arch.pio.size = size;
3609
3610 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3611 data_avail:
3612 memcpy(val, vcpu->arch.pio_data, size * count);
Gleb Natapov79729952010-03-18 15:20:24 +02003613 vcpu->arch.pio.count = 0;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02003614 return 1;
3615 }
3616
3617 vcpu->run->exit_reason = KVM_EXIT_IO;
3618 vcpu->run->io.direction = KVM_EXIT_IO_IN;
3619 vcpu->run->io.size = size;
3620 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3621 vcpu->run->io.count = count;
3622 vcpu->run->io.port = port;
3623
3624 return 0;
3625}
3626
3627static int emulator_pio_out_emulated(int size, unsigned short port,
3628 const void *val, unsigned int count,
3629 struct kvm_vcpu *vcpu)
3630{
3631 trace_kvm_pio(0, port, size, 1);
3632
3633 vcpu->arch.pio.port = port;
3634 vcpu->arch.pio.in = 0;
Gleb Natapov79729952010-03-18 15:20:24 +02003635 vcpu->arch.pio.count = count;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02003636 vcpu->arch.pio.size = size;
3637
3638 memcpy(vcpu->arch.pio_data, val, size * count);
3639
3640 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
Gleb Natapov79729952010-03-18 15:20:24 +02003641 vcpu->arch.pio.count = 0;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02003642 return 1;
3643 }
3644
3645 vcpu->run->exit_reason = KVM_EXIT_IO;
3646 vcpu->run->io.direction = KVM_EXIT_IO_OUT;
3647 vcpu->run->io.size = size;
3648 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3649 vcpu->run->io.count = count;
3650 vcpu->run->io.port = port;
3651
3652 return 0;
3653}
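/*
 * In both PIO helpers above the data is staged in vcpu->arch.pio_data,
 * which userspace sees at KVM_PIO_PAGE_OFFSET * PAGE_SIZE inside the
 * mmap'ed vcpu run area. A return of 1 means the port was handled by an
 * in-kernel device and pio.count was cleared; a return of 0 means the
 * KVM_EXIT_IO exit filled in above must be completed by userspace.
 */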
3654
Carsten Ottebbd9b642007-10-30 18:44:21 +01003655static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
3656{
3657 return kvm_x86_ops->get_segment_base(vcpu, seg);
3658}
3659
3660int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
3661{
Marcelo Tosattia7052892008-09-23 13:18:35 -03003662 kvm_mmu_invlpg(vcpu, address);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003663 return X86EMUL_CONTINUE;
3664}
3665
3666int emulate_clts(struct kvm_vcpu *vcpu)
3667{
Avi Kivity4d4ec082009-12-29 18:07:30 +02003668 kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
Avi Kivity6b52d182010-01-21 15:31:47 +02003669 kvm_x86_ops->fpu_activate(vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003670 return X86EMUL_CONTINUE;
3671}
3672
Gleb Natapov35aa5372010-04-28 19:15:27 +03003673int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003674{
Gleb Natapov338dbc92010-04-28 19:15:32 +03003675 return _kvm_get_dr(vcpu, dr, dest);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003676}
3677
Gleb Natapov35aa5372010-04-28 19:15:27 +03003678int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003679{
Gleb Natapov338dbc92010-04-28 19:15:32 +03003680
3681 return __kvm_set_dr(vcpu, dr, value);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003682}
3683
Gleb Natapov52a46612010-03-18 15:20:03 +02003684static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3685{
3686 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
3687}
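/*
 * mk_cr_64() keeps the upper 32 bits of the current control register and
 * substitutes the lower 32; as a purely illustrative example,
 * mk_cr_64(0xffffffff00000011ULL, 0x80050033) == 0xffffffff80050033ULL.
 */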
3688
3689static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
3690{
3691 unsigned long value;
3692
3693 switch (cr) {
3694 case 0:
3695 value = kvm_read_cr0(vcpu);
3696 break;
3697 case 2:
3698 value = vcpu->arch.cr2;
3699 break;
3700 case 3:
3701 value = vcpu->arch.cr3;
3702 break;
3703 case 4:
3704 value = kvm_read_cr4(vcpu);
3705 break;
3706 case 8:
3707 value = kvm_get_cr8(vcpu);
3708 break;
3709 default:
3710 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3711 return 0;
3712 }
3713
3714 return value;
3715}
3716
Gleb Natapov0f122442010-04-28 19:15:31 +03003717static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
Gleb Natapov52a46612010-03-18 15:20:03 +02003718{
Gleb Natapov0f122442010-04-28 19:15:31 +03003719 int res = 0;
3720
Gleb Natapov52a46612010-03-18 15:20:03 +02003721 switch (cr) {
3722 case 0:
Avi Kivity49a9b072010-06-10 17:02:14 +03003723 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
Gleb Natapov52a46612010-03-18 15:20:03 +02003724 break;
3725 case 2:
3726 vcpu->arch.cr2 = val;
3727 break;
3728 case 3:
Gleb Natapov0f122442010-04-28 19:15:31 +03003729 res = __kvm_set_cr3(vcpu, val);
Gleb Natapov52a46612010-03-18 15:20:03 +02003730 break;
3731 case 4:
Avi Kivitya83b29c2010-06-10 17:02:15 +03003732 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
Gleb Natapov52a46612010-03-18 15:20:03 +02003733 break;
3734 case 8:
Gleb Natapov0f122442010-04-28 19:15:31 +03003735 res = __kvm_set_cr8(vcpu, val & 0xfUL);
Gleb Natapov52a46612010-03-18 15:20:03 +02003736 break;
3737 default:
3738 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
Gleb Natapov0f122442010-04-28 19:15:31 +03003739 res = -1;
Gleb Natapov52a46612010-03-18 15:20:03 +02003740 }
Gleb Natapov0f122442010-04-28 19:15:31 +03003741
3742 return res;
Gleb Natapov52a46612010-03-18 15:20:03 +02003743}
3744
Gleb Natapov9c537242010-03-18 15:20:05 +02003745static int emulator_get_cpl(struct kvm_vcpu *vcpu)
3746{
3747 return kvm_x86_ops->get_cpl(vcpu);
3748}
3749
Gleb Natapov2dafc6c2010-03-18 15:20:16 +02003750static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
3751{
3752 kvm_x86_ops->get_gdt(vcpu, dt);
3753}
3754
Gleb Natapov5951c442010-04-28 19:15:29 +03003755static unsigned long emulator_get_cached_segment_base(int seg,
3756 struct kvm_vcpu *vcpu)
3757{
3758 return get_segment_base(vcpu, seg);
3759}
3760
Gleb Natapov2dafc6c2010-03-18 15:20:16 +02003761static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
3762 struct kvm_vcpu *vcpu)
3763{
3764 struct kvm_segment var;
3765
3766 kvm_get_segment(vcpu, &var, seg);
3767
3768 if (var.unusable)
3769 return false;
3770
3771 if (var.g)
3772 var.limit >>= 12;
3773 set_desc_limit(desc, var.limit);
3774 set_desc_base(desc, (unsigned long)var.base);
3775 desc->type = var.type;
3776 desc->s = var.s;
3777 desc->dpl = var.dpl;
3778 desc->p = var.present;
3779 desc->avl = var.avl;
3780 desc->l = var.l;
3781 desc->d = var.db;
3782 desc->g = var.g;
3783
3784 return true;
3785}
3786
3787static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
3788 struct kvm_vcpu *vcpu)
3789{
3790 struct kvm_segment var;
3791
3792 /* needed to preserve selector */
3793 kvm_get_segment(vcpu, &var, seg);
3794
3795 var.base = get_desc_base(desc);
3796 var.limit = get_desc_limit(desc);
3797 if (desc->g)
3798 var.limit = (var.limit << 12) | 0xfff;
3799 var.type = desc->type;
3800 var.present = desc->p;
3801 var.dpl = desc->dpl;
3802 var.db = desc->d;
3803 var.s = desc->s;
3804 var.l = desc->l;
3805 var.g = desc->g;
3806 var.avl = desc->avl;
3807 var.present = desc->p;
3808 var.unusable = !var.present;
3809 var.padding = 0;
3810
3811 kvm_set_segment(vcpu, &var, seg);
3812 return;
3813}
3814
3815static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
3816{
3817 struct kvm_segment kvm_seg;
3818
3819 kvm_get_segment(vcpu, &kvm_seg, seg);
3820 return kvm_seg.selector;
3821}
3822
3823static void emulator_set_segment_selector(u16 sel, int seg,
3824 struct kvm_vcpu *vcpu)
3825{
3826 struct kvm_segment kvm_seg;
3827
3828 kvm_get_segment(vcpu, &kvm_seg, seg);
3829 kvm_seg.selector = sel;
3830 kvm_set_segment(vcpu, &kvm_seg, seg);
3831}
3832
Harvey Harrison14af3f32008-02-19 10:25:50 -08003833static struct x86_emulate_ops emulate_ops = {
Gleb Natapov1871c602010-02-10 14:21:32 +02003834 .read_std = kvm_read_guest_virt_system,
Gleb Natapov2dafc6c2010-03-18 15:20:16 +02003835 .write_std = kvm_write_guest_virt_system,
Gleb Natapov1871c602010-02-10 14:21:32 +02003836 .fetch = kvm_fetch_guest_virt,
Carsten Ottebbd9b642007-10-30 18:44:21 +01003837 .read_emulated = emulator_read_emulated,
3838 .write_emulated = emulator_write_emulated,
3839 .cmpxchg_emulated = emulator_cmpxchg_emulated,
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02003840 .pio_in_emulated = emulator_pio_in_emulated,
3841 .pio_out_emulated = emulator_pio_out_emulated,
Gleb Natapov2dafc6c2010-03-18 15:20:16 +02003842 .get_cached_descriptor = emulator_get_cached_descriptor,
3843 .set_cached_descriptor = emulator_set_cached_descriptor,
3844 .get_segment_selector = emulator_get_segment_selector,
3845 .set_segment_selector = emulator_set_segment_selector,
Gleb Natapov5951c442010-04-28 19:15:29 +03003846 .get_cached_segment_base = emulator_get_cached_segment_base,
Gleb Natapov2dafc6c2010-03-18 15:20:16 +02003847 .get_gdt = emulator_get_gdt,
Gleb Natapov52a46612010-03-18 15:20:03 +02003848 .get_cr = emulator_get_cr,
3849 .set_cr = emulator_set_cr,
Gleb Natapov9c537242010-03-18 15:20:05 +02003850 .cpl = emulator_get_cpl,
Gleb Natapov35aa5372010-04-28 19:15:27 +03003851 .get_dr = emulator_get_dr,
3852 .set_dr = emulator_set_dr,
Gleb Natapov3fb1b5d2010-04-28 19:15:28 +03003853 .set_msr = kvm_set_msr,
3854 .get_msr = kvm_get_msr,
Carsten Ottebbd9b642007-10-30 18:44:21 +01003855};
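/*
 * This ops table is the bridge between the generic x86 emulator and KVM:
 * it is passed, together with vcpu->arch.emulate_ctxt, to
 * x86_decode_insn() and x86_emulate_insn() in emulate_instruction()
 * below, so every memory, port, segment and register access made while
 * emulating an instruction is routed back through the callbacks above.
 */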
3856
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003857static void cache_all_regs(struct kvm_vcpu *vcpu)
3858{
3859 kvm_register_read(vcpu, VCPU_REGS_RAX);
3860 kvm_register_read(vcpu, VCPU_REGS_RSP);
3861 kvm_register_read(vcpu, VCPU_REGS_RIP);
3862 vcpu->arch.regs_dirty = ~0;
3863}
3864
Gleb Natapov95cb2292010-04-28 19:15:43 +03003865static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
3866{
3867 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
3868 /*
3869 * an sti; sti; sequence only disables interrupts for the first
3870 * instruction. So, if the last instruction, be it emulated or
3871 * not, left the system with the INT_STI flag enabled, it
3872 * means that the last instruction is an sti. We should not
3873 * leave the flag on in this case. The same goes for mov ss
3874 */
3875 if (!(int_shadow & mask))
3876 kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
3877}
3878
Gleb Natapov54b84862010-04-28 19:15:44 +03003879static void inject_emulated_exception(struct kvm_vcpu *vcpu)
3880{
3881 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
3882 if (ctxt->exception == PF_VECTOR)
3883 kvm_inject_page_fault(vcpu, ctxt->cr2, ctxt->error_code);
3884 else if (ctxt->error_code_valid)
3885 kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
3886 else
3887 kvm_queue_exception(vcpu, ctxt->exception);
3888}
3889
Gleb Natapov6d77dbf2010-05-10 11:16:56 +03003890static int handle_emulation_failure(struct kvm_vcpu *vcpu)
3891{
Gleb Natapov6d77dbf2010-05-10 11:16:56 +03003892 ++vcpu->stat.insn_emulation_fail;
3893 trace_kvm_emulate_insn_failed(vcpu);
3894 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3895 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3896 vcpu->run->internal.ndata = 0;
3897 kvm_queue_exception(vcpu, UD_VECTOR);
3898 return EMULATE_FAIL;
3899}
3900
Carsten Ottebbd9b642007-10-30 18:44:21 +01003901int emulate_instruction(struct kvm_vcpu *vcpu,
Carsten Ottebbd9b642007-10-30 18:44:21 +01003902 unsigned long cr2,
3903 u16 error_code,
Sheng Yang571008d2008-01-02 14:49:22 +08003904 int emulation_type)
Carsten Ottebbd9b642007-10-30 18:44:21 +01003905{
Gleb Natapov95cb2292010-04-28 19:15:43 +03003906 int r;
Gleb Natapov4d2179e2010-04-28 19:15:42 +03003907 struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003908
Avi Kivity26eef702008-07-03 14:59:22 +03003909 kvm_clear_exception_queue(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003910 vcpu->arch.mmio_fault_cr2 = cr2;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003911 /*
Avi Kivity56e82312009-08-12 15:04:37 +03003912 * TODO: fix emulate.c to use guest_read/write_register
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003913 * instead of direct ->regs accesses; this can save a hundred cycles
3914 * on Intel for instructions that don't read/change RSP,
3915 * for example.
3916 */
3917 cache_all_regs(vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003918
Sheng Yang571008d2008-01-02 14:49:22 +08003919 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
Carsten Ottebbd9b642007-10-30 18:44:21 +01003920 int cs_db, cs_l;
3921 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
3922
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003923 vcpu->arch.emulate_ctxt.vcpu = vcpu;
Jan Kiszka83bf0002010-02-23 17:47:59 +01003924 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
Gleb Natapov063db062010-03-18 15:20:06 +02003925 vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003926 vcpu->arch.emulate_ctxt.mode =
Gleb Natapova0044752010-02-10 14:21:31 +02003927 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003928 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
Gleb Natapova0044752010-02-10 14:21:31 +02003929 ? X86EMUL_MODE_VM86 : cs_l
Carsten Ottebbd9b642007-10-30 18:44:21 +01003930 ? X86EMUL_MODE_PROT64 : cs_db
3931 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
Gleb Natapov4d2179e2010-04-28 19:15:42 +03003932 memset(c, 0, sizeof(struct decode_cache));
3933 memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
Gleb Natapov95cb2292010-04-28 19:15:43 +03003934 vcpu->arch.emulate_ctxt.interruptibility = 0;
Gleb Natapov54b84862010-04-28 19:15:44 +03003935 vcpu->arch.emulate_ctxt.exception = -1;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003936
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003937 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
Avi Kivitye46479f2010-04-11 13:05:16 +03003938 trace_kvm_emulate_insn_start(vcpu);
Sheng Yang571008d2008-01-02 14:49:22 +08003939
Andre Przywara0cb57622009-06-17 15:50:31 +02003940 /* Only allow emulation of specific instructions on #UD
3941 * (namely VMMCALL, sysenter, sysexit, syscall) */
Andre Przywara0cb57622009-06-17 15:50:31 +02003942 if (emulation_type & EMULTYPE_TRAP_UD) {
3943 if (!c->twobyte)
3944 return EMULATE_FAIL;
3945 switch (c->b) {
3946 case 0x01: /* VMMCALL */
3947 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3948 return EMULATE_FAIL;
3949 break;
3950 case 0x34: /* sysenter */
3951 case 0x35: /* sysexit */
3952 if (c->modrm_mod != 0 || c->modrm_rm != 0)
3953 return EMULATE_FAIL;
3954 break;
3955 case 0x05: /* syscall */
3956 if (c->modrm_mod != 0 || c->modrm_rm != 0)
3957 return EMULATE_FAIL;
3958 break;
3959 default:
3960 return EMULATE_FAIL;
3961 }
3962
3963 if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
3964 return EMULATE_FAIL;
3965 }
Sheng Yang571008d2008-01-02 14:49:22 +08003966
Avi Kivityf2b57562007-11-18 15:17:51 +02003967 ++vcpu->stat.insn_emulation;
Carsten Ottebbd9b642007-10-30 18:44:21 +01003968 if (r) {
3969 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
3970 return EMULATE_DONE;
Gleb Natapov6d77dbf2010-05-10 11:16:56 +03003971 if (emulation_type & EMULTYPE_SKIP)
3972 return EMULATE_FAIL;
3973 return handle_emulation_failure(vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003974 }
3975 }
3976
Gleb Natapovba8afb62009-04-12 13:36:57 +03003977 if (emulation_type & EMULTYPE_SKIP) {
3978 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
3979 return EMULATE_DONE;
3980 }
3981
Gleb Natapov4d2179e2010-04-28 19:15:42 +03003982 /* this is needed for the vmware backdoor interface to work since it
3983 changes register values during the IO operation */
3984 memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
3985
Gleb Natapov5cd21912010-03-18 15:20:26 +02003986restart:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003987 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
Gleb Natapovc3cd7ff2010-04-28 19:15:35 +03003988
3989 if (r) { /* emulation failed */
3990 /*
3991 * if emulation failed due to an access to a shadowed page
3992 * table, try to unshadow the page and re-enter the guest to
3993 * let the CPU execute the instruction.
3994 */
Carsten Ottebbd9b642007-10-30 18:44:21 +01003995 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
Gleb Natapovc3cd7ff2010-04-28 19:15:35 +03003996 return EMULATE_DONE;
3997
Gleb Natapov6d77dbf2010-05-10 11:16:56 +03003998 return handle_emulation_failure(vcpu);
Carsten Ottebbd9b642007-10-30 18:44:21 +01003999 }
4000
Gleb Natapov95cb2292010-04-28 19:15:43 +03004001 toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
Gleb Natapovef050dc2010-04-28 19:15:40 +03004002 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
Gleb Natapov4d2179e2010-04-28 19:15:42 +03004003 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
Gleb Natapov95c55882010-04-28 19:15:39 +03004004 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
Gleb Natapov3457e412010-04-28 19:15:38 +03004005
Gleb Natapov54b84862010-04-28 19:15:44 +03004006 if (vcpu->arch.emulate_ctxt.exception >= 0) {
4007 inject_emulated_exception(vcpu);
4008 return EMULATE_DONE;
4009 }
4010
Gleb Natapov3457e412010-04-28 19:15:38 +03004011 if (vcpu->arch.pio.count) {
4012 if (!vcpu->arch.pio.in)
4013 vcpu->arch.pio.count = 0;
4014 return EMULATE_DO_MMIO;
4015 }
4016
4017 if (vcpu->mmio_needed) {
4018 if (vcpu->mmio_is_write)
4019 vcpu->mmio_needed = 0;
4020 return EMULATE_DO_MMIO;
4021 }
4022
Gleb Natapov5cd21912010-03-18 15:20:26 +02004023 if (vcpu->arch.emulate_ctxt.restart)
4024 goto restart;
4025
Carsten Ottebbd9b642007-10-30 18:44:21 +01004026 return EMULATE_DONE;
4027}
4028EXPORT_SYMBOL_GPL(emulate_instruction);
4029
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02004030int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
Carsten Ottede7d7892007-10-30 18:44:25 +01004031{
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02004032 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
4033 int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu);
4034 /* do not return to emulator after return from userspace */
Gleb Natapov79729952010-03-18 15:20:24 +02004035 vcpu->arch.pio.count = 0;
Izik Eidus0f346072008-12-29 01:42:20 +02004036 return ret;
Carsten Ottede7d7892007-10-30 18:44:25 +01004037}
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02004038EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
Carsten Ottede7d7892007-10-30 18:44:25 +01004039
Gerd Hoffmannc8076602009-02-04 17:52:04 +01004040static void bounce_off(void *info)
4041{
4042 /* nothing */
4043}
4044
Gerd Hoffmannc8076602009-02-04 17:52:04 +01004045static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
4046 void *data)
4047{
4048 struct cpufreq_freqs *freq = data;
4049 struct kvm *kvm;
4050 struct kvm_vcpu *vcpu;
4051 int i, send_ipi = 0;
4052
Gerd Hoffmannc8076602009-02-04 17:52:04 +01004053 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
4054 return 0;
4055 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
4056 return 0;
Zachary Amsden0cca7902009-09-29 11:38:35 -10004057 per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
Gerd Hoffmannc8076602009-02-04 17:52:04 +01004058
4059 spin_lock(&kvm_lock);
4060 list_for_each_entry(kvm, &vm_list, vm_list) {
Gleb Natapov988a2ca2009-06-09 15:56:29 +03004061 kvm_for_each_vcpu(i, vcpu, kvm) {
Gerd Hoffmannc8076602009-02-04 17:52:04 +01004062 if (vcpu->cpu != freq->cpu)
4063 continue;
4064 if (!kvm_request_guest_time_update(vcpu))
4065 continue;
4066 if (vcpu->cpu != smp_processor_id())
4067 send_ipi++;
4068 }
4069 }
4070 spin_unlock(&kvm_lock);
4071
4072 if (freq->old < freq->new && send_ipi) {
4073 /*
4074 * We upscale the frequency. We must make sure the guest
4075 * doesn't see old kvmclock values while running with
4076 * the new frequency; otherwise we risk the guest seeing
4077 * time go backwards.
4078 *
4079 * In case we update the frequency for another cpu
4080 * (which might be in guest context) send an interrupt
4081 * to kick the cpu out of guest context. Next time
4082 * guest context is entered kvmclock will be updated,
4083 * so the guest will not see stale values.
4084 */
4085 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
4086 }
4087 return 0;
4088}
4089
4090static struct notifier_block kvmclock_cpufreq_notifier_block = {
4091 .notifier_call = kvmclock_cpufreq_notifier
4092};
4093
Zachary Amsdenb820cc02009-09-29 11:38:34 -10004094static void kvm_timer_init(void)
4095{
4096 int cpu;
4097
Zachary Amsdenb820cc02009-09-29 11:38:34 -10004098 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
Zachary Amsdenb820cc02009-09-29 11:38:34 -10004099 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
4100 CPUFREQ_TRANSITION_NOTIFIER);
Zachary Amsden6b7d7e72009-10-09 16:26:08 -10004101 for_each_online_cpu(cpu) {
4102 unsigned long khz = cpufreq_get(cpu);
4103 if (!khz)
4104 khz = tsc_khz;
4105 per_cpu(cpu_tsc_khz, cpu) = khz;
4106 }
Zachary Amsden0cca7902009-09-29 11:38:35 -10004107 } else {
4108 for_each_possible_cpu(cpu)
4109 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
Zachary Amsdenb820cc02009-09-29 11:38:34 -10004110 }
4111}
4112
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08004113static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
4114
4115static int kvm_is_in_guest(void)
4116{
4117 return percpu_read(current_vcpu) != NULL;
4118}
4119
4120static int kvm_is_user_mode(void)
4121{
4122 int user_mode = 3;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08004123
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08004124 if (percpu_read(current_vcpu))
4125 user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08004126
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08004127 return user_mode != 0;
4128}
4129
4130static unsigned long kvm_get_guest_ip(void)
4131{
4132 unsigned long ip = 0;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08004133
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08004134 if (percpu_read(current_vcpu))
4135 ip = kvm_rip_read(percpu_read(current_vcpu));
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08004136
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08004137 return ip;
4138}
4139
4140static struct perf_guest_info_callbacks kvm_guest_cbs = {
4141 .is_in_guest = kvm_is_in_guest,
4142 .is_user_mode = kvm_is_user_mode,
4143 .get_guest_ip = kvm_get_guest_ip,
4144};
4145
4146void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
4147{
4148 percpu_write(current_vcpu, vcpu);
4149}
4150EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
4151
4152void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
4153{
4154 percpu_write(current_vcpu, NULL);
4155}
4156EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
4157
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08004158int kvm_arch_init(void *opaque)
Carsten Otte043405e2007-10-10 17:16:19 +02004159{
Zachary Amsdenb820cc02009-09-29 11:38:34 -10004160 int r;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08004161 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
4162
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08004163 if (kvm_x86_ops) {
4164 printk(KERN_ERR "kvm: already loaded the other module\n");
Zhang Xiantao56c6d282007-11-18 20:43:21 +08004165 r = -EEXIST;
4166 goto out;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08004167 }
4168
4169 if (!ops->cpu_has_kvm_support()) {
4170 printk(KERN_ERR "kvm: no hardware support\n");
Zhang Xiantao56c6d282007-11-18 20:43:21 +08004171 r = -EOPNOTSUPP;
4172 goto out;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08004173 }
4174 if (ops->disabled_by_bios()) {
4175 printk(KERN_ERR "kvm: disabled by bios\n");
Zhang Xiantao56c6d282007-11-18 20:43:21 +08004176 r = -EOPNOTSUPP;
4177 goto out;
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08004178 }
4179
Avi Kivity97db56c2008-01-13 13:23:56 +02004180 r = kvm_mmu_module_init();
4181 if (r)
4182 goto out;
4183
4184 kvm_init_msr_list();
4185
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08004186 kvm_x86_ops = ops;
Zhang Xiantao56c6d282007-11-18 20:43:21 +08004187 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
Sheng Yang7b523452008-04-25 21:13:50 +08004188 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
4189 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
Sheng Yang4b12f0d2009-04-27 20:35:42 +08004190 PT_DIRTY_MASK, PT64_NX_MASK, 0);
Gerd Hoffmannc8076602009-02-04 17:52:04 +01004191
Zachary Amsdenb820cc02009-09-29 11:38:34 -10004192 kvm_timer_init();
Gerd Hoffmannc8076602009-02-04 17:52:04 +01004193
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08004194 perf_register_guest_info_callbacks(&kvm_guest_cbs);
4195
Dexuan Cui2acf9232010-06-10 11:27:12 +08004196 if (cpu_has_xsave)
4197 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
4198
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08004199 return 0;
Zhang Xiantao56c6d282007-11-18 20:43:21 +08004200
4201out:
Zhang Xiantao56c6d282007-11-18 20:43:21 +08004202 return r;
Carsten Otte043405e2007-10-10 17:16:19 +02004203}
Hollis Blanchard8776e512007-10-31 17:24:24 -05004204
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08004205void kvm_arch_exit(void)
4206{
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08004207 perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
4208
Jan Kiszka888d2562009-04-17 19:24:58 +02004209 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4210 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
4211 CPUFREQ_TRANSITION_NOTIFIER);
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08004212 kvm_x86_ops = NULL;
Zhang Xiantao56c6d282007-11-18 20:43:21 +08004213 kvm_mmu_module_exit();
4214}
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08004215
Hollis Blanchard8776e512007-10-31 17:24:24 -05004216int kvm_emulate_halt(struct kvm_vcpu *vcpu)
4217{
4218 ++vcpu->stat.halt_exits;
4219 if (irqchip_in_kernel(vcpu->kvm)) {
Avi Kivitya4535292008-04-13 17:54:35 +03004220 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004221 return 1;
4222 } else {
4223 vcpu->run->exit_reason = KVM_EXIT_HLT;
4224 return 0;
4225 }
4226}
4227EXPORT_SYMBOL_GPL(kvm_emulate_halt);
4228
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05004229static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
4230 unsigned long a1)
4231{
4232 if (is_long_mode(vcpu))
4233 return a0;
4234 else
4235 return a0 | ((gpa_t)a1 << 32);
4236}
4237
Gleb Natapov55cd8e52010-01-17 15:51:22 +02004238int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
4239{
4240 u64 param, ingpa, outgpa, ret;
4241 uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
4242 bool fast, longmode;
4243 int cs_db, cs_l;
4244
4245 /*
4246 * a hypercall generates #UD from non-zero CPL and in real mode,
4247 * per the Hyper-V spec
4248 */
Avi Kivity3eeb3282010-01-21 15:31:48 +02004249 if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
Gleb Natapov55cd8e52010-01-17 15:51:22 +02004250 kvm_queue_exception(vcpu, UD_VECTOR);
4251 return 0;
4252 }
4253
4254 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4255 longmode = is_long_mode(vcpu) && cs_l == 1;
4256
4257 if (!longmode) {
Gleb Natapovccd46932010-01-19 15:06:38 +02004258 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
4259 (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
4260 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
4261 (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
4262 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
4263 (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
Gleb Natapov55cd8e52010-01-17 15:51:22 +02004264 }
4265#ifdef CONFIG_X86_64
4266 else {
4267 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
4268 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
4269 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
4270 }
4271#endif
4272
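	/*
	 * Decode the hypercall input value: bits 0-15 hold the call code,
	 * bit 16 the "fast" flag, bits 32-43 the rep count and bits 48-59
	 * the rep start index.
	 */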
4273 code = param & 0xffff;
4274 fast = (param >> 16) & 0x1;
4275 rep_cnt = (param >> 32) & 0xfff;
4276 rep_idx = (param >> 48) & 0xfff;
4277
4278 trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
4279
Gleb Natapovc25bc162010-01-17 15:51:24 +02004280 switch (code) {
4281 case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
4282 kvm_vcpu_on_spin(vcpu);
4283 break;
4284 default:
4285 res = HV_STATUS_INVALID_HYPERCALL_CODE;
4286 break;
4287 }
Gleb Natapov55cd8e52010-01-17 15:51:22 +02004288
4289 ret = res | (((u64)rep_done & 0xfff) << 32);
4290 if (longmode) {
4291 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
4292 } else {
4293 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
4294 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
4295 }
4296
4297 return 1;
4298}
4299
Hollis Blanchard8776e512007-10-31 17:24:24 -05004300int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
4301{
4302 unsigned long nr, a0, a1, a2, a3, ret;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05004303 int r = 1;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004304
Gleb Natapov55cd8e52010-01-17 15:51:22 +02004305 if (kvm_hv_hypercall_enabled(vcpu->kvm))
4306 return kvm_hv_hypercall(vcpu);
4307
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004308 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
4309 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
4310 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
4311 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
4312 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004313
Marcelo Tosatti229456f2009-06-17 09:22:14 -03004314 trace_kvm_hypercall(nr, a0, a1, a2, a3);
Feng (Eric) Liu2714d1d2008-04-10 15:31:10 -04004315
Hollis Blanchard8776e512007-10-31 17:24:24 -05004316 if (!is_long_mode(vcpu)) {
4317 nr &= 0xFFFFFFFF;
4318 a0 &= 0xFFFFFFFF;
4319 a1 &= 0xFFFFFFFF;
4320 a2 &= 0xFFFFFFFF;
4321 a3 &= 0xFFFFFFFF;
4322 }
4323
Jan Kiszka07708c42009-08-03 18:43:28 +02004324 if (kvm_x86_ops->get_cpl(vcpu) != 0) {
4325 ret = -KVM_EPERM;
4326 goto out;
4327 }
4328
Hollis Blanchard8776e512007-10-31 17:24:24 -05004329 switch (nr) {
Avi Kivityb93463a2007-10-25 16:52:32 +02004330 case KVM_HC_VAPIC_POLL_IRQ:
4331 ret = 0;
4332 break;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05004333 case KVM_HC_MMU_OP:
4334 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
4335 break;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004336 default:
4337 ret = -KVM_ENOSYS;
4338 break;
4339 }
Jan Kiszka07708c42009-08-03 18:43:28 +02004340out:
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004341 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
Amit Shahf11c3a82008-02-21 01:00:30 +05304342 ++vcpu->stat.hypercalls;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05004343 return r;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004344}
4345EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
4346
4347int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
4348{
4349 char instruction[3];
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004350 unsigned long rip = kvm_rip_read(vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004351
Hollis Blanchard8776e512007-10-31 17:24:24 -05004352 /*
4353	 * Blow out the MMU so that no other VCPU has an active mapping,
4354	 * ensuring that the updated hypercall appears atomically across all
4355	 * VCPUs.
4356 */
4357 kvm_mmu_zap_all(vcpu->kvm);
4358
Hollis Blanchard8776e512007-10-31 17:24:24 -05004359 kvm_x86_ops->patch_hypercall(vcpu, instruction);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004360
Gleb Natapov8fe681e2010-04-28 19:15:37 +03004361 return emulator_write_emulated(rip, instruction, 3, NULL, vcpu);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004362}
4363
Hollis Blanchard8776e512007-10-31 17:24:24 -05004364void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
4365{
Gleb Natapov89a27f42010-02-16 10:51:48 +02004366 struct desc_ptr dt = { limit, base };
Hollis Blanchard8776e512007-10-31 17:24:24 -05004367
4368 kvm_x86_ops->set_gdt(vcpu, &dt);
4369}
4370
4371void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
4372{
Gleb Natapov89a27f42010-02-16 10:51:48 +02004373 struct desc_ptr dt = { limit, base };
Hollis Blanchard8776e512007-10-31 17:24:24 -05004374
4375 kvm_x86_ops->set_idt(vcpu, &dt);
4376}
4377
Dan Kenigsberg07716712007-11-21 17:10:04 +02004378static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
4379{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004380 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
4381 int j, nent = vcpu->arch.cpuid_nent;
Dan Kenigsberg07716712007-11-21 17:10:04 +02004382
4383 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
4384 /* when no next entry is found, the current entry[i] is reselected */
Nitin A Kamble0fdf8e52008-11-05 15:56:21 -08004385 for (j = i + 1; ; j = (j + 1) % nent) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004386 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
Dan Kenigsberg07716712007-11-21 17:10:04 +02004387 if (ej->function == e->function) {
4388 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
4389 return j;
4390 }
4391 }
4392 return 0; /* silence gcc, even though control never reaches here */
4393}
4394
4395/* find an entry with matching function, matching index (if needed), and that
4396 * should be read next (if it's stateful) */
4397static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
4398 u32 function, u32 index)
4399{
4400 if (e->function != function)
4401 return 0;
4402 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
4403 return 0;
4404 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
Amit Shah19355472009-01-14 16:56:00 +00004405 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
Dan Kenigsberg07716712007-11-21 17:10:04 +02004406 return 0;
4407 return 1;
4408}
4409
Alexander Grafd8017472008-11-25 20:17:11 +01004410struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
4411 u32 function, u32 index)
Hollis Blanchard8776e512007-10-31 17:24:24 -05004412{
4413 int i;
Alexander Grafd8017472008-11-25 20:17:11 +01004414 struct kvm_cpuid_entry2 *best = NULL;
Hollis Blanchard8776e512007-10-31 17:24:24 -05004415
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004416 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
Alexander Grafd8017472008-11-25 20:17:11 +01004417 struct kvm_cpuid_entry2 *e;
4418
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004419 e = &vcpu->arch.cpuid_entries[i];
Dan Kenigsberg07716712007-11-21 17:10:04 +02004420 if (is_matching_cpuid_entry(e, function, index)) {
4421 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
4422 move_to_next_stateful_cpuid_entry(vcpu, i);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004423 best = e;
4424 break;
4425 }
4426 /*
4427	 * Are the entry and the requested function both basic or both extended?
4428 */
4429 if (((e->function ^ function) & 0x80000000) == 0)
4430 if (!best || e->function > best->function)
4431 best = e;
4432 }
Alexander Grafd8017472008-11-25 20:17:11 +01004433 return best;
4434}
Sheng Yang0e851882009-12-18 16:48:46 +08004435EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
Alexander Grafd8017472008-11-25 20:17:11 +01004436
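/*
 * Report the guest's physical address width from CPUID leaf 0x80000008
 * (eax[7:0]); fall back to 36 bits when the leaf is not exposed.
 */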
Dong, Eddie82725b22009-03-30 16:21:08 +08004437int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
4438{
4439 struct kvm_cpuid_entry2 *best;
4440
Avi Kivityf7a71192010-04-11 15:33:32 +03004441 best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
4442 if (!best || best->eax < 0x80000008)
4443 goto not_found;
Dong, Eddie82725b22009-03-30 16:21:08 +08004444 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
4445 if (best)
4446 return best->eax & 0xff;
Avi Kivityf7a71192010-04-11 15:33:32 +03004447not_found:
Dong, Eddie82725b22009-03-30 16:21:08 +08004448 return 36;
4449}
4450
Alexander Grafd8017472008-11-25 20:17:11 +01004451void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
4452{
4453 u32 function, index;
4454 struct kvm_cpuid_entry2 *best;
4455
4456 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
4457 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
4458 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
4459 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
4460 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
4461 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
4462 best = kvm_find_cpuid_entry(vcpu, function, index);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004463 if (best) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004464 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
4465 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
4466 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
4467 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
Hollis Blanchard8776e512007-10-31 17:24:24 -05004468 }
Hollis Blanchard8776e512007-10-31 17:24:24 -05004469 kvm_x86_ops->skip_emulated_instruction(vcpu);
Marcelo Tosatti229456f2009-06-17 09:22:14 -03004470 trace_kvm_cpuid(function,
4471 kvm_register_read(vcpu, VCPU_REGS_RAX),
4472 kvm_register_read(vcpu, VCPU_REGS_RBX),
4473 kvm_register_read(vcpu, VCPU_REGS_RCX),
4474 kvm_register_read(vcpu, VCPU_REGS_RDX));
Hollis Blanchard8776e512007-10-31 17:24:24 -05004475}
4476EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
Hollis Blanchardd0752062007-10-31 17:24:25 -05004477
4478/*
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004479 * Check whether userspace requested an interrupt window and whether
4480 * the interrupt window is open.
4481 *
4482 * No need to exit to userspace if we already have an interrupt queued.
4483 */
Avi Kivity851ba692009-08-24 11:10:17 +03004484static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004485{
Gleb Natapov80618232009-04-21 17:44:56 +03004486 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
Avi Kivity851ba692009-08-24 11:10:17 +03004487 vcpu->run->request_interrupt_window &&
Gleb Natapov5df56642009-04-21 17:44:59 +03004488 kvm_arch_interrupt_allowed(vcpu));
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004489}
4490
Avi Kivity851ba692009-08-24 11:10:17 +03004491static void post_kvm_run_save(struct kvm_vcpu *vcpu)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004492{
Avi Kivity851ba692009-08-24 11:10:17 +03004493 struct kvm_run *kvm_run = vcpu->run;
4494
Jan Kiszka91586a32009-10-05 13:07:21 +02004495 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004496 kvm_run->cr8 = kvm_get_cr8(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004497 kvm_run->apic_base = kvm_get_apic_base(vcpu);
Jan Kiszka45312202008-12-11 16:54:54 +01004498 if (irqchip_in_kernel(vcpu->kvm))
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004499 kvm_run->ready_for_interrupt_injection = 1;
Jan Kiszka45312202008-12-11 16:54:54 +01004500 else
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004501 kvm_run->ready_for_interrupt_injection =
Gleb Natapovfa9726b2009-05-11 13:35:47 +03004502 kvm_arch_interrupt_allowed(vcpu) &&
4503 !kvm_cpu_has_interrupt(vcpu) &&
4504 !kvm_event_needs_reinjection(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004505}
4506
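/*
 * Pin the guest page that backs the virtual APIC access area so that the
 * vapic state can be synchronized around guest entry without faulting.
 */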
Avi Kivityb93463a2007-10-25 16:52:32 +02004507static void vapic_enter(struct kvm_vcpu *vcpu)
4508{
4509 struct kvm_lapic *apic = vcpu->arch.apic;
4510 struct page *page;
4511
4512 if (!apic || !apic->vapic_addr)
4513 return;
4514
4515 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
Izik Eidus72dc67a2008-02-10 18:04:15 +02004516
4517 vcpu->arch.apic->vapic_page = page;
Avi Kivityb93463a2007-10-25 16:52:32 +02004518}
4519
4520static void vapic_exit(struct kvm_vcpu *vcpu)
4521{
4522 struct kvm_lapic *apic = vcpu->arch.apic;
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004523 int idx;
Avi Kivityb93463a2007-10-25 16:52:32 +02004524
4525 if (!apic || !apic->vapic_addr)
4526 return;
4527
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004528 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivityb93463a2007-10-25 16:52:32 +02004529 kvm_release_page_dirty(apic->vapic_page);
4530 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004531 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivityb93463a2007-10-25 16:52:32 +02004532}
4533
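/*
 * Pass the current TPR and the priority of the highest pending interrupt to
 * the vendor code so it can adjust the CR8/TPR intercept threshold and only
 * exit on TPR writes that could unmask a pending interrupt.
 */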
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004534static void update_cr8_intercept(struct kvm_vcpu *vcpu)
4535{
4536 int max_irr, tpr;
4537
4538 if (!kvm_x86_ops->update_cr8_intercept)
4539 return;
4540
Avi Kivity88c808f2009-08-17 22:49:40 +03004541 if (!vcpu->arch.apic)
4542 return;
4543
Gleb Natapov8db3baa2009-05-11 13:35:54 +03004544 if (!vcpu->arch.apic->vapic_addr)
4545 max_irr = kvm_lapic_find_highest_irr(vcpu);
4546 else
4547 max_irr = -1;
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004548
4549 if (max_irr != -1)
4550 max_irr >>= 4;
4551
4552 tpr = kvm_lapic_get_cr8(vcpu);
4553
4554 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
4555}
4556
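/*
 * Event injection priority: re-deliver a pending exception first, then a
 * previously injected NMI or interrupt, and only then consider injecting a
 * new NMI or external interrupt.
 */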
Avi Kivity851ba692009-08-24 11:10:17 +03004557static void inject_pending_event(struct kvm_vcpu *vcpu)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004558{
4559 /* try to reinject previous events if any */
Gleb Natapovb59bb7b2009-07-09 15:33:51 +03004560 if (vcpu->arch.exception.pending) {
Avi Kivity5c1c85d02010-03-11 13:01:59 +02004561 trace_kvm_inj_exception(vcpu->arch.exception.nr,
4562 vcpu->arch.exception.has_error_code,
4563 vcpu->arch.exception.error_code);
Gleb Natapovb59bb7b2009-07-09 15:33:51 +03004564 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
4565 vcpu->arch.exception.has_error_code,
Joerg Roedelce7ddec2010-04-22 12:33:13 +02004566 vcpu->arch.exception.error_code,
4567 vcpu->arch.exception.reinject);
Gleb Natapovb59bb7b2009-07-09 15:33:51 +03004568 return;
4569 }
4570
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004571 if (vcpu->arch.nmi_injected) {
4572 kvm_x86_ops->set_nmi(vcpu);
4573 return;
4574 }
4575
4576 if (vcpu->arch.interrupt.pending) {
Gleb Natapov66fd3f72009-05-11 13:35:50 +03004577 kvm_x86_ops->set_irq(vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004578 return;
4579 }
4580
4581 /* try to inject new event if pending */
4582 if (vcpu->arch.nmi_pending) {
4583 if (kvm_x86_ops->nmi_allowed(vcpu)) {
4584 vcpu->arch.nmi_pending = false;
4585 vcpu->arch.nmi_injected = true;
4586 kvm_x86_ops->set_nmi(vcpu);
4587 }
4588 } else if (kvm_cpu_has_interrupt(vcpu)) {
4589 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
Gleb Natapov66fd3f72009-05-11 13:35:50 +03004590 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
4591 false);
4592 kvm_x86_ops->set_irq(vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004593 }
4594 }
4595}
4596
Dexuan Cui2acf9232010-06-10 11:27:12 +08004597static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
4598{
4599 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
4600 !vcpu->guest_xcr0_loaded) {
4601 /* kvm_set_xcr() also depends on this */
4602 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
4603 vcpu->guest_xcr0_loaded = 1;
4604 }
4605}
4606
4607static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
4608{
4609 if (vcpu->guest_xcr0_loaded) {
4610 if (vcpu->arch.xcr0 != host_xcr0)
4611 xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
4612 vcpu->guest_xcr0_loaded = 0;
4613 }
4614}
4615
Avi Kivity851ba692009-08-24 11:10:17 +03004616static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004617{
4618 int r;
Gleb Natapov6a8b1d12009-05-11 13:35:51 +03004619 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
Avi Kivity851ba692009-08-24 11:10:17 +03004620 vcpu->run->request_interrupt_window;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004621
Marcelo Tosatti2e53d632008-02-20 14:47:24 -05004622 if (vcpu->requests)
4623 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
4624 kvm_mmu_unload(vcpu);
4625
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004626 r = kvm_mmu_reload(vcpu);
4627 if (unlikely(r))
4628 goto out;
4629
Avi Kivity2f52d582008-01-16 12:49:30 +02004630 if (vcpu->requests) {
4631 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
Marcelo Tosatti2f599712008-05-27 12:10:20 -03004632 __kvm_migrate_timers(vcpu);
Gerd Hoffmannc8076602009-02-04 17:52:04 +01004633 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
4634 kvm_write_guest_time(vcpu);
Marcelo Tosatti4731d4c2008-09-23 13:18:39 -03004635 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
4636 kvm_mmu_sync_roots(vcpu);
Marcelo Tosattid4acf7e2008-06-06 16:37:35 -03004637 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
4638 kvm_x86_ops->tlb_flush(vcpu);
Avi Kivityb93463a2007-10-25 16:52:32 +02004639 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
4640 &vcpu->requests)) {
Avi Kivity851ba692009-08-24 11:10:17 +03004641 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
Avi Kivityb93463a2007-10-25 16:52:32 +02004642 r = 0;
4643 goto out;
4644 }
Joerg Roedel71c4dfa2008-02-26 16:49:16 +01004645 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
Avi Kivity851ba692009-08-24 11:10:17 +03004646 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
Joerg Roedel71c4dfa2008-02-26 16:49:16 +01004647 r = 0;
4648 goto out;
4649 }
Avi Kivity02daab22009-12-30 12:40:26 +02004650 if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
4651 vcpu->fpu_active = 0;
4652 kvm_x86_ops->fpu_deactivate(vcpu);
4653 }
Avi Kivity2f52d582008-01-16 12:49:30 +02004654 }
Avi Kivityb93463a2007-10-25 16:52:32 +02004655
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004656 preempt_disable();
4657
4658 kvm_x86_ops->prepare_guest_switch(vcpu);
Avi Kivity2608d7a2010-01-21 15:31:45 +02004659 if (vcpu->fpu_active)
4660 kvm_load_guest_fpu(vcpu);
Dexuan Cui2acf9232010-06-10 11:27:12 +08004661 kvm_load_guest_xcr0(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004662
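	/*
	 * Publish that we are about to enter guest mode; together with the
	 * re-check below (done with interrupts disabled) this ensures that
	 * requests, signals or a remote kick raised after this point are
	 * noticed before the actual VM entry.
	 */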
Avi Kivityd94e1dc2010-05-03 16:54:48 +03004663 atomic_set(&vcpu->guest_mode, 1);
4664 smp_wmb();
4665
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004666 local_irq_disable();
4667
Avi Kivityd94e1dc2010-05-03 16:54:48 +03004668 if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
4669 || need_resched() || signal_pending(current)) {
4670 atomic_set(&vcpu->guest_mode, 0);
4671 smp_wmb();
Avi Kivity6c142802008-01-15 18:27:32 +02004672 local_irq_enable();
4673 preempt_enable();
4674 r = 1;
4675 goto out;
4676 }
4677
Avi Kivity851ba692009-08-24 11:10:17 +03004678 inject_pending_event(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004679
Gleb Natapov6a8b1d12009-05-11 13:35:51 +03004680 /* enable NMI/IRQ window open exits if needed */
4681 if (vcpu->arch.nmi_pending)
4682 kvm_x86_ops->enable_nmi_window(vcpu);
4683 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
4684 kvm_x86_ops->enable_irq_window(vcpu);
4685
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004686 if (kvm_lapic_enabled(vcpu)) {
Gleb Natapov8db3baa2009-05-11 13:35:54 +03004687 update_cr8_intercept(vcpu);
4688 kvm_lapic_sync_to_vapic(vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004689 }
Avi Kivityb93463a2007-10-25 16:52:32 +02004690
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004691 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Marcelo Tosatti3200f402008-03-29 20:17:59 -03004692
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004693 kvm_guest_enter();
4694
Jan Kiszka42dbaa52008-12-15 13:52:10 +01004695 if (unlikely(vcpu->arch.switch_db_regs)) {
Jan Kiszka42dbaa52008-12-15 13:52:10 +01004696 set_debugreg(0, 7);
4697 set_debugreg(vcpu->arch.eff_db[0], 0);
4698 set_debugreg(vcpu->arch.eff_db[1], 1);
4699 set_debugreg(vcpu->arch.eff_db[2], 2);
4700 set_debugreg(vcpu->arch.eff_db[3], 3);
4701 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004702
Marcelo Tosatti229456f2009-06-17 09:22:14 -03004703 trace_kvm_entry(vcpu->vcpu_id);
Avi Kivity851ba692009-08-24 11:10:17 +03004704 kvm_x86_ops->run(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004705
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004706 /*
4707 * If the guest has used debug registers, at least dr7
4708 * will be disabled while returning to the host.
4709 * If we don't have active breakpoints in the host, we don't
4710 * care about the messed up debug address registers. But if
4711 * we have some of them active, restore the old state.
4712 */
Frederic Weisbecker59d8eb52009-11-10 11:03:12 +01004713 if (hw_breakpoint_active())
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004714 hw_breakpoint_restore();
Jan Kiszka42dbaa52008-12-15 13:52:10 +01004715
Avi Kivityd94e1dc2010-05-03 16:54:48 +03004716 atomic_set(&vcpu->guest_mode, 0);
4717 smp_wmb();
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004718 local_irq_enable();
4719
4720 ++vcpu->stat.exits;
4721
4722 /*
4723 * We must have an instruction between local_irq_enable() and
4724 * kvm_guest_exit(), so the timer interrupt isn't delayed by
4725 * the interrupt shadow. The stat.exits increment will do nicely.
4726 * But we need to prevent reordering, hence this barrier():
4727 */
4728 barrier();
4729
4730 kvm_guest_exit();
4731
4732 preempt_enable();
4733
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004734 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Marcelo Tosatti3200f402008-03-29 20:17:59 -03004735
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004736 /*
4737 * Profile KVM exit RIPs:
4738 */
4739 if (unlikely(prof_on == KVM_PROFILING)) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004740 unsigned long rip = kvm_rip_read(vcpu);
4741 profile_hit(KVM_PROFILING, (void *)rip);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004742 }
4743
Avi Kivity298101d2007-11-25 13:41:11 +02004744
Avi Kivityb93463a2007-10-25 16:52:32 +02004745 kvm_lapic_sync_from_vapic(vcpu);
4746
Avi Kivity851ba692009-08-24 11:10:17 +03004747 r = kvm_x86_ops->handle_exit(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004748out:
Marcelo Tosattid7690172008-09-08 15:23:48 -03004749 return r;
4750}
4751
Gleb Natapov09cec752009-03-23 15:11:44 +02004752
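/*
 * Outer run loop: keep calling vcpu_enter_guest() (or block while the vcpu
 * is halted) until an exit to userspace, a signal or an error forces us out.
 */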
Avi Kivity851ba692009-08-24 11:10:17 +03004753static int __vcpu_run(struct kvm_vcpu *vcpu)
Marcelo Tosattid7690172008-09-08 15:23:48 -03004754{
4755 int r;
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004756 struct kvm *kvm = vcpu->kvm;
Marcelo Tosattid7690172008-09-08 15:23:48 -03004757
4758 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
Jan Kiszka1b10bf32008-09-30 10:41:06 +02004759 pr_debug("vcpu %d received sipi with vector # %x\n",
4760 vcpu->vcpu_id, vcpu->arch.sipi_vector);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004761 kvm_lapic_reset(vcpu);
Gleb Natapov5f179282008-10-07 15:42:33 +02004762 r = kvm_arch_vcpu_reset(vcpu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004763 if (r)
4764 return r;
4765 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004766 }
4767
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004768 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004769 vapic_enter(vcpu);
4770
4771 r = 1;
4772 while (r > 0) {
Gleb Natapovaf2152f2008-09-22 14:28:53 +03004773 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
Avi Kivity851ba692009-08-24 11:10:17 +03004774 r = vcpu_enter_guest(vcpu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004775 else {
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004776 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004777 kvm_vcpu_block(vcpu);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004778 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004779 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
Gleb Natapov09cec752009-03-23 15:11:44 +02004780 {
4781 switch(vcpu->arch.mp_state) {
4782 case KVM_MP_STATE_HALTED:
Marcelo Tosattid7690172008-09-08 15:23:48 -03004783 vcpu->arch.mp_state =
Gleb Natapov09cec752009-03-23 15:11:44 +02004784 KVM_MP_STATE_RUNNABLE;
4785 case KVM_MP_STATE_RUNNABLE:
4786 break;
4787 case KVM_MP_STATE_SIPI_RECEIVED:
4788 default:
4789 r = -EINTR;
4790 break;
4791 }
4792 }
Marcelo Tosattid7690172008-09-08 15:23:48 -03004793 }
4794
Gleb Natapov09cec752009-03-23 15:11:44 +02004795 if (r <= 0)
4796 break;
4797
4798 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
4799 if (kvm_cpu_has_pending_timer(vcpu))
4800 kvm_inject_pending_timer_irqs(vcpu);
4801
Avi Kivity851ba692009-08-24 11:10:17 +03004802 if (dm_request_for_irq_injection(vcpu)) {
Gleb Natapov09cec752009-03-23 15:11:44 +02004803 r = -EINTR;
Avi Kivity851ba692009-08-24 11:10:17 +03004804 vcpu->run->exit_reason = KVM_EXIT_INTR;
Gleb Natapov09cec752009-03-23 15:11:44 +02004805 ++vcpu->stat.request_irq_exits;
4806 }
4807 if (signal_pending(current)) {
4808 r = -EINTR;
Avi Kivity851ba692009-08-24 11:10:17 +03004809 vcpu->run->exit_reason = KVM_EXIT_INTR;
Gleb Natapov09cec752009-03-23 15:11:44 +02004810 ++vcpu->stat.signal_exits;
4811 }
4812 if (need_resched()) {
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004813 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
Gleb Natapov09cec752009-03-23 15:11:44 +02004814 kvm_resched(vcpu);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004815 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004816 }
4817 }
4818
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004819 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004820
Avi Kivityb93463a2007-10-25 16:52:32 +02004821 vapic_exit(vcpu);
4822
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004823 return r;
4824}
4825
4826int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4827{
4828 int r;
4829 sigset_t sigsaved;
4830
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004831 if (vcpu->sigset_active)
4832 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
4833
Avi Kivityac9f6dc2008-07-06 15:48:31 +03004834 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
4835 kvm_vcpu_block(vcpu);
Marcelo Tosattid7690172008-09-08 15:23:48 -03004836 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
Avi Kivityac9f6dc2008-07-06 15:48:31 +03004837 r = -EAGAIN;
4838 goto out;
4839 }
4840
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004841 /* re-sync apic's tpr */
4842 if (!irqchip_in_kernel(vcpu->kvm))
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004843 kvm_set_cr8(vcpu, kvm_run->cr8);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004844
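	/*
	 * Complete any interrupted PIO/MMIO emulation before re-entering the
	 * guest: feed the data returned by userspace back into the emulator
	 * and resume the emulation.
	 */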
Gleb Natapov92bf9742010-03-18 15:20:28 +02004845 if (vcpu->arch.pio.count || vcpu->mmio_needed ||
4846 vcpu->arch.emulate_ctxt.restart) {
4847 if (vcpu->mmio_needed) {
4848 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
4849 vcpu->mmio_read_completed = 1;
4850 vcpu->mmio_needed = 0;
Gleb Natapov79729952010-03-18 15:20:24 +02004851 }
Gleb Natapov7567cae2010-03-09 12:01:10 +02004852 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Gleb Natapov5cd21912010-03-18 15:20:26 +02004853 r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02004854 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Gleb Natapov6d77dbf2010-05-10 11:16:56 +03004855 if (r != EMULATE_DONE) {
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004856 r = 0;
4857 goto out;
4858 }
4859 }
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004860 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
4861 kvm_register_write(vcpu, VCPU_REGS_RAX,
4862 kvm_run->hypercall.ret);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004863
Avi Kivity851ba692009-08-24 11:10:17 +03004864 r = __vcpu_run(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004865
4866out:
Marcelo Tosattif1d86e42010-05-03 23:04:27 -03004867 post_kvm_run_save(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004868 if (vcpu->sigset_active)
4869 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
4870
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004871 return r;
4872}
4873
4874int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4875{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004876 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4877 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4878 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4879 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4880 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4881 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
4882 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4883 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004884#ifdef CONFIG_X86_64
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004885 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
4886 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
4887 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
4888 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
4889 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
4890 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
4891 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
4892 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004893#endif
4894
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004895 regs->rip = kvm_rip_read(vcpu);
Jan Kiszka91586a32009-10-05 13:07:21 +02004896 regs->rflags = kvm_get_rflags(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004897
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004898 return 0;
4899}
4900
4901int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4902{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004903 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
4904 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
4905 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
4906 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
4907 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
4908 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
4909 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
4910 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004911#ifdef CONFIG_X86_64
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004912 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
4913 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
4914 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
4915 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
4916 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
4917 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
4918 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
4919 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004920#endif
4921
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004922 kvm_rip_write(vcpu, regs->rip);
Jan Kiszka91586a32009-10-05 13:07:21 +02004923 kvm_set_rflags(vcpu, regs->rflags);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004924
Jan Kiszkab4f14ab2008-04-30 17:59:04 +02004925 vcpu->arch.exception.pending = false;
4926
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004927 return 0;
4928}
4929
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004930void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
4931{
4932 struct kvm_segment cs;
4933
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004934 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004935 *db = cs.db;
4936 *l = cs.l;
4937}
4938EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
4939
4940int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4941 struct kvm_sregs *sregs)
4942{
Gleb Natapov89a27f42010-02-16 10:51:48 +02004943 struct desc_ptr dt;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004944
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004945 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
4946 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
4947 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
4948 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
4949 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
4950 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004951
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02004952 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
4953 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004954
4955 kvm_x86_ops->get_idt(vcpu, &dt);
Gleb Natapov89a27f42010-02-16 10:51:48 +02004956 sregs->idt.limit = dt.size;
4957 sregs->idt.base = dt.address;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004958 kvm_x86_ops->get_gdt(vcpu, &dt);
Gleb Natapov89a27f42010-02-16 10:51:48 +02004959 sregs->gdt.limit = dt.size;
4960 sregs->gdt.base = dt.address;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004961
Avi Kivity4d4ec082009-12-29 18:07:30 +02004962 sregs->cr0 = kvm_read_cr0(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004963 sregs->cr2 = vcpu->arch.cr2;
4964 sregs->cr3 = vcpu->arch.cr3;
Avi Kivityfc78f512009-12-07 12:16:48 +02004965 sregs->cr4 = kvm_read_cr4(vcpu);
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02004966 sregs->cr8 = kvm_get_cr8(vcpu);
Avi Kivityf6801df2010-01-21 15:31:50 +02004967 sregs->efer = vcpu->arch.efer;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004968 sregs->apic_base = kvm_get_apic_base(vcpu);
4969
Gleb Natapov923c61b2009-05-11 13:35:48 +03004970 memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004971
Gleb Natapov36752c92009-05-11 13:35:53 +03004972 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
Gleb Natapov14d0bc12009-04-21 17:45:11 +03004973 set_bit(vcpu->arch.interrupt.nr,
4974 (unsigned long *)sregs->interrupt_bitmap);
Gleb Natapov16d7a192009-04-21 17:45:10 +03004975
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004976 return 0;
4977}
4978
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03004979int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4980 struct kvm_mp_state *mp_state)
4981{
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03004982 mp_state->mp_state = vcpu->arch.mp_state;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03004983 return 0;
4984}
4985
4986int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4987 struct kvm_mp_state *mp_state)
4988{
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03004989 vcpu->arch.mp_state = mp_state->mp_state;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03004990 return 0;
4991}
4992
Jan Kiszkae269fb22010-04-14 15:51:09 +02004993int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
4994 bool has_error_code, u32 error_code)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004995{
Gleb Natapov4d2179e2010-04-28 19:15:42 +03004996 struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
Gleb Natapovceffb452010-03-18 15:20:19 +02004997 int cs_db, cs_l, ret;
4998 cache_all_regs(vcpu);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05004999
Gleb Natapovceffb452010-03-18 15:20:19 +02005000 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
Izik Eidus37817f22008-03-24 23:14:53 +02005001
Gleb Natapovceffb452010-03-18 15:20:19 +02005002 vcpu->arch.emulate_ctxt.vcpu = vcpu;
5003 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
5004 vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
5005 vcpu->arch.emulate_ctxt.mode =
5006 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
5007 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
5008 ? X86EMUL_MODE_VM86 : cs_l
5009 ? X86EMUL_MODE_PROT64 : cs_db
5010 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
Gleb Natapov4d2179e2010-04-28 19:15:42 +03005011 memset(c, 0, sizeof(struct decode_cache));
5012 memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
Izik Eidus37817f22008-03-24 23:14:53 +02005013
Gleb Natapovceffb452010-03-18 15:20:19 +02005014 ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
Jan Kiszkae269fb22010-04-14 15:51:09 +02005015 tss_selector, reason, has_error_code,
5016 error_code);
Izik Eidus37817f22008-03-24 23:14:53 +02005017
Gleb Natapovc6975182010-02-18 12:15:01 +02005018 if (ret)
Gleb Natapov19d04432010-04-15 12:29:50 +03005019 return EMULATE_FAIL;
Gleb Natapovc6975182010-02-18 12:15:01 +02005020
Gleb Natapov4d2179e2010-04-28 19:15:42 +03005021 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
Gleb Natapov95c55882010-04-28 19:15:39 +03005022 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
Gleb Natapov19d04432010-04-15 12:29:50 +03005023 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
5024 return EMULATE_DONE;
Izik Eidus37817f22008-03-24 23:14:53 +02005025}
5026EXPORT_SYMBOL_GPL(kvm_task_switch);
5027
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005028int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5029 struct kvm_sregs *sregs)
5030{
5031 int mmu_reset_needed = 0;
Gleb Natapov923c61b2009-05-11 13:35:48 +03005032 int pending_vec, max_bits;
Gleb Natapov89a27f42010-02-16 10:51:48 +02005033 struct desc_ptr dt;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005034
Gleb Natapov89a27f42010-02-16 10:51:48 +02005035 dt.size = sregs->idt.limit;
5036 dt.address = sregs->idt.base;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005037 kvm_x86_ops->set_idt(vcpu, &dt);
Gleb Natapov89a27f42010-02-16 10:51:48 +02005038 dt.size = sregs->gdt.limit;
5039 dt.address = sregs->gdt.base;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005040 kvm_x86_ops->set_gdt(vcpu, &dt);
5041
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005042 vcpu->arch.cr2 = sregs->cr2;
5043 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
Jan Kiszkadc7e7952009-07-01 20:52:03 +02005044 vcpu->arch.cr3 = sregs->cr3;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005045
Avi Kivity2d3ad1f2008-02-24 11:20:43 +02005046 kvm_set_cr8(vcpu, sregs->cr8);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005047
Avi Kivityf6801df2010-01-21 15:31:50 +02005048 mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005049 kvm_x86_ops->set_efer(vcpu, sregs->efer);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005050 kvm_set_apic_base(vcpu, sregs->apic_base);
5051
Avi Kivity4d4ec082009-12-29 18:07:30 +02005052 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005053 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
Paul Knowlesd7306162008-02-06 11:02:35 +00005054 vcpu->arch.cr0 = sregs->cr0;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005055
Avi Kivityfc78f512009-12-07 12:16:48 +02005056 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005057 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
Marcelo Tosatti7c93be442009-10-26 16:48:33 -02005058 if (!is_long_mode(vcpu) && is_pae(vcpu)) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005059 load_pdptrs(vcpu, vcpu->arch.cr3);
Marcelo Tosatti7c93be442009-10-26 16:48:33 -02005060 mmu_reset_needed = 1;
5061 }
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005062
5063 if (mmu_reset_needed)
5064 kvm_mmu_reset_context(vcpu);
5065
Gleb Natapov923c61b2009-05-11 13:35:48 +03005066 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
5067 pending_vec = find_first_bit(
5068 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
5069 if (pending_vec < max_bits) {
Gleb Natapov66fd3f72009-05-11 13:35:50 +03005070 kvm_queue_interrupt(vcpu, pending_vec, false);
Gleb Natapov923c61b2009-05-11 13:35:48 +03005071 pr_debug("Set back pending irq %d\n", pending_vec);
5072 if (irqchip_in_kernel(vcpu->kvm))
5073 kvm_pic_clear_isr_ack(vcpu->kvm);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005074 }
5075
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02005076 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5077 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5078 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5079 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5080 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5081 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005082
Guillaume Thouvenin3e6e0aa2008-05-27 10:18:46 +02005083 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5084 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005085
Mikhail Ershov5f0269f2009-08-03 14:58:25 +03005086 update_cr8_intercept(vcpu);
5087
Marcelo Tosatti9c3e4aa2008-09-10 16:40:55 -03005088 /* Older userspace won't unhalt the vcpu on reset. */
Gleb Natapovc5af89b2009-06-09 15:56:26 +03005089 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
Marcelo Tosatti9c3e4aa2008-09-10 16:40:55 -03005090 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
Avi Kivity3eeb3282010-01-21 15:31:48 +02005091 !is_protmode(vcpu))
Marcelo Tosatti9c3e4aa2008-09-10 16:40:55 -03005092 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5093
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005094 return 0;
5095}
5096
Jan Kiszkad0bfb942008-12-15 13:52:10 +01005097int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
5098 struct kvm_guest_debug *dbg)
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005099{
Jan Kiszka355be0b2009-10-03 00:31:21 +02005100 unsigned long rflags;
Jan Kiszkaae675ef2008-12-15 13:52:10 +01005101 int i, r;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005102
Jan Kiszka4f926bf22009-10-30 12:46:59 +01005103 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
5104 r = -EBUSY;
5105 if (vcpu->arch.exception.pending)
Avi Kivity2122ff52010-05-13 11:25:04 +03005106 goto out;
Jan Kiszka4f926bf22009-10-30 12:46:59 +01005107 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
5108 kvm_queue_exception(vcpu, DB_VECTOR);
5109 else
5110 kvm_queue_exception(vcpu, BP_VECTOR);
5111 }
5112
Jan Kiszka91586a32009-10-05 13:07:21 +02005113 /*
5114 * Read rflags as long as potentially injected trace flags are still
5115 * filtered out.
5116 */
5117 rflags = kvm_get_rflags(vcpu);
Jan Kiszka355be0b2009-10-03 00:31:21 +02005118
5119 vcpu->guest_debug = dbg->control;
5120 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
5121 vcpu->guest_debug = 0;
5122
5123 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
Jan Kiszkaae675ef2008-12-15 13:52:10 +01005124 for (i = 0; i < KVM_NR_DB_REGS; ++i)
5125 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
5126 vcpu->arch.switch_db_regs =
5127 (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
5128 } else {
5129 for (i = 0; i < KVM_NR_DB_REGS; i++)
5130 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
5131 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
5132 }
5133
Jan Kiszkaf92653e2010-02-23 17:47:55 +01005134 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5135 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
5136 get_segment_base(vcpu, VCPU_SREG_CS);
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005137
Jan Kiszka91586a32009-10-05 13:07:21 +02005138 /*
5139 * Trigger an rflags update that will inject or remove the trace
5140 * flags.
5141 */
5142 kvm_set_rflags(vcpu, rflags);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01005143
Jan Kiszka355be0b2009-10-03 00:31:21 +02005144 kvm_x86_ops->set_guest_debug(vcpu, dbg);
5145
Jan Kiszka4f926bf22009-10-30 12:46:59 +01005146 r = 0;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005147
Avi Kivity2122ff52010-05-13 11:25:04 +03005148out:
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05005149
5150 return r;
5151}
5152
5153/*
Zhang Xiantao8b006792007-11-16 13:05:55 +08005154 * Translate a guest virtual address to a guest physical address.
5155 */
5156int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
5157 struct kvm_translation *tr)
5158{
5159 unsigned long vaddr = tr->linear_address;
5160 gpa_t gpa;
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005161 int idx;
Zhang Xiantao8b006792007-11-16 13:05:55 +08005162
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005163 idx = srcu_read_lock(&vcpu->kvm->srcu);
Gleb Natapov1871c602010-02-10 14:21:32 +02005164 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005165 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Zhang Xiantao8b006792007-11-16 13:05:55 +08005166 tr->physical_address = gpa;
5167 tr->valid = gpa != UNMAPPED_GVA;
5168 tr->writeable = 1;
5169 tr->usermode = 0;
Zhang Xiantao8b006792007-11-16 13:05:55 +08005170
5171 return 0;
5172}
5173
Hollis Blanchardd0752062007-10-31 17:24:25 -05005174int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5175{
Sheng Yang98918832010-05-17 17:08:28 +08005176 struct i387_fxsave_struct *fxsave =
5177 &vcpu->arch.guest_fpu.state->fxsave;
Hollis Blanchardd0752062007-10-31 17:24:25 -05005178
Hollis Blanchardd0752062007-10-31 17:24:25 -05005179 memcpy(fpu->fpr, fxsave->st_space, 128);
5180 fpu->fcw = fxsave->cwd;
5181 fpu->fsw = fxsave->swd;
5182 fpu->ftwx = fxsave->twd;
5183 fpu->last_opcode = fxsave->fop;
5184 fpu->last_ip = fxsave->rip;
5185 fpu->last_dp = fxsave->rdp;
5186 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
5187
Hollis Blanchardd0752062007-10-31 17:24:25 -05005188 return 0;
5189}
5190
5191int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5192{
Sheng Yang98918832010-05-17 17:08:28 +08005193 struct i387_fxsave_struct *fxsave =
5194 &vcpu->arch.guest_fpu.state->fxsave;
Hollis Blanchardd0752062007-10-31 17:24:25 -05005195
Hollis Blanchardd0752062007-10-31 17:24:25 -05005196 memcpy(fxsave->st_space, fpu->fpr, 128);
5197 fxsave->cwd = fpu->fcw;
5198 fxsave->swd = fpu->fsw;
5199 fxsave->twd = fpu->ftwx;
5200 fxsave->fop = fpu->last_opcode;
5201 fxsave->rip = fpu->last_ip;
5202 fxsave->rdp = fpu->last_dp;
5203 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
5204
Hollis Blanchardd0752062007-10-31 17:24:25 -05005205 return 0;
5206}
5207
Jan Kiszka10ab25c2010-05-25 16:01:50 +02005208int fx_init(struct kvm_vcpu *vcpu)
Hollis Blanchardd0752062007-10-31 17:24:25 -05005209{
Jan Kiszka10ab25c2010-05-25 16:01:50 +02005210 int err;
5211
5212 err = fpu_alloc(&vcpu->arch.guest_fpu);
5213 if (err)
5214 return err;
5215
Sheng Yang98918832010-05-17 17:08:28 +08005216 fpu_finit(&vcpu->arch.guest_fpu);
Hollis Blanchardd0752062007-10-31 17:24:25 -05005217
Dexuan Cui2acf9232010-06-10 11:27:12 +08005218 /*
5219 * Ensure guest xcr0 is valid for loading
5220 */
5221 vcpu->arch.xcr0 = XSTATE_FP;
5222
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005223 vcpu->arch.cr0 |= X86_CR0_ET;
Jan Kiszka10ab25c2010-05-25 16:01:50 +02005224
5225 return 0;
Hollis Blanchardd0752062007-10-31 17:24:25 -05005226}
5227EXPORT_SYMBOL_GPL(fx_init);
5228
Sheng Yang98918832010-05-17 17:08:28 +08005229static void fx_free(struct kvm_vcpu *vcpu)
5230{
5231 fpu_free(&vcpu->arch.guest_fpu);
5232}
5233
Hollis Blanchardd0752062007-10-31 17:24:25 -05005234void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
5235{
Avi Kivity2608d7a2010-01-21 15:31:45 +02005236 if (vcpu->guest_fpu_loaded)
Hollis Blanchardd0752062007-10-31 17:24:25 -05005237 return;
5238
Dexuan Cui2acf9232010-06-10 11:27:12 +08005239 /*
5240	 * Restore all possible states in the guest and assume the host would
5241	 * use all available bits; the guest xcr0 itself is loaded later.
5243 */
5244 kvm_put_guest_xcr0(vcpu);
Hollis Blanchardd0752062007-10-31 17:24:25 -05005245 vcpu->guest_fpu_loaded = 1;
Sheng Yang7cf30852010-05-17 17:08:27 +08005246 unlazy_fpu(current);
Sheng Yang98918832010-05-17 17:08:28 +08005247 fpu_restore_checking(&vcpu->arch.guest_fpu);
Avi Kivity0c048512010-01-21 15:31:52 +02005248 trace_kvm_fpu(1);
Hollis Blanchardd0752062007-10-31 17:24:25 -05005249}
Hollis Blanchardd0752062007-10-31 17:24:25 -05005250
5251void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
5252{
Dexuan Cui2acf9232010-06-10 11:27:12 +08005253 kvm_put_guest_xcr0(vcpu);
5254
Hollis Blanchardd0752062007-10-31 17:24:25 -05005255 if (!vcpu->guest_fpu_loaded)
5256 return;
5257
5258 vcpu->guest_fpu_loaded = 0;
Sheng Yang98918832010-05-17 17:08:28 +08005259 fpu_save_init(&vcpu->arch.guest_fpu);
Avi Kivityf096ed82007-11-18 13:54:33 +02005260 ++vcpu->stat.fpu_reload;
Avi Kivity02daab22009-12-30 12:40:26 +02005261 set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
Avi Kivity0c048512010-01-21 15:31:52 +02005262 trace_kvm_fpu(0);
Hollis Blanchardd0752062007-10-31 17:24:25 -05005263}
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005264
5265void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
5266{
Joerg Roedel7f1ea202009-02-25 16:08:31 +01005267 if (vcpu->arch.time_page) {
5268 kvm_release_page_dirty(vcpu->arch.time_page);
5269 vcpu->arch.time_page = NULL;
5270 }
5271
Sheng Yang98918832010-05-17 17:08:28 +08005272 fx_free(vcpu);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005273 kvm_x86_ops->vcpu_free(vcpu);
5274}
5275
5276struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5277 unsigned int id)
5278{
Avi Kivity26e52152007-11-20 15:30:24 +02005279 return kvm_x86_ops->vcpu_create(kvm, id);
5280}
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005281
Avi Kivity26e52152007-11-20 15:30:24 +02005282int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
5283{
5284 int r;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005285
Sheng Yang0bed3b52008-10-09 16:01:54 +08005286 vcpu->arch.mtrr_state.have_fixed = 1;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005287 vcpu_load(vcpu);
5288 r = kvm_arch_vcpu_reset(vcpu);
5289 if (r == 0)
5290 r = kvm_mmu_setup(vcpu);
5291 vcpu_put(vcpu);
5292 if (r < 0)
5293 goto free_vcpu;
5294
Avi Kivity26e52152007-11-20 15:30:24 +02005295 return 0;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005296free_vcpu:
5297 kvm_x86_ops->vcpu_free(vcpu);
Avi Kivity26e52152007-11-20 15:30:24 +02005298 return r;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005299}
5300
Hollis Blanchardd40ccc62007-11-19 14:04:43 -06005301void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005302{
5303 vcpu_load(vcpu);
5304 kvm_mmu_unload(vcpu);
5305 vcpu_put(vcpu);
5306
Sheng Yang98918832010-05-17 17:08:28 +08005307 fx_free(vcpu);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005308 kvm_x86_ops->vcpu_free(vcpu);
5309}
5310
5311int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
5312{
Jan Kiszka448fa4a2008-09-26 09:30:48 +02005313 vcpu->arch.nmi_pending = false;
5314 vcpu->arch.nmi_injected = false;
5315
Jan Kiszka42dbaa52008-12-15 13:52:10 +01005316 vcpu->arch.switch_db_regs = 0;
5317 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
5318 vcpu->arch.dr6 = DR6_FIXED_1;
5319 vcpu->arch.dr7 = DR7_FIXED_1;
5320
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005321 return kvm_x86_ops->vcpu_reset(vcpu);
5322}
5323
Alexander Graf10474ae2009-09-15 11:37:46 +02005324int kvm_arch_hardware_enable(void *garbage)
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005325{
Zachary Amsden0cca7902009-09-29 11:38:35 -10005326 /*
5327	 * Since this may be called from a hotplug notification,
5328 * we can't get the CPU frequency directly.
5329 */
5330 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5331 int cpu = raw_smp_processor_id();
5332 per_cpu(cpu_tsc_khz, cpu) = 0;
5333 }
Avi Kivity18863bd2009-09-07 11:12:18 +03005334
5335 kvm_shared_msr_cpu_online();
5336
Alexander Graf10474ae2009-09-15 11:37:46 +02005337 return kvm_x86_ops->hardware_enable(garbage);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005338}
5339
5340void kvm_arch_hardware_disable(void *garbage)
5341{
5342 kvm_x86_ops->hardware_disable(garbage);
Avi Kivity3548bab2009-11-28 14:18:47 +02005343 drop_user_return_notifiers(garbage);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005344}
5345
5346int kvm_arch_hardware_setup(void)
5347{
5348 return kvm_x86_ops->hardware_setup();
5349}
5350
5351void kvm_arch_hardware_unsetup(void)
5352{
5353 kvm_x86_ops->hardware_unsetup();
5354}
5355
5356void kvm_arch_check_processor_compat(void *rtn)
5357{
5358 kvm_x86_ops->check_processor_compatibility(rtn);
5359}
5360
5361int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
5362{
5363 struct page *page;
5364 struct kvm *kvm;
5365 int r;
5366
5367 BUG_ON(vcpu->kvm == NULL);
5368 kvm = vcpu->kvm;
5369
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005370 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
Gleb Natapovc5af89b2009-06-09 15:56:26 +03005371 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
Avi Kivitya4535292008-04-13 17:54:35 +03005372 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005373 else
Avi Kivitya4535292008-04-13 17:54:35 +03005374 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005375
5376 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
5377 if (!page) {
5378 r = -ENOMEM;
5379 goto fail;
5380 }
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005381 vcpu->arch.pio_data = page_address(page);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005382
5383 r = kvm_mmu_create(vcpu);
5384 if (r < 0)
5385 goto fail_free_pio_data;
5386
5387 if (irqchip_in_kernel(kvm)) {
5388 r = kvm_create_lapic(vcpu);
5389 if (r < 0)
5390 goto fail_mmu_destroy;
5391 }
5392
Huang Ying890ca9a2009-05-11 16:48:15 +08005393 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
5394 GFP_KERNEL);
5395 if (!vcpu->arch.mce_banks) {
5396 r = -ENOMEM;
Wei Yongjun443c39b2010-01-22 14:21:29 +08005397 goto fail_free_lapic;
Huang Ying890ca9a2009-05-11 16:48:15 +08005398 }
5399 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
5400
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005401 return 0;
Wei Yongjun443c39b2010-01-22 14:21:29 +08005402fail_free_lapic:
5403 kvm_free_lapic(vcpu);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005404fail_mmu_destroy:
5405 kvm_mmu_destroy(vcpu);
5406fail_free_pio_data:
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005407 free_page((unsigned long)vcpu->arch.pio_data);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005408fail:
5409 return r;
5410}
5411
5412void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
5413{
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005414 int idx;
5415
Wei Yongjun36cb93f2010-01-22 14:18:47 +08005416 kfree(vcpu->arch.mce_banks);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005417 kvm_free_lapic(vcpu);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005418 idx = srcu_read_lock(&vcpu->kvm->srcu);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005419 kvm_mmu_destroy(vcpu);
Marcelo Tosattif656ce02009-12-23 14:35:25 -02005420 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005421 free_page((unsigned long)vcpu->arch.pio_data);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08005422}
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005423
5424struct kvm *kvm_arch_create_vm(void)
5425{
5426 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
5427
5428 if (!kvm)
5429 return ERR_PTR(-ENOMEM);
5430
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02005431 kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
5432 if (!kvm->arch.aliases) {
5433 kfree(kvm);
5434 return ERR_PTR(-ENOMEM);
5435 }
5436
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08005437 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
Ben-Ami Yassour4d5c5d02008-07-28 19:26:26 +03005438 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005439
Sheng Yang5550af42008-10-15 20:15:06 +08005440 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
5441 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
5442
Marcelo Tosatti53f658b32008-12-11 20:45:05 +01005443 rdtscll(kvm->arch.vm_init_tsc);
5444
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005445 return kvm;
5446}
5447
5448static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
5449{
5450 vcpu_load(vcpu);
5451 kvm_mmu_unload(vcpu);
5452 vcpu_put(vcpu);
5453}
5454
5455static void kvm_free_vcpus(struct kvm *kvm)
5456{
5457 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03005458 struct kvm_vcpu *vcpu;
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005459
5460 /*
5461 * Unpin any mmu pages first.
5462 */
Gleb Natapov988a2ca2009-06-09 15:56:29 +03005463 kvm_for_each_vcpu(i, vcpu, kvm)
5464 kvm_unload_vcpu_mmu(vcpu);
5465 kvm_for_each_vcpu(i, vcpu, kvm)
5466 kvm_arch_vcpu_free(vcpu);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005467
Gleb Natapov988a2ca2009-06-09 15:56:29 +03005468 mutex_lock(&kvm->lock);
5469 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
5470 kvm->vcpus[i] = NULL;
5471
5472 atomic_set(&kvm->online_vcpus, 0);
5473 mutex_unlock(&kvm->lock);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005474}
5475
Sheng Yangad8ba2c2009-01-06 10:03:02 +08005476void kvm_arch_sync_events(struct kvm *kvm)
5477{
Sheng Yangba4cef32009-01-06 10:03:03 +08005478 kvm_free_all_assigned_devices(kvm);
Sheng Yangad8ba2c2009-01-06 10:03:02 +08005479}
5480
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005481void kvm_arch_destroy_vm(struct kvm *kvm)
5482{
Sheng Yang6eb55812008-10-31 12:37:41 +08005483 kvm_iommu_unmap_guest(kvm);
Sheng Yang78376992008-01-28 05:10:22 +08005484 kvm_free_pit(kvm);
Zhang Xiantaod7deeeb02007-12-14 10:17:34 +08005485 kfree(kvm->arch.vpic);
5486 kfree(kvm->arch.vioapic);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005487 kvm_free_vcpus(kvm);
5488 kvm_free_physmem(kvm);
Avi Kivity3d458302008-03-25 11:26:13 +02005489 if (kvm->arch.apic_access_page)
5490 put_page(kvm->arch.apic_access_page);
Sheng Yangb7ebfb02008-04-25 21:44:52 +08005491 if (kvm->arch.ept_identity_pagetable)
5492 put_page(kvm->arch.ept_identity_pagetable);
Marcelo Tosatti64749202010-01-19 12:45:23 -02005493 cleanup_srcu_struct(&kvm->srcu);
Marcelo Tosattifef9cce2009-12-23 14:35:17 -02005494 kfree(kvm->arch.aliases);
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +08005495 kfree(kvm);
5496}
Zhang Xiantao0de10342007-11-20 16:25:04 +08005497
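/*
 * Prepare a memory slot before it is installed; for legacy !user_alloc
 * slots the kernel maps anonymous memory on behalf of userspace and
 * records its address in the slot.
 */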
Marcelo Tosattif7784b82009-12-23 14:35:18 -02005498int kvm_arch_prepare_memory_region(struct kvm *kvm,
5499 struct kvm_memory_slot *memslot,
Zhang Xiantao0de10342007-11-20 16:25:04 +08005500 struct kvm_memory_slot old,
Marcelo Tosattif7784b82009-12-23 14:35:18 -02005501 struct kvm_userspace_memory_region *mem,
Zhang Xiantao0de10342007-11-20 16:25:04 +08005502 int user_alloc)
5503{
Marcelo Tosattif7784b82009-12-23 14:35:18 -02005504 int npages = memslot->npages;
Zhang Xiantao0de10342007-11-20 16:25:04 +08005505
5506 /* To keep backward compatibility with older userspace,
5507 * x86 needs to handle the !user_alloc case.
5508 */
5509 if (!user_alloc) {
5510 if (npages && !old.rmap) {
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005511 unsigned long userspace_addr;
5512
Izik Eidus72dc67a2008-02-10 18:04:15 +02005513 down_write(&current->mm->mmap_sem);
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005514 userspace_addr = do_mmap(NULL, 0,
5515 npages * PAGE_SIZE,
5516 PROT_READ | PROT_WRITE,
Avi Kivityacee3c02008-08-26 17:22:47 +03005517 MAP_PRIVATE | MAP_ANONYMOUS,
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005518 0);
Izik Eidus72dc67a2008-02-10 18:04:15 +02005519 up_write(&current->mm->mmap_sem);
Zhang Xiantao0de10342007-11-20 16:25:04 +08005520
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005521 if (IS_ERR((void *)userspace_addr))
5522 return PTR_ERR((void *)userspace_addr);
5523
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02005524 memslot->userspace_addr = userspace_addr;
Zhang Xiantao0de10342007-11-20 16:25:04 +08005525 }
5526 }
5527
5529 return 0;
5530}
5531
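/*
 * Finish a memory slot update: unmap the backing memory of a removed
 * kernel-allocated slot, recompute the MMU page quota if userspace has
 * not fixed one, and remove write access from the slot's shadow mappings.
 */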
5532void kvm_arch_commit_memory_region(struct kvm *kvm,
5533 struct kvm_userspace_memory_region *mem,
5534 struct kvm_memory_slot old,
5535 int user_alloc)
5536{
5538 int npages = mem->memory_size >> PAGE_SHIFT;
5539
5540 if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
5541 int ret;
5542
5543 down_write(&current->mm->mmap_sem);
5544 ret = do_munmap(current->mm, old.userspace_addr,
5545 old.npages * PAGE_SIZE);
5546 up_write(&current->mm->mmap_sem);
5547 if (ret < 0)
5548 printk(KERN_WARNING
5549 "kvm_vm_ioctl_set_memory_region: "
5550 "failed to munmap memory\n");
5551 }
5552
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03005553 spin_lock(&kvm->mmu_lock);
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08005554 if (!kvm->arch.n_requested_mmu_pages) {
Zhang Xiantao0de10342007-11-20 16:25:04 +08005555 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
5556 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
5557 }
5558
5559 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
Marcelo Tosatti7c8a83b2009-05-12 18:55:43 -03005560 spin_unlock(&kvm->mmu_lock);
Zhang Xiantao0de10342007-11-20 16:25:04 +08005561}
Zhang Xiantao1d737c82007-12-14 09:35:10 +08005562
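/* Zap every shadow page and make all vcpus reload their MMU state. */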
Marcelo Tosatti34d4cb82008-07-10 20:49:31 -03005563void kvm_arch_flush_shadow(struct kvm *kvm)
5564{
5565 kvm_mmu_zap_all(kvm);
Marcelo Tosatti8986ecc2009-05-12 18:55:45 -03005566 kvm_reload_remote_mmus(kvm);
Marcelo Tosatti34d4cb82008-07-10 20:49:31 -03005567}
5568
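/*
 * A vcpu is runnable if it is in the RUNNABLE or SIPI_RECEIVED state,
 * has an NMI pending, or has an interrupt that can currently be injected.
 */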
Zhang Xiantao1d737c82007-12-14 09:35:10 +08005569int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
5570{
Avi Kivitya4535292008-04-13 17:54:35 +03005571 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
Gleb Natapova1b37102009-07-09 15:33:52 +03005572 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
5573 || vcpu->arch.nmi_pending ||
5574 (kvm_arch_interrupt_allowed(vcpu) &&
5575 kvm_cpu_has_interrupt(vcpu));
Zhang Xiantao1d737c82007-12-14 09:35:10 +08005576}
Zhang Xiantao57361992007-12-17 14:21:40 +08005577
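/*
 * Kick a vcpu: wake it up if it is waiting in halt, and send a reschedule
 * IPI to its cpu if it is currently executing guest code elsewhere.
 */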
Zhang Xiantao57361992007-12-17 14:21:40 +08005578void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
5579{
Marcelo Tosatti32f88402009-05-07 17:55:12 -03005580 int me;
5581 int cpu = vcpu->cpu;
Zhang Xiantao57361992007-12-17 14:21:40 +08005582
5583 if (waitqueue_active(&vcpu->wq)) {
5584 wake_up_interruptible(&vcpu->wq);
5585 ++vcpu->stat.halt_wakeup;
5586 }
Marcelo Tosatti32f88402009-05-07 17:55:12 -03005587
5588 me = get_cpu();
5589 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
Avi Kivityd94e1dc2010-05-03 16:54:48 +03005590 if (atomic_xchg(&vcpu->guest_mode, 0))
Marcelo Tosatti32f88402009-05-07 17:55:12 -03005591 smp_send_reschedule(cpu);
Marcelo Tosattie9571ed2008-04-11 15:01:22 -03005592 put_cpu();
Zhang Xiantao57361992007-12-17 14:21:40 +08005593}
Gleb Natapov78646122009-03-23 12:12:11 +02005594
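/* Ask the vendor-specific code whether an interrupt can be injected now. */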
5595int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
5596{
5597 return kvm_x86_ops->interrupt_allowed(vcpu);
5598}
Marcelo Tosatti229456f2009-06-17 09:22:14 -03005599
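/* Check whether the given linear address matches CS.base plus the current RIP. */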
Jan Kiszkaf92653e2010-02-23 17:47:55 +01005600bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
5601{
5602 unsigned long current_rip = kvm_rip_read(vcpu) +
5603 get_segment_base(vcpu, VCPU_SREG_CS);
5604
5605 return current_rip == linear_rip;
5606}
5607EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
5608
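/*
 * Read the guest's RFLAGS, hiding the TF bit that is set internally while
 * userspace single-stepping is active.
 */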
Jan Kiszka94fe45d2009-10-18 13:24:44 +02005609unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
5610{
5611 unsigned long rflags;
5612
5613 rflags = kvm_x86_ops->get_rflags(vcpu);
5614 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
Jan Kiszkac310bac2010-02-23 17:47:58 +01005615 rflags &= ~X86_EFLAGS_TF;
Jan Kiszka94fe45d2009-10-18 13:24:44 +02005616 return rflags;
5617}
5618EXPORT_SYMBOL_GPL(kvm_get_rflags);
5619
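/*
 * Write the guest's RFLAGS, re-inserting TF if userspace single-stepping
 * is active and the guest is still at the recorded single-step rip.
 */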
5620void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
5621{
5622 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
Jan Kiszkaf92653e2010-02-23 17:47:55 +01005623 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
Jan Kiszkac310bac2010-02-23 17:47:58 +01005624 rflags |= X86_EFLAGS_TF;
Jan Kiszka94fe45d2009-10-18 13:24:44 +02005625 kvm_x86_ops->set_rflags(vcpu, rflags);
5626}
5627EXPORT_SYMBOL_GPL(kvm_set_rflags);
5628
Marcelo Tosatti229456f2009-06-17 09:22:14 -03005629EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
5630EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
5631EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
5632EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
5633EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
Joerg Roedel0ac406d2009-10-09 16:08:27 +02005634EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02005635EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
Joerg Roedel17897f32009-10-09 16:08:29 +02005636EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
Joerg Roedel236649d2009-10-09 16:08:30 +02005637EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
Joerg Roedelec1ff792009-10-09 16:08:31 +02005638EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
Joerg Roedel532a46b2009-10-09 16:08:32 +02005639EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
Joerg Roedel2e554e82010-02-24 18:59:14 +01005640EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);