/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>

#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/kvm_para.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT			(1 <<  0)
#define SVM_FEATURE_LBRV		(1 <<  1)
#define SVM_FEATURE_SVML		(1 <<  2)
#define SVM_FEATURE_NRIP		(1 <<  3)
#define SVM_FEATURE_TSC_RATE		(1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN		(1 <<  5)
#define SVM_FEATURE_FLUSH_ASID		(1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST	(1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER	(1 << 10)

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD		0xffffff0000000000ULL

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;

	unsigned int3_injected;
	unsigned long int3_rip;
	u32 apf_reason;

	u64 tsc_ratio;
};

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID		0xffffffffU

static struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,			.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,	.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,			.always = true  },
	{ .index = MSR_FS_BASE,			.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,		.always = true  },
	{ .index = MSR_LSTAR,			.always = true  },
	{ .index = MSR_CSTAR,			.always = true  },
	{ .index = MSR_SYSCALL_MASK,		.always = true  },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,	.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,	.always = false },
	{ .index = MSR_INVALID,			.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static int nested = 1;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

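/*
 * The VMCB "clean" field tells hardware which cached VMCB areas are still
 * valid; the helpers below clear or set those bits (see the VMCB_* enum
 * above) as guest state is modified.
 */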
static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

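/*
 * While the vcpu runs a nested guest, the effective intercept masks in the
 * active VMCB are the union of what the host state (hsave) and the nested
 * guest requested; recompute them whenever either side changes.
 */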
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret & mask;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0)
		svm->next_rip = svm->vmcb->control.next_rip;

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!cpu_has_amd_erratum(amd_erratum_383))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();
}

static int svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
		       me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);

	if (!sd) {
		printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
		       me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	native_store_gdt(&gdt_descr);
	gdt = (struct desc_struct *)gdt_descr.address;
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
	}

	svm_init_erratum_383();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!sd->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

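/*
 * The TSC ratio MSR holds a 32.32 fixed-point multiplier; scale the raw
 * TSC by multiplying the integer and fractional parts separately so the
 * 64-bit intermediate results do not overflow.
 */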
static u64 __scale_tsc(u64 ratio, u64 tsc)
{
	u64 mult, frac, _tsc;

	mult = ratio >> 32;
	frac = ratio & ((1ULL << 32) - 1);

	_tsc  = tsc;
	_tsc *= mult;
	_tsc += (tsc >> 32) * frac;
	_tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;

	return _tsc;
}

static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 _tsc = tsc;

	if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
		_tsc = __scale_tsc(svm->tsc_ratio, tsc);

	return _tsc;
}

static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 ratio;
	u64 khz;

	/* TSC scaling supported? */
	if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR))
		return;

	/* TSC-Scaling disabled or guest TSC same frequency as host TSC? */
	if (user_tsc_khz == 0) {
		vcpu->arch.virtual_tsc_khz = 0;
		svm->tsc_ratio = TSC_RATIO_DEFAULT;
		return;
	}

	khz = user_tsc_khz;

	/* TSC scaling required - calculate ratio */
	ratio = khz << 32;
	do_div(ratio, tsc_khz);

	if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
		WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
			  user_tsc_khz);
		return;
	}
	vcpu->arch.virtual_tsc_khz = user_tsc_khz;
	svm->tsc_ratio = ratio;
}

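/*
 * The TSC offset visible to a nested guest lives in the active VMCB while
 * the L1 offset is kept in the hsave area; keep both consistent when the
 * offset is rewritten or adjusted.
 */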
Zachary Amsdenf4e1b3c2010-08-19 22:07:16 -1000920static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
921{
922 struct vcpu_svm *svm = to_svm(vcpu);
923 u64 g_tsc_offset = 0;
924
Joerg Roedel20307532010-11-29 17:51:48 +0100925 if (is_guest_mode(vcpu)) {
Zachary Amsdenf4e1b3c2010-08-19 22:07:16 -1000926 g_tsc_offset = svm->vmcb->control.tsc_offset -
927 svm->nested.hsave->control.tsc_offset;
928 svm->nested.hsave->control.tsc_offset = offset;
929 }
930
931 svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
Joerg Roedel116a0a22010-12-03 11:45:49 +0100932
933 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
Zachary Amsdenf4e1b3c2010-08-19 22:07:16 -1000934}
935
Zachary Amsdene48672f2010-08-19 22:07:23 -1000936static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
937{
938 struct vcpu_svm *svm = to_svm(vcpu);
939
940 svm->vmcb->control.tsc_offset += adjustment;
Joerg Roedel20307532010-11-29 17:51:48 +0100941 if (is_guest_mode(vcpu))
Zachary Amsdene48672f2010-08-19 22:07:23 -1000942 svm->nested.hsave->control.tsc_offset += adjustment;
Joerg Roedel116a0a22010-12-03 11:45:49 +0100943 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
Zachary Amsdene48672f2010-08-19 22:07:23 -1000944}
945
Joerg Roedele6101a92008-02-13 18:58:45 +0100946static void init_vmcb(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800947{
Joerg Roedele6101a92008-02-13 18:58:45 +0100948 struct vmcb_control_area *control = &svm->vmcb->control;
949 struct vmcb_save_area *save = &svm->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800950
Avi Kivitybff78272010-01-07 13:16:08 +0200951 svm->vcpu.fpu_active = 1;
Roedel, Joerg4ee546b2010-12-03 10:50:51 +0100952 svm->vcpu.arch.hflags = 0;
Avi Kivitybff78272010-01-07 13:16:08 +0200953
Roedel, Joerg4ee546b2010-12-03 10:50:51 +0100954 set_cr_intercept(svm, INTERCEPT_CR0_READ);
955 set_cr_intercept(svm, INTERCEPT_CR3_READ);
956 set_cr_intercept(svm, INTERCEPT_CR4_READ);
957 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
958 set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
959 set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
960 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800961
Joerg Roedel3aed0412010-11-30 18:03:58 +0100962 set_dr_intercept(svm, INTERCEPT_DR0_READ);
963 set_dr_intercept(svm, INTERCEPT_DR1_READ);
964 set_dr_intercept(svm, INTERCEPT_DR2_READ);
965 set_dr_intercept(svm, INTERCEPT_DR3_READ);
966 set_dr_intercept(svm, INTERCEPT_DR4_READ);
967 set_dr_intercept(svm, INTERCEPT_DR5_READ);
968 set_dr_intercept(svm, INTERCEPT_DR6_READ);
969 set_dr_intercept(svm, INTERCEPT_DR7_READ);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800970
Joerg Roedel3aed0412010-11-30 18:03:58 +0100971 set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
972 set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
973 set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
974 set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
975 set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
976 set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
977 set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
978 set_dr_intercept(svm, INTERCEPT_DR7_WRITE);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800979
Joerg Roedel18c918c2010-11-30 18:03:59 +0100980 set_exception_intercept(svm, PF_VECTOR);
981 set_exception_intercept(svm, UD_VECTOR);
982 set_exception_intercept(svm, MC_VECTOR);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800983
Joerg Roedel8a05a1b2010-11-30 18:04:00 +0100984 set_intercept(svm, INTERCEPT_INTR);
985 set_intercept(svm, INTERCEPT_NMI);
986 set_intercept(svm, INTERCEPT_SMI);
987 set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
988 set_intercept(svm, INTERCEPT_CPUID);
989 set_intercept(svm, INTERCEPT_INVD);
990 set_intercept(svm, INTERCEPT_HLT);
991 set_intercept(svm, INTERCEPT_INVLPG);
992 set_intercept(svm, INTERCEPT_INVLPGA);
993 set_intercept(svm, INTERCEPT_IOIO_PROT);
994 set_intercept(svm, INTERCEPT_MSR_PROT);
995 set_intercept(svm, INTERCEPT_TASK_SWITCH);
996 set_intercept(svm, INTERCEPT_SHUTDOWN);
997 set_intercept(svm, INTERCEPT_VMRUN);
998 set_intercept(svm, INTERCEPT_VMMCALL);
999 set_intercept(svm, INTERCEPT_VMLOAD);
1000 set_intercept(svm, INTERCEPT_VMSAVE);
1001 set_intercept(svm, INTERCEPT_STGI);
1002 set_intercept(svm, INTERCEPT_CLGI);
1003 set_intercept(svm, INTERCEPT_SKINIT);
1004 set_intercept(svm, INTERCEPT_WBINVD);
1005 set_intercept(svm, INTERCEPT_MONITOR);
1006 set_intercept(svm, INTERCEPT_MWAIT);
Joerg Roedel81dd35d2010-12-07 17:15:06 +01001007 set_intercept(svm, INTERCEPT_XSETBV);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001008
1009 control->iopm_base_pa = iopm_base;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001010 control->msrpm_base_pa = __pa(svm->msrpm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001011 control->int_ctl = V_INTR_MASKING_MASK;
1012
1013 init_seg(&save->es);
1014 init_seg(&save->ss);
1015 init_seg(&save->ds);
1016 init_seg(&save->fs);
1017 init_seg(&save->gs);
1018
1019 save->cs.selector = 0xf000;
1020 /* Executable/Readable Code Segment */
1021 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1022 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1023 save->cs.limit = 0xffff;
Avi Kivityd92899a2007-02-12 00:54:38 -08001024 /*
1025 * cs.base should really be 0xffff0000, but vmx can't handle that, so
1026 * be consistent with it.
1027 *
1028 * Replace when we have real mode working for vmx.
1029 */
1030 save->cs.base = 0xf0000;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001031
1032 save->gdtr.limit = 0xffff;
1033 save->idtr.limit = 0xffff;
1034
1035 init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1036 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1037
Marcelo Tosattieaa48512010-08-31 19:13:14 -03001038 svm_set_efer(&svm->vcpu, 0);
Mike Dayd77c26f2007-10-08 09:02:08 -04001039 save->dr6 = 0xffff0ff0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001040 save->dr7 = 0x400;
Avi Kivityf6e78472010-08-02 15:30:20 +03001041 kvm_set_rflags(&svm->vcpu, 2);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001042 save->rip = 0x0000fff0;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001043 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001044
Joerg Roedele0231712010-02-24 18:59:10 +01001045 /*
1046 * This is the guest-visible cr0 value.
Eduardo Habkost18fa0002009-10-24 02:49:59 -02001047 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
Avi Kivity6aa8b732006-12-10 02:21:36 -08001048 */
Marcelo Tosatti678041a2010-08-31 19:13:13 -03001049 svm->vcpu.arch.cr0 = 0;
1050 (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
Eduardo Habkost18fa0002009-10-24 02:49:59 -02001051
Rusty Russell66aee912007-07-17 23:34:16 +10001052 save->cr4 = X86_CR4_PAE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001053 /* rdx = ?? */
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001054
1055 if (npt_enabled) {
1056 /* Setup VMCB for Nested Paging */
1057 control->nested_ctl = 1;
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001058 clr_intercept(svm, INTERCEPT_TASK_SWITCH);
1059 clr_intercept(svm, INTERCEPT_INVLPG);
Joerg Roedel18c918c2010-11-30 18:03:59 +01001060 clr_exception_intercept(svm, PF_VECTOR);
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001061 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1062 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001063 save->g_pat = 0x0007040600070406ULL;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001064 save->cr3 = 0;
1065 save->cr4 = 0;
1066 }
Joerg Roedelf40f6a42010-12-03 15:25:15 +01001067 svm->asid_generation = 0;
Alexander Graf1371d902008-11-25 20:17:04 +01001068
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001069 svm->nested.vmcb = 0;
Joerg Roedel2af91942009-08-07 11:49:28 +02001070 svm->vcpu.arch.hflags = 0;
1071
Avi Kivity2a6b20b2010-11-09 16:15:42 +02001072 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
Mark Langsdorf565d0992009-10-06 14:25:02 -05001073 control->pause_filter_count = 3000;
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001074 set_intercept(svm, INTERCEPT_PAUSE);
Mark Langsdorf565d0992009-10-06 14:25:02 -05001075 }
1076
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001077 mark_all_dirty(svm->vmcb);
1078
Joerg Roedel2af91942009-08-07 11:49:28 +02001079 enable_gif(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001080}
1081
Avi Kivitye00c8cf2007-10-21 11:00:39 +02001082static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
Avi Kivity04d2cc72007-09-10 18:10:54 +03001083{
1084 struct vcpu_svm *svm = to_svm(vcpu);
1085
Joerg Roedele6101a92008-02-13 18:58:45 +01001086 init_vmcb(svm);
Avi Kivity70433382007-11-07 12:57:23 +02001087
Gleb Natapovc5af89b2009-06-09 15:56:26 +03001088 if (!kvm_vcpu_is_bsp(vcpu)) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001089 kvm_rip_write(vcpu, 0);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001090 svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
1091 svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
Avi Kivity70433382007-11-07 12:57:23 +02001092 }
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001093 vcpu->arch.regs_avail = ~0;
1094 vcpu->arch.regs_dirty = ~0;
Avi Kivitye00c8cf2007-10-21 11:00:39 +02001095
1096 return 0;
Avi Kivity04d2cc72007-09-10 18:10:54 +03001097}
1098
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001099static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001100{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001101 struct vcpu_svm *svm;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001102 struct page *page;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001103 struct page *msrpm_pages;
Alexander Grafb286d5d2008-11-25 20:17:05 +01001104 struct page *hsave_page;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001105 struct page *nested_msrpm_pages;
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001106 int err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001107
Rusty Russellc16f8622007-07-30 21:12:19 +10001108 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001109 if (!svm) {
1110 err = -ENOMEM;
1111 goto out;
1112 }
1113
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001114 svm->tsc_ratio = TSC_RATIO_DEFAULT;
1115
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001116 err = kvm_vcpu_init(&svm->vcpu, kvm, id);
1117 if (err)
1118 goto free_svm;
1119
Joerg Roedelf65c2292008-02-13 18:58:46 +01001120 err = -ENOMEM;
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001121 page = alloc_page(GFP_KERNEL);
1122 if (!page)
1123 goto uninit;
1124
Joerg Roedelf65c2292008-02-13 18:58:46 +01001125 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1126 if (!msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001127 goto free_page1;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001128
1129 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1130 if (!nested_msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001131 goto free_page2;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001132
Alexander Grafb286d5d2008-11-25 20:17:05 +01001133 hsave_page = alloc_page(GFP_KERNEL);
1134 if (!hsave_page)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001135 goto free_page3;
1136
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001137 svm->nested.hsave = page_address(hsave_page);
Alexander Grafb286d5d2008-11-25 20:17:05 +01001138
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001139 svm->msrpm = page_address(msrpm_pages);
1140 svm_vcpu_init_msrpm(svm->msrpm);
1141
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001142 svm->nested.msrpm = page_address(nested_msrpm_pages);
Joerg Roedel323c3d82010-03-01 15:34:37 +01001143 svm_vcpu_init_msrpm(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01001144
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001145 svm->vmcb = page_address(page);
1146 clear_page(svm->vmcb);
1147 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
1148 svm->asid_generation = 0;
Joerg Roedele6101a92008-02-13 18:58:45 +01001149 init_vmcb(svm);
Zachary Amsden99e3e302010-08-19 22:07:17 -10001150 kvm_write_tsc(&svm->vcpu, 0);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001151
Jan Kiszka10ab25c2010-05-25 16:01:50 +02001152 err = fx_init(&svm->vcpu);
1153 if (err)
1154 goto free_page4;
1155
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001156 svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
Gleb Natapovc5af89b2009-06-09 15:56:26 +03001157 if (kvm_vcpu_is_bsp(&svm->vcpu))
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001158 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001159
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001160 return &svm->vcpu;
Avi Kivity36241b82006-12-22 01:05:20 -08001161
Jan Kiszka10ab25c2010-05-25 16:01:50 +02001162free_page4:
1163 __free_page(hsave_page);
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001164free_page3:
1165 __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1166free_page2:
1167 __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1168free_page1:
1169 __free_page(page);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001170uninit:
1171 kvm_vcpu_uninit(&svm->vcpu);
1172free_svm:
Rusty Russella4770342007-08-01 14:46:11 +10001173 kmem_cache_free(kvm_vcpu_cache, svm);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001174out:
1175 return ERR_PTR(err);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001176}
1177
1178static void svm_free_vcpu(struct kvm_vcpu *vcpu)
1179{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001180 struct vcpu_svm *svm = to_svm(vcpu);
1181
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001182 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
Joerg Roedelf65c2292008-02-13 18:58:46 +01001183 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001184 __free_page(virt_to_page(svm->nested.hsave));
1185 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001186 kvm_vcpu_uninit(vcpu);
Rusty Russella4770342007-08-01 14:46:11 +10001187 kmem_cache_free(kvm_vcpu_cache, svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001188}
1189
Avi Kivity15ad7142007-07-11 18:17:21 +03001190static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001191{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001192 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03001193 int i;
Avi Kivity0cc50642007-03-25 12:07:27 +02001194
Avi Kivity0cc50642007-03-25 12:07:27 +02001195 if (unlikely(cpu != vcpu->cpu)) {
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03001196 svm->asid_generation = 0;
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001197 mark_all_dirty(svm->vmcb);
Avi Kivity0cc50642007-03-25 12:07:27 +02001198 }
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001199
Avi Kivity82ca2d12010-10-21 12:20:34 +02001200#ifdef CONFIG_X86_64
1201 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
1202#endif
Avi Kivitydacccfd2010-10-21 12:20:33 +02001203 savesegment(fs, svm->host.fs);
1204 savesegment(gs, svm->host.gs);
1205 svm->host.ldt = kvm_read_ldt();
1206
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001207 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001208 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001209
1210 if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
1211 svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
1212 __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
1213 wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
1214 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001215}
1216
1217static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1218{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001219 struct vcpu_svm *svm = to_svm(vcpu);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001220 int i;
1221
Avi Kivitye1beb1d2007-11-18 13:50:24 +02001222 ++vcpu->stat.host_state_reload;
Avi Kivitydacccfd2010-10-21 12:20:33 +02001223 kvm_load_ldt(svm->host.ldt);
1224#ifdef CONFIG_X86_64
1225 loadsegment(fs, svm->host.fs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02001226 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
Joerg Roedel893a5ab2011-01-14 16:45:01 +01001227 load_gs_index(svm->host.gs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02001228#else
Avi Kivity831ca602011-03-08 16:09:51 +02001229#ifdef CONFIG_X86_32_LAZY_GS
Avi Kivitydacccfd2010-10-21 12:20:33 +02001230 loadsegment(gs, svm->host.gs);
1231#endif
Avi Kivity831ca602011-03-08 16:09:51 +02001232#endif
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001233 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001234 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001235}
1236
Avi Kivity6aa8b732006-12-10 02:21:36 -08001237static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1238{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001239 return to_svm(vcpu)->vmcb->save.rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001240}
1241
1242static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1243{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001244 to_svm(vcpu)->vmcb->save.rflags = rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001245}
1246
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001247static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1248{
1249 switch (reg) {
1250 case VCPU_EXREG_PDPTR:
1251 BUG_ON(!npt_enabled);
Avi Kivity9f8fe502010-12-05 17:30:00 +02001252 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001253 break;
1254 default:
1255 BUG();
1256 }
1257}
1258
Alexander Graff0b85052008-11-25 20:17:01 +01001259static void svm_set_vintr(struct vcpu_svm *svm)
1260{
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001261 set_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01001262}
1263
1264static void svm_clear_vintr(struct vcpu_svm *svm)
1265{
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001266 clr_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01001267}
1268
Avi Kivity6aa8b732006-12-10 02:21:36 -08001269static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1270{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001271 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001272
1273 switch (seg) {
1274 case VCPU_SREG_CS: return &save->cs;
1275 case VCPU_SREG_DS: return &save->ds;
1276 case VCPU_SREG_ES: return &save->es;
1277 case VCPU_SREG_FS: return &save->fs;
1278 case VCPU_SREG_GS: return &save->gs;
1279 case VCPU_SREG_SS: return &save->ss;
1280 case VCPU_SREG_TR: return &save->tr;
1281 case VCPU_SREG_LDTR: return &save->ldtr;
1282 }
1283 BUG();
Al Viro8b6d44c2007-02-09 16:38:40 +00001284 return NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001285}
1286
1287static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1288{
1289 struct vmcb_seg *s = svm_seg(vcpu, seg);
1290
1291 return s->base;
1292}
1293
1294static void svm_get_segment(struct kvm_vcpu *vcpu,
1295 struct kvm_segment *var, int seg)
1296{
1297 struct vmcb_seg *s = svm_seg(vcpu, seg);
1298
1299 var->base = s->base;
1300 var->limit = s->limit;
1301 var->selector = s->selector;
1302 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1303 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1304 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1305 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1306 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1307 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1308 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1309 var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
Amit Shah25022ac2008-10-27 09:04:17 +00001310
Joerg Roedele0231712010-02-24 18:59:10 +01001311 /*
1312 * AMD's VMCB does not have an explicit unusable field, so emulate it
Andre Przywara19bca6a2009-04-28 12:45:30 +02001313 * for cross vendor migration purposes by "not present"
1314 */
1315 var->unusable = !var->present || (var->type == 0);
1316
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001317 switch (seg) {
1318 case VCPU_SREG_CS:
1319 /*
1320 * SVM always stores 0 for the 'G' bit in the CS selector in
1321 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
1322 * Intel's VMENTRY has a check on the 'G' bit.
1323 */
Amit Shah25022ac2008-10-27 09:04:17 +00001324 var->g = s->limit > 0xfffff;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001325 break;
1326 case VCPU_SREG_TR:
1327 /*
1328 * Work around a bug where the busy flag in the tr selector
1329 * isn't exposed
1330 */
Amit Shahc0d09822008-10-27 09:04:18 +00001331 var->type |= 0x2;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001332 break;
1333 case VCPU_SREG_DS:
1334 case VCPU_SREG_ES:
1335 case VCPU_SREG_FS:
1336 case VCPU_SREG_GS:
1337 /*
1338 * The accessed bit must always be set in the segment
1339 * descriptor cache, although it can be cleared in the
1340 * descriptor, the cached bit always remains at 1. Since
1341 * Intel has a check on this, set it here to support
1342 * cross-vendor migration.
1343 */
1344 if (!var->unusable)
1345 var->type |= 0x1;
1346 break;
Andre Przywarab586eb02009-04-28 12:45:43 +02001347 case VCPU_SREG_SS:
Joerg Roedele0231712010-02-24 18:59:10 +01001348 /*
1349 * On AMD CPUs sometimes the DB bit in the segment
Andre Przywarab586eb02009-04-28 12:45:43 +02001350 * descriptor is left as 1, although the whole segment has
1351 * been made unusable. Clear it here to pass an Intel VMX
1352 * entry check when cross vendor migrating.
1353 */
1354 if (var->unusable)
1355 var->db = 0;
1356 break;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001357 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001358}
1359
Izik Eidus2e4d2652008-03-24 19:38:34 +02001360static int svm_get_cpl(struct kvm_vcpu *vcpu)
1361{
1362 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1363
1364 return save->cpl;
1365}
1366
Gleb Natapov89a27f42010-02-16 10:51:48 +02001367static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001368{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001369 struct vcpu_svm *svm = to_svm(vcpu);
1370
Gleb Natapov89a27f42010-02-16 10:51:48 +02001371 dt->size = svm->vmcb->save.idtr.limit;
1372 dt->address = svm->vmcb->save.idtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001373}
1374
Gleb Natapov89a27f42010-02-16 10:51:48 +02001375static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001376{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001377 struct vcpu_svm *svm = to_svm(vcpu);
1378
Gleb Natapov89a27f42010-02-16 10:51:48 +02001379 svm->vmcb->save.idtr.limit = dt->size;
1380 svm->vmcb->save.idtr.base = dt->address ;
Joerg Roedel17a703c2010-12-03 11:45:56 +01001381 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001382}
1383
Gleb Natapov89a27f42010-02-16 10:51:48 +02001384static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001385{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001386 struct vcpu_svm *svm = to_svm(vcpu);
1387
Gleb Natapov89a27f42010-02-16 10:51:48 +02001388 dt->size = svm->vmcb->save.gdtr.limit;
1389 dt->address = svm->vmcb->save.gdtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001390}
1391
Gleb Natapov89a27f42010-02-16 10:51:48 +02001392static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001393{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001394 struct vcpu_svm *svm = to_svm(vcpu);
1395
Gleb Natapov89a27f42010-02-16 10:51:48 +02001396 svm->vmcb->save.gdtr.limit = dt->size;
1397 svm->vmcb->save.gdtr.base = dt->address ;
Joerg Roedel17a703c2010-12-03 11:45:56 +01001398 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001399}
1400
Avi Kivitye8467fd2009-12-29 18:43:06 +02001401static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
1402{
1403}
1404
Avi Kivityaff48ba2010-12-05 18:56:11 +02001405static void svm_decache_cr3(struct kvm_vcpu *vcpu)
1406{
1407}
1408
Anthony Liguori25c4c272007-04-27 09:29:21 +03001409static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -08001410{
1411}
1412
Avi Kivityd2251572010-01-06 10:55:27 +02001413static void update_cr0_intercept(struct vcpu_svm *svm)
1414{
1415 ulong gcr0 = svm->vcpu.arch.cr0;
1416 u64 *hcr0 = &svm->vmcb->save.cr0;
1417
1418 if (!svm->vcpu.fpu_active)
1419 *hcr0 |= SVM_CR0_SELECTIVE_MASK;
1420 else
1421 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
1422 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
1423
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001424 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02001425
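 /*
 * In short: once the CR0 value the guest expects matches what the
 * hardware will actually use, and the FPU is active (so no #NM
 * trapping is needed), CR0 reads and writes no longer have to be
 * intercepted.
 */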
1426 if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001427 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
1428 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02001429 } else {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001430 set_cr_intercept(svm, INTERCEPT_CR0_READ);
1431 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02001432 }
1433}
1434
Avi Kivity6aa8b732006-12-10 02:21:36 -08001435static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1436{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001437 struct vcpu_svm *svm = to_svm(vcpu);
1438
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001439#ifdef CONFIG_X86_64
Avi Kivityf6801df2010-01-21 15:31:50 +02001440 if (vcpu->arch.efer & EFER_LME) {
Rusty Russell707d92fa2007-07-17 23:19:08 +10001441 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02001442 vcpu->arch.efer |= EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06001443 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001444 }
1445
Mike Dayd77c26f2007-10-08 09:02:08 -04001446 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02001447 vcpu->arch.efer &= ~EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06001448 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001449 }
1450 }
1451#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001452 vcpu->arch.cr0 = cr0;
Avi Kivity888f9f32010-01-10 12:14:04 +02001453
1454 if (!npt_enabled)
1455 cr0 |= X86_CR0_PG | X86_CR0_WP;
Avi Kivity02daab22009-12-30 12:40:26 +02001456
1457 if (!vcpu->fpu_active)
Joerg Roedel334df502008-01-21 13:09:33 +01001458 cr0 |= X86_CR0_TS;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001459 /*
1460 * Re-enable caching here because the QEMU BIOS
1461 * does not do it; otherwise this results in a noticeable delay
1462 * at reboot
1463 */
1464 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001465 svm->vmcb->save.cr0 = cr0;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001466 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02001467 update_cr0_intercept(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001468}
1469
1470static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1471{
Joerg Roedel6394b642008-04-09 14:15:29 +02001472 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
Joerg Roedele5eab0c2008-09-09 19:11:51 +02001473 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
1474
1475 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
Joerg Roedelf40f6a42010-12-03 15:25:15 +01001476 svm_flush_tlb(vcpu);
Joerg Roedel6394b642008-04-09 14:15:29 +02001477
Joerg Roedelec077262008-04-09 14:15:28 +02001478 vcpu->arch.cr4 = cr4;
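 /*
 * Without nested paging the real CR4 must have PAE set, presumably
 * because the shadow page tables always use 64-bit PTEs, regardless
 * of the paging mode the guest asked for.
 */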
1479 if (!npt_enabled)
1480 cr4 |= X86_CR4_PAE;
Joerg Roedel6394b642008-04-09 14:15:29 +02001481 cr4 |= host_cr4_mce;
Joerg Roedelec077262008-04-09 14:15:28 +02001482 to_svm(vcpu)->vmcb->save.cr4 = cr4;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001483 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001484}
1485
1486static void svm_set_segment(struct kvm_vcpu *vcpu,
1487 struct kvm_segment *var, int seg)
1488{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001489 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001490 struct vmcb_seg *s = svm_seg(vcpu, seg);
1491
1492 s->base = var->base;
1493 s->limit = var->limit;
1494 s->selector = var->selector;
1495 if (var->unusable)
1496 s->attrib = 0;
1497 else {
1498 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1499 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1500 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1501 s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
1502 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1503 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1504 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1505 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1506 }
1507 if (seg == VCPU_SREG_CS)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001508 svm->vmcb->save.cpl
1509 = (svm->vmcb->save.cs.attrib
Avi Kivity6aa8b732006-12-10 02:21:36 -08001510 >> SVM_SELECTOR_DPL_SHIFT) & 3;
1511
Joerg Roedel060d0c92010-12-03 11:45:57 +01001512 mark_dirty(svm->vmcb, VMCB_SEG);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001513}
1514
Gleb Natapov44c11432009-05-11 13:35:52 +03001515static void update_db_intercept(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001516{
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001517 struct vcpu_svm *svm = to_svm(vcpu);
1518
Joerg Roedel18c918c2010-11-30 18:03:59 +01001519 clr_exception_intercept(svm, DB_VECTOR);
1520 clr_exception_intercept(svm, BP_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03001521
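 /*
 * Rebuild the #DB/#BP intercepts from scratch: NMI single-stepping
 * and host-requested single-step/hardware breakpoints need #DB
 * intercepted, software breakpoints need #BP intercepted.
 */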
Jan Kiszka6be7d302009-10-18 13:24:54 +02001522 if (svm->nmi_singlestep)
Joerg Roedel18c918c2010-11-30 18:03:59 +01001523 set_exception_intercept(svm, DB_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03001524
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001525 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1526 if (vcpu->guest_debug &
1527 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
Joerg Roedel18c918c2010-11-30 18:03:59 +01001528 set_exception_intercept(svm, DB_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001529 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
Joerg Roedel18c918c2010-11-30 18:03:59 +01001530 set_exception_intercept(svm, BP_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001531 } else
1532 vcpu->guest_debug = 0;
Gleb Natapov44c11432009-05-11 13:35:52 +03001533}
1534
Jan Kiszka355be0b2009-10-03 00:31:21 +02001535static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
Gleb Natapov44c11432009-05-11 13:35:52 +03001536{
Gleb Natapov44c11432009-05-11 13:35:52 +03001537 struct vcpu_svm *svm = to_svm(vcpu);
1538
Jan Kiszkaae675ef2008-12-15 13:52:10 +01001539 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1540 svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
1541 else
1542 svm->vmcb->save.dr7 = vcpu->arch.dr7;
1543
Joerg Roedel72214b92010-12-03 11:45:55 +01001544 mark_dirty(svm->vmcb, VMCB_DR);
1545
Jan Kiszka355be0b2009-10-03 00:31:21 +02001546 update_db_intercept(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001547}
1548
Tejun Heo0fe1e002009-10-29 22:34:14 +09001549static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001550{
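 /*
 * Hand out the next ASID on this physical CPU. When the ASID space
 * is exhausted, start a new generation and ask the hardware to
 * flush all ASIDs before any of them are reused.
 */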
Tejun Heo0fe1e002009-10-29 22:34:14 +09001551 if (sd->next_asid > sd->max_asid) {
1552 ++sd->asid_generation;
1553 sd->next_asid = 1;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001554 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001555 }
1556
Tejun Heo0fe1e002009-10-29 22:34:14 +09001557 svm->asid_generation = sd->asid_generation;
1558 svm->vmcb->control.asid = sd->next_asid++;
Joerg Roedeld48086d2010-12-03 11:45:51 +01001559
1560 mark_dirty(svm->vmcb, VMCB_ASID);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001561}
1562
Gleb Natapov020df072010-04-13 10:05:23 +03001563static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001564{
Jan Kiszka42dbaa52008-12-15 13:52:10 +01001565 struct vcpu_svm *svm = to_svm(vcpu);
Jan Kiszka42dbaa52008-12-15 13:52:10 +01001566
Gleb Natapov020df072010-04-13 10:05:23 +03001567 svm->vmcb->save.dr7 = value;
Joerg Roedel72214b92010-12-03 11:45:55 +01001568 mark_dirty(svm->vmcb, VMCB_DR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001569}
1570
Avi Kivity851ba692009-08-24 11:10:17 +03001571static int pf_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001572{
Gleb Natapov631bc482010-10-14 11:22:52 +02001573 u64 fault_address = svm->vmcb->control.exit_info_2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001574 u32 error_code;
Gleb Natapov631bc482010-10-14 11:22:52 +02001575 int r = 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001576
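 /*
 * apf_reason distinguishes an ordinary guest #PF (the default case)
 * from the paravirtual async page fault notifications
 * (KVM_PV_REASON_PAGE_NOT_PRESENT / PAGE_READY) delivered through
 * the same exception vector.
 */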
Gleb Natapov631bc482010-10-14 11:22:52 +02001577 switch (svm->apf_reason) {
1578 default:
1579 error_code = svm->vmcb->control.exit_info_1;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001580
Gleb Natapov631bc482010-10-14 11:22:52 +02001581 trace_kvm_page_fault(fault_address, error_code);
1582 if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
1583 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
Andre Przywaradc25e892010-12-21 11:12:07 +01001584 r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
1585 svm->vmcb->control.insn_bytes,
1586 svm->vmcb->control.insn_len);
Gleb Natapov631bc482010-10-14 11:22:52 +02001587 break;
1588 case KVM_PV_REASON_PAGE_NOT_PRESENT:
1589 svm->apf_reason = 0;
1590 local_irq_disable();
1591 kvm_async_pf_task_wait(fault_address);
1592 local_irq_enable();
1593 break;
1594 case KVM_PV_REASON_PAGE_READY:
1595 svm->apf_reason = 0;
1596 local_irq_disable();
1597 kvm_async_pf_task_wake(fault_address);
1598 local_irq_enable();
1599 break;
1600 }
1601 return r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001602}
1603
Avi Kivity851ba692009-08-24 11:10:17 +03001604static int db_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001605{
Avi Kivity851ba692009-08-24 11:10:17 +03001606 struct kvm_run *kvm_run = svm->vcpu.run;
1607
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001608 if (!(svm->vcpu.guest_debug &
Gleb Natapov44c11432009-05-11 13:35:52 +03001609 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
Jan Kiszka6be7d302009-10-18 13:24:54 +02001610 !svm->nmi_singlestep) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001611 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1612 return 1;
1613 }
Gleb Natapov44c11432009-05-11 13:35:52 +03001614
Jan Kiszka6be7d302009-10-18 13:24:54 +02001615 if (svm->nmi_singlestep) {
1616 svm->nmi_singlestep = false;
Gleb Natapov44c11432009-05-11 13:35:52 +03001617 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
1618 svm->vmcb->save.rflags &=
1619 ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1620 update_db_intercept(&svm->vcpu);
1621 }
1622
1623 if (svm->vcpu.guest_debug &
Joerg Roedele0231712010-02-24 18:59:10 +01001624 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
Gleb Natapov44c11432009-05-11 13:35:52 +03001625 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1626 kvm_run->debug.arch.pc =
1627 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1628 kvm_run->debug.arch.exception = DB_VECTOR;
1629 return 0;
1630 }
1631
1632 return 1;
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001633}
1634
Avi Kivity851ba692009-08-24 11:10:17 +03001635static int bp_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001636{
Avi Kivity851ba692009-08-24 11:10:17 +03001637 struct kvm_run *kvm_run = svm->vcpu.run;
1638
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001639 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1640 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1641 kvm_run->debug.arch.exception = BP_VECTOR;
1642 return 0;
1643}
1644
Avi Kivity851ba692009-08-24 11:10:17 +03001645static int ud_interception(struct vcpu_svm *svm)
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001646{
1647 int er;
1648
Andre Przywara51d8b662010-12-21 11:12:02 +01001649 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001650 if (er != EMULATE_DONE)
Avi Kivity7ee5d9402007-11-25 15:22:50 +02001651 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001652 return 1;
1653}
1654
Avi Kivity6b52d182010-01-21 15:31:47 +02001655static void svm_fpu_activate(struct kvm_vcpu *vcpu)
Anthony Liguori7807fa62007-04-23 09:17:21 -05001656{
Avi Kivity6b52d182010-01-21 15:31:47 +02001657 struct vcpu_svm *svm = to_svm(vcpu);
Joerg Roedel66a562f2010-02-19 16:23:08 +01001658
Joerg Roedel18c918c2010-11-30 18:03:59 +01001659 clr_exception_intercept(svm, NM_VECTOR);
Joerg Roedel66a562f2010-02-19 16:23:08 +01001660
Rusty Russelle756fc62007-07-30 20:07:08 +10001661 svm->vcpu.fpu_active = 1;
Avi Kivityd2251572010-01-06 10:55:27 +02001662 update_cr0_intercept(svm);
Avi Kivity6b52d182010-01-21 15:31:47 +02001663}
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001664
Avi Kivity6b52d182010-01-21 15:31:47 +02001665static int nm_interception(struct vcpu_svm *svm)
1666{
1667 svm_fpu_activate(&svm->vcpu);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001668 return 1;
Anthony Liguori7807fa62007-04-23 09:17:21 -05001669}
1670
Joerg Roedel67ec6602010-05-17 14:43:35 +02001671static bool is_erratum_383(void)
1672{
1673 int err, i;
1674 u64 value;
1675
1676 if (!erratum_383_found)
1677 return false;
1678
1679 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
1680 if (err)
1681 return false;
1682
1683 /* Bit 62 may or may not be set for this mce */
1684 value &= ~(1ULL << 62);
1685
1686 if (value != 0xb600000000010015ULL)
1687 return false;
1688
1689 /* Clear MCi_STATUS registers */
1690 for (i = 0; i < 6; ++i)
1691 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
1692
1693 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
1694 if (!err) {
1695 u32 low, high;
1696
1697 value &= ~(1ULL << 2);
1698 low = lower_32_bits(value);
1699 high = upper_32_bits(value);
1700
1701 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
1702 }
1703
1704 /* Flush tlb to evict multi-match entries */
1705 __flush_tlb_all();
1706
1707 return true;
1708}
1709
Joerg Roedelfe5913e2010-05-17 14:43:34 +02001710static void svm_handle_mce(struct vcpu_svm *svm)
Joerg Roedel53371b52008-04-09 14:15:30 +02001711{
Joerg Roedel67ec6602010-05-17 14:43:35 +02001712 if (is_erratum_383()) {
1713 /*
1714 * Erratum 383 triggered. Guest state is corrupt so kill the
1715 * guest.
1716 */
1717 pr_err("KVM: Guest triggered AMD Erratum 383\n");
1718
Avi Kivitya8eeb042010-05-10 12:34:53 +03001719 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
Joerg Roedel67ec6602010-05-17 14:43:35 +02001720
1721 return;
1722 }
1723
Joerg Roedel53371b52008-04-09 14:15:30 +02001724 /*
1725 * On an #MC intercept the MCE handler is not called automatically in
1726 * the host. So do it by hand here.
1727 */
1728 asm volatile (
1729 "int $0x12\n");
1730 /* not sure if we ever come back to this point */
1731
Joerg Roedelfe5913e2010-05-17 14:43:34 +02001732 return;
1733}
1734
1735static int mc_interception(struct vcpu_svm *svm)
1736{
Joerg Roedel53371b52008-04-09 14:15:30 +02001737 return 1;
1738}
1739
Avi Kivity851ba692009-08-24 11:10:17 +03001740static int shutdown_interception(struct vcpu_svm *svm)
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001741{
Avi Kivity851ba692009-08-24 11:10:17 +03001742 struct kvm_run *kvm_run = svm->vcpu.run;
1743
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001744 /*
1745 * VMCB is undefined after a SHUTDOWN intercept
1746 * so reinitialize it.
1747 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001748 clear_page(svm->vmcb);
Joerg Roedele6101a92008-02-13 18:58:45 +01001749 init_vmcb(svm);
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001750
1751 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1752 return 0;
1753}
1754
Avi Kivity851ba692009-08-24 11:10:17 +03001755static int io_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001756{
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001757 struct kvm_vcpu *vcpu = &svm->vcpu;
Mike Dayd77c26f2007-10-08 09:02:08 -04001758 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
Jan Kiszka34c33d12009-02-08 13:28:15 +01001759 int size, in, string;
Avi Kivity039576c2007-03-20 12:46:50 +02001760 unsigned port;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001761
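 /*
 * exit_info_1 encodes the I/O event: the direction and string bits
 * are tested below, the operand size sits in the SIZE field and the
 * port number occupies the upper 16 bits.
 */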
Rusty Russelle756fc62007-07-30 20:07:08 +10001762 ++svm->vcpu.stat.io_exits;
Laurent Viviere70669a2007-08-05 10:36:40 +03001763 string = (io_info & SVM_IOIO_STR_MASK) != 0;
Avi Kivity039576c2007-03-20 12:46:50 +02001764 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001765 if (string || in)
Andre Przywara51d8b662010-12-21 11:12:02 +01001766 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001767
Avi Kivity039576c2007-03-20 12:46:50 +02001768 port = io_info >> 16;
1769 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001770 svm->next_rip = svm->vmcb->control.exit_info_2;
Guillaume Thouvenine93f36b2008-10-28 10:51:30 +01001771 skip_emulated_instruction(&svm->vcpu);
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001772
1773 return kvm_fast_pio_out(vcpu, size, port);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001774}
1775
Avi Kivity851ba692009-08-24 11:10:17 +03001776static int nmi_interception(struct vcpu_svm *svm)
Joerg Roedelc47f0982008-04-30 17:56:00 +02001777{
1778 return 1;
1779}
1780
Avi Kivity851ba692009-08-24 11:10:17 +03001781static int intr_interception(struct vcpu_svm *svm)
Joerg Roedela0698052008-04-30 17:56:01 +02001782{
1783 ++svm->vcpu.stat.irq_exits;
1784 return 1;
1785}
1786
Avi Kivity851ba692009-08-24 11:10:17 +03001787static int nop_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001788{
1789 return 1;
1790}
1791
Avi Kivity851ba692009-08-24 11:10:17 +03001792static int halt_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001793{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001794 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
Rusty Russelle756fc62007-07-30 20:07:08 +10001795 skip_emulated_instruction(&svm->vcpu);
1796 return kvm_emulate_halt(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001797}
1798
Avi Kivity851ba692009-08-24 11:10:17 +03001799static int vmmcall_interception(struct vcpu_svm *svm)
Avi Kivity02e235b2007-02-19 14:37:47 +02001800{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001801 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Rusty Russelle756fc62007-07-30 20:07:08 +10001802 skip_emulated_instruction(&svm->vcpu);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001803 kvm_emulate_hypercall(&svm->vcpu);
1804 return 1;
Avi Kivity02e235b2007-02-19 14:37:47 +02001805}
1806
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001807static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
1808{
1809 struct vcpu_svm *svm = to_svm(vcpu);
1810
1811 return svm->nested.nested_cr3;
1812}
1813
1814static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
1815 unsigned long root)
1816{
1817 struct vcpu_svm *svm = to_svm(vcpu);
1818
1819 svm->vmcb->control.nested_cr3 = root;
Joerg Roedelb2747162010-12-03 11:45:53 +01001820 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedelf40f6a42010-12-03 15:25:15 +01001821 svm_flush_tlb(vcpu);
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001822}
1823
Avi Kivity6389ee92010-11-29 16:12:30 +02001824static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
1825 struct x86_exception *fault)
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001826{
1827 struct vcpu_svm *svm = to_svm(vcpu);
1828
1829 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
1830 svm->vmcb->control.exit_code_hi = 0;
Avi Kivity6389ee92010-11-29 16:12:30 +02001831 svm->vmcb->control.exit_info_1 = fault->error_code;
1832 svm->vmcb->control.exit_info_2 = fault->address;
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001833
1834 nested_svm_vmexit(svm);
1835}
1836
Joerg Roedel4b161842010-09-10 17:31:03 +02001837static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
1838{
1839 int r;
1840
1841 r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
1842
1843 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
1844 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
1845 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
1846 vcpu->arch.mmu.shadow_root_level = get_npt_level();
1847 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
1848
1849 return r;
1850}
1851
1852static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
1853{
1854 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
1855}
1856
Alexander Grafc0725422008-11-25 20:17:03 +01001857static int nested_svm_check_permissions(struct vcpu_svm *svm)
1858{
Avi Kivityf6801df2010-01-21 15:31:50 +02001859 if (!(svm->vcpu.arch.efer & EFER_SVME)
Alexander Grafc0725422008-11-25 20:17:03 +01001860 || !is_paging(&svm->vcpu)) {
1861 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
1862 return 1;
1863 }
1864
1865 if (svm->vmcb->save.cpl) {
1866 kvm_inject_gp(&svm->vcpu, 0);
1867 return 1;
1868 }
1869
1870 return 0;
1871}
1872
Alexander Grafcf74a782008-11-25 20:17:08 +01001873static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1874 bool has_error_code, u32 error_code)
1875{
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01001876 int vmexit;
1877
Joerg Roedel20307532010-11-29 17:51:48 +01001878 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel0295ad72009-08-07 11:49:37 +02001879 return 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01001880
Joerg Roedel0295ad72009-08-07 11:49:37 +02001881 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1882 svm->vmcb->control.exit_code_hi = 0;
1883 svm->vmcb->control.exit_info_1 = error_code;
1884 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1885
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01001886 vmexit = nested_svm_intercept(svm);
1887 if (vmexit == NESTED_EXIT_DONE)
1888 svm->nested.exit_required = true;
1889
1890 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01001891}
1892
Joerg Roedel8fe54652010-02-19 16:23:01 +01001893/* This function returns true if it is safe to enable the irq window */
1894static inline bool nested_svm_intr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01001895{
Joerg Roedel20307532010-11-29 17:51:48 +01001896 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel8fe54652010-02-19 16:23:01 +01001897 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01001898
Joerg Roedel26666952009-08-07 11:49:46 +02001899 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01001900 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01001901
Joerg Roedel26666952009-08-07 11:49:46 +02001902 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01001903 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01001904
Gleb Natapova0a07cd2010-09-20 10:15:32 +02001905 /*
1906 * If a vmexit was already requested (by an intercepted exception,
1907 * for instance), do not overwrite it with an "external interrupt"
1908 * vmexit.
1909 */
1910 if (svm->nested.exit_required)
1911 return false;
1912
Joerg Roedel197717d2010-02-24 18:59:19 +01001913 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
1914 svm->vmcb->control.exit_info_1 = 0;
1915 svm->vmcb->control.exit_info_2 = 0;
Joerg Roedel26666952009-08-07 11:49:46 +02001916
Joerg Roedelcd3ff652009-10-09 16:08:26 +02001917 if (svm->nested.intercept & 1ULL) {
1918 /*
1919 * The #vmexit can't be emulated here directly because this
1920 * code path runs with irqs and preemption disabled. A
1921 * #vmexit emulation might sleep. Only signal the request for
1922 * the #vmexit here.
1923 */
1924 svm->nested.exit_required = true;
Joerg Roedel236649d2009-10-09 16:08:30 +02001925 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
Joerg Roedel8fe54652010-02-19 16:23:01 +01001926 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01001927 }
1928
Joerg Roedel8fe54652010-02-19 16:23:01 +01001929 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01001930}
1931
Joerg Roedel887f5002010-02-24 18:59:12 +01001932/* This function returns true if it is safe to enable the nmi window */
1933static inline bool nested_svm_nmi(struct vcpu_svm *svm)
1934{
Joerg Roedel20307532010-11-29 17:51:48 +01001935 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel887f5002010-02-24 18:59:12 +01001936 return true;
1937
1938 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
1939 return true;
1940
1941 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
1942 svm->nested.exit_required = true;
1943
1944 return false;
1945}
1946
Joerg Roedel7597f122010-02-19 16:23:00 +01001947static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001948{
1949 struct page *page;
1950
Joerg Roedel6c3bd3d2010-02-19 16:23:04 +01001951 might_sleep();
1952
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001953 page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001954 if (is_error_page(page))
1955 goto error;
1956
Joerg Roedel7597f122010-02-19 16:23:00 +01001957 *_page = page;
1958
1959 return kmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001960
1961error:
1962 kvm_release_page_clean(page);
1963 kvm_inject_gp(&svm->vcpu, 0);
1964
1965 return NULL;
1966}
1967
Joerg Roedel7597f122010-02-19 16:23:00 +01001968static void nested_svm_unmap(struct page *page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001969{
Joerg Roedel7597f122010-02-19 16:23:00 +01001970 kunmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001971 kvm_release_page_dirty(page);
1972}
1973
Joerg Roedelce2ac082010-03-01 15:34:39 +01001974static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01001975{
Joerg Roedelce2ac082010-03-01 15:34:39 +01001976 unsigned port;
1977 u8 val, bit;
1978 u64 gpa;
1979
1980 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
1981 return NESTED_EXIT_HOST;
1982
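 /*
 * The nested IOPM uses one bit per I/O port, so the byte that
 * covers this port lives at vmcb_iopm + port / 8 and the port's
 * permission is bit (port % 8) within it.
 */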
1983 port = svm->vmcb->control.exit_info_1 >> 16;
1984 gpa = svm->nested.vmcb_iopm + (port / 8);
1985 bit = port % 8;
1986 val = 0;
1987
1988 if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
1989 val &= (1 << bit);
1990
1991 return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1992}
1993
Joerg Roedeld2477822010-03-01 15:34:34 +01001994static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01001995{
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001996 u32 offset, msr, value;
1997 int write, mask;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001998
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02001999 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
Joerg Roedeld2477822010-03-01 15:34:34 +01002000 return NESTED_EXIT_HOST;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002001
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002002 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2003 offset = svm_msrpm_offset(msr);
2004 write = svm->vmcb->control.exit_info_1 & 1;
2005 mask = 1 << ((2 * (msr & 0xf)) + write);
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002006
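 /*
 * Each MSR is tracked by two adjacent bits (read, then write) in the
 * nested MSR permission map; svm_msrpm_offset() returns the index of
 * the 32-bit word that holds them.
 */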
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002007 if (offset == MSR_INVALID)
2008 return NESTED_EXIT_DONE;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002009
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002010 /* Offset is in 32-bit units but we need it in byte units */
2011 offset *= 4;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002012
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002013 if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
2014 return NESTED_EXIT_DONE;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002015
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002016 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002017}
2018
Joerg Roedel410e4d52009-08-07 11:49:44 +02002019static int nested_svm_exit_special(struct vcpu_svm *svm)
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002020{
Alexander Grafcf74a782008-11-25 20:17:08 +01002021 u32 exit_code = svm->vmcb->control.exit_code;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002022
Joerg Roedel410e4d52009-08-07 11:49:44 +02002023 switch (exit_code) {
2024 case SVM_EXIT_INTR:
2025 case SVM_EXIT_NMI:
Joerg Roedelff47a492010-04-22 12:33:14 +02002026 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
Joerg Roedel410e4d52009-08-07 11:49:44 +02002027 return NESTED_EXIT_HOST;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002028 case SVM_EXIT_NPF:
Joerg Roedele0231712010-02-24 18:59:10 +01002029 /* For now we are always handling NPFs when using them */
Joerg Roedel410e4d52009-08-07 11:49:44 +02002030 if (npt_enabled)
2031 return NESTED_EXIT_HOST;
2032 break;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002033 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
Gleb Natapov631bc482010-10-14 11:22:52 +02002034 /* When we're shadowing, trap PFs, but not async PF */
2035 if (!npt_enabled && svm->apf_reason == 0)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002036 return NESTED_EXIT_HOST;
2037 break;
Joerg Roedel66a562f2010-02-19 16:23:08 +01002038 case SVM_EXIT_EXCP_BASE + NM_VECTOR:
2039 nm_interception(svm);
2040 break;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002041 default:
2042 break;
Alexander Grafcf74a782008-11-25 20:17:08 +01002043 }
2044
Joerg Roedel410e4d52009-08-07 11:49:44 +02002045 return NESTED_EXIT_CONTINUE;
2046}
2047
2048/*
2049 * If this function returns NESTED_EXIT_DONE, this #vmexit must be reflected to the nested hypervisor
2050 */
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002051static int nested_svm_intercept(struct vcpu_svm *svm)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002052{
2053 u32 exit_code = svm->vmcb->control.exit_code;
2054 int vmexit = NESTED_EXIT_HOST;
2055
Alexander Grafcf74a782008-11-25 20:17:08 +01002056 switch (exit_code) {
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002057 case SVM_EXIT_MSR:
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002058 vmexit = nested_svm_exit_handled_msr(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002059 break;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002060 case SVM_EXIT_IOIO:
2061 vmexit = nested_svm_intercept_ioio(svm);
2062 break;
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002063 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
2064 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
2065 if (svm->nested.intercept_cr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002066 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002067 break;
2068 }
Joerg Roedel3aed0412010-11-30 18:03:58 +01002069 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
2070 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
2071 if (svm->nested.intercept_dr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002072 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002073 break;
2074 }
2075 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
2076 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
Joerg Roedelaad42c62009-08-07 11:49:34 +02002077 if (svm->nested.intercept_exceptions & excp_bits)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002078 vmexit = NESTED_EXIT_DONE;
Gleb Natapov631bc482010-10-14 11:22:52 +02002079 /* an async page fault always causes a vmexit */
2080 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
2081 svm->apf_reason != 0)
2082 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002083 break;
2084 }
Joerg Roedel228070b2010-04-22 12:33:10 +02002085 case SVM_EXIT_ERR: {
2086 vmexit = NESTED_EXIT_DONE;
2087 break;
2088 }
Alexander Grafcf74a782008-11-25 20:17:08 +01002089 default: {
2090 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
Joerg Roedelaad42c62009-08-07 11:49:34 +02002091 if (svm->nested.intercept & exit_bits)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002092 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002093 }
2094 }
2095
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002096 return vmexit;
2097}
2098
2099static int nested_svm_exit_handled(struct vcpu_svm *svm)
2100{
2101 int vmexit;
2102
2103 vmexit = nested_svm_intercept(svm);
2104
2105 if (vmexit == NESTED_EXIT_DONE)
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002106 nested_svm_vmexit(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002107
2108 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01002109}
2110
Joerg Roedel0460a972009-08-07 11:49:31 +02002111static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
2112{
2113 struct vmcb_control_area *dst = &dst_vmcb->control;
2114 struct vmcb_control_area *from = &from_vmcb->control;
2115
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002116 dst->intercept_cr = from->intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01002117 dst->intercept_dr = from->intercept_dr;
Joerg Roedel0460a972009-08-07 11:49:31 +02002118 dst->intercept_exceptions = from->intercept_exceptions;
2119 dst->intercept = from->intercept;
2120 dst->iopm_base_pa = from->iopm_base_pa;
2121 dst->msrpm_base_pa = from->msrpm_base_pa;
2122 dst->tsc_offset = from->tsc_offset;
2123 dst->asid = from->asid;
2124 dst->tlb_ctl = from->tlb_ctl;
2125 dst->int_ctl = from->int_ctl;
2126 dst->int_vector = from->int_vector;
2127 dst->int_state = from->int_state;
2128 dst->exit_code = from->exit_code;
2129 dst->exit_code_hi = from->exit_code_hi;
2130 dst->exit_info_1 = from->exit_info_1;
2131 dst->exit_info_2 = from->exit_info_2;
2132 dst->exit_int_info = from->exit_int_info;
2133 dst->exit_int_info_err = from->exit_int_info_err;
2134 dst->nested_ctl = from->nested_ctl;
2135 dst->event_inj = from->event_inj;
2136 dst->event_inj_err = from->event_inj_err;
2137 dst->nested_cr3 = from->nested_cr3;
2138 dst->lbr_ctl = from->lbr_ctl;
2139}
2140
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002141static int nested_svm_vmexit(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002142{
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002143 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002144 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedel33740e42009-08-07 11:49:29 +02002145 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002146 struct page *page;
Alexander Grafcf74a782008-11-25 20:17:08 +01002147
Joerg Roedel17897f32009-10-09 16:08:29 +02002148 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
2149 vmcb->control.exit_info_1,
2150 vmcb->control.exit_info_2,
2151 vmcb->control.exit_int_info,
2152 vmcb->control.exit_int_info_err);
2153
Joerg Roedel7597f122010-02-19 16:23:00 +01002154 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002155 if (!nested_vmcb)
2156 return 1;
2157
Joerg Roedel20307532010-11-29 17:51:48 +01002158 /* Exit Guest-Mode */
2159 leave_guest_mode(&svm->vcpu);
Joerg Roedel06fc77722010-02-19 16:23:07 +01002160 svm->nested.vmcb = 0;
2161
Alexander Grafcf74a782008-11-25 20:17:08 +01002162 /* Give the current vmcb to the guest */
Joerg Roedel33740e42009-08-07 11:49:29 +02002163 disable_gif(svm);
2164
2165 nested_vmcb->save.es = vmcb->save.es;
2166 nested_vmcb->save.cs = vmcb->save.cs;
2167 nested_vmcb->save.ss = vmcb->save.ss;
2168 nested_vmcb->save.ds = vmcb->save.ds;
2169 nested_vmcb->save.gdtr = vmcb->save.gdtr;
2170 nested_vmcb->save.idtr = vmcb->save.idtr;
Joerg Roedel3f6a9d12010-07-27 18:14:20 +02002171 nested_vmcb->save.efer = svm->vcpu.arch.efer;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01002172 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
Avi Kivity9f8fe502010-12-05 17:30:00 +02002173 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02002174 nested_vmcb->save.cr2 = vmcb->save.cr2;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01002175 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
Avi Kivityf6e78472010-08-02 15:30:20 +03002176 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02002177 nested_vmcb->save.rip = vmcb->save.rip;
2178 nested_vmcb->save.rsp = vmcb->save.rsp;
2179 nested_vmcb->save.rax = vmcb->save.rax;
2180 nested_vmcb->save.dr7 = vmcb->save.dr7;
2181 nested_vmcb->save.dr6 = vmcb->save.dr6;
2182 nested_vmcb->save.cpl = vmcb->save.cpl;
2183
2184 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
2185 nested_vmcb->control.int_vector = vmcb->control.int_vector;
2186 nested_vmcb->control.int_state = vmcb->control.int_state;
2187 nested_vmcb->control.exit_code = vmcb->control.exit_code;
2188 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
2189 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
2190 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
2191 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
2192 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
Joerg Roedel7a190662010-07-27 18:14:21 +02002193 nested_vmcb->control.next_rip = vmcb->control.next_rip;
Alexander Graf8d23c462009-10-09 16:08:25 +02002194
2195 /*
2196 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
2197 * to make sure that we do not lose injected events. So check event_inj
2198 * here and copy it to exit_int_info if it is valid.
2199 * Exit_int_info and event_inj can't both be valid because the case
2200 * below only happens on a VMRUN instruction intercept which has
2201 * no valid exit_int_info set.
2202 */
2203 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
2204 struct vmcb_control_area *nc = &nested_vmcb->control;
2205
2206 nc->exit_int_info = vmcb->control.event_inj;
2207 nc->exit_int_info_err = vmcb->control.event_inj_err;
2208 }
2209
Joerg Roedel33740e42009-08-07 11:49:29 +02002210 nested_vmcb->control.tlb_ctl = 0;
2211 nested_vmcb->control.event_inj = 0;
2212 nested_vmcb->control.event_inj_err = 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01002213
2214 /* We always set V_INTR_MASKING and remember the old value in hflags */
2215 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2216 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
2217
Alexander Grafcf74a782008-11-25 20:17:08 +01002218 /* Restore the original control entries */
Joerg Roedel0460a972009-08-07 11:49:31 +02002219 copy_vmcb_control_area(vmcb, hsave);
Alexander Grafcf74a782008-11-25 20:17:08 +01002220
Alexander Graf219b65d2009-06-15 15:21:25 +02002221 kvm_clear_exception_queue(&svm->vcpu);
2222 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01002223
Joerg Roedel4b161842010-09-10 17:31:03 +02002224 svm->nested.nested_cr3 = 0;
2225
Alexander Grafcf74a782008-11-25 20:17:08 +01002226 /* Restore selected save entries */
2227 svm->vmcb->save.es = hsave->save.es;
2228 svm->vmcb->save.cs = hsave->save.cs;
2229 svm->vmcb->save.ss = hsave->save.ss;
2230 svm->vmcb->save.ds = hsave->save.ds;
2231 svm->vmcb->save.gdtr = hsave->save.gdtr;
2232 svm->vmcb->save.idtr = hsave->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03002233 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
Alexander Grafcf74a782008-11-25 20:17:08 +01002234 svm_set_efer(&svm->vcpu, hsave->save.efer);
2235 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
2236 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
2237 if (npt_enabled) {
2238 svm->vmcb->save.cr3 = hsave->save.cr3;
2239 svm->vcpu.arch.cr3 = hsave->save.cr3;
2240 } else {
Avi Kivity23902182010-06-10 17:02:16 +03002241 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
Alexander Grafcf74a782008-11-25 20:17:08 +01002242 }
2243 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
2244 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
2245 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
2246 svm->vmcb->save.dr7 = 0;
2247 svm->vmcb->save.cpl = 0;
2248 svm->vmcb->control.exit_int_info = 0;
2249
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002250 mark_all_dirty(svm->vmcb);
2251
Joerg Roedel7597f122010-02-19 16:23:00 +01002252 nested_svm_unmap(page);
Alexander Grafcf74a782008-11-25 20:17:08 +01002253
Joerg Roedel4b161842010-09-10 17:31:03 +02002254 nested_svm_uninit_mmu_context(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01002255 kvm_mmu_reset_context(&svm->vcpu);
2256 kvm_mmu_load(&svm->vcpu);
2257
2258 return 0;
2259}
Alexander Graf3d6368e2008-11-25 20:17:07 +01002260
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002261static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002262{
Joerg Roedel323c3d82010-03-01 15:34:37 +01002263 /*
2264 * This function merges the msr permission bitmaps of kvm and the
2265 * nested vmcb. It is optimized in that it only merges the parts where
2266 * the kvm msr permission bitmap may contain zero bits
2267 */
Alexander Graf3d6368e2008-11-25 20:17:07 +01002268 int i;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002269
Joerg Roedel323c3d82010-03-01 15:34:37 +01002270 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2271 return true;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002272
Joerg Roedel323c3d82010-03-01 15:34:37 +01002273 for (i = 0; i < MSRPM_OFFSETS; i++) {
2274 u32 value, p;
2275 u64 offset;
2276
2277 if (msrpm_offsets[i] == 0xffffffff)
2278 break;
2279
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002280 p = msrpm_offsets[i];
2281 offset = svm->nested.vmcb_msrpm + (p * 4);
Joerg Roedel323c3d82010-03-01 15:34:37 +01002282
2283 if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
2284 return false;
2285
2286 svm->nested.msrpm[p] = svm->msrpm[p] | value;
2287 }
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002288
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002289 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002290
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002291 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002292}
2293
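/*
 * Basic consistency checks on the VMCB handed to us by the nested
 * hypervisor: the VMRUN intercept must be set, the ASID must be non-zero,
 * and nested paging may only be requested when the host itself runs with
 * NPT enabled.
 */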
Joerg Roedel52c65a302010-08-02 16:46:44 +02002294static bool nested_vmcb_checks(struct vmcb *vmcb)
2295{
2296 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
2297 return false;
2298
Joerg Roedeldbe77582010-08-02 16:46:45 +02002299 if (vmcb->control.asid == 0)
2300 return false;
2301
Joerg Roedel4b161842010-09-10 17:31:03 +02002302 if (vmcb->control.nested_ctl && !npt_enabled)
2303 return false;
2304
Joerg Roedel52c65a302010-08-02 16:46:44 +02002305 return true;
2306}
2307
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002308static bool nested_svm_vmrun(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002309{
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002310 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002311 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedeldefbba52009-08-07 11:49:30 +02002312 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002313 struct page *page;
Joerg Roedel06fc77722010-02-19 16:23:07 +01002314 u64 vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002315
Joerg Roedel06fc77722010-02-19 16:23:07 +01002316 vmcb_gpa = svm->vmcb->save.rax;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002317
Joerg Roedel7597f122010-02-19 16:23:00 +01002318 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002319 if (!nested_vmcb)
2320 return false;
2321
Joerg Roedel52c65a302010-08-02 16:46:44 +02002322 if (!nested_vmcb_checks(nested_vmcb)) {
2323 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
2324 nested_vmcb->control.exit_code_hi = 0;
2325 nested_vmcb->control.exit_info_1 = 0;
2326 nested_vmcb->control.exit_info_2 = 0;
2327
2328 nested_svm_unmap(page);
2329
2330 return false;
2331 }
2332
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002333 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
Joerg Roedel0ac406d2009-10-09 16:08:27 +02002334 nested_vmcb->save.rip,
2335 nested_vmcb->control.int_ctl,
2336 nested_vmcb->control.event_inj,
2337 nested_vmcb->control.nested_ctl);
2338
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002339 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
2340 nested_vmcb->control.intercept_cr >> 16,
Joerg Roedel2e554e82010-02-24 18:59:14 +01002341 nested_vmcb->control.intercept_exceptions,
2342 nested_vmcb->control.intercept);
2343
Alexander Graf3d6368e2008-11-25 20:17:07 +01002344 /* Clear internal status */
Alexander Graf219b65d2009-06-15 15:21:25 +02002345 kvm_clear_exception_queue(&svm->vcpu);
2346 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002347
Joerg Roedele0231712010-02-24 18:59:10 +01002348 /*
2349 * Save the old vmcb, so we don't need to pick what we save, but can
2350 * restore everything when a VMEXIT occurs
2351 */
Joerg Roedeldefbba52009-08-07 11:49:30 +02002352 hsave->save.es = vmcb->save.es;
2353 hsave->save.cs = vmcb->save.cs;
2354 hsave->save.ss = vmcb->save.ss;
2355 hsave->save.ds = vmcb->save.ds;
2356 hsave->save.gdtr = vmcb->save.gdtr;
2357 hsave->save.idtr = vmcb->save.idtr;
Avi Kivityf6801df2010-01-21 15:31:50 +02002358 hsave->save.efer = svm->vcpu.arch.efer;
Avi Kivity4d4ec082009-12-29 18:07:30 +02002359 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002360 hsave->save.cr4 = svm->vcpu.arch.cr4;
Avi Kivityf6e78472010-08-02 15:30:20 +03002361 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002362 hsave->save.rip = kvm_rip_read(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002363 hsave->save.rsp = vmcb->save.rsp;
2364 hsave->save.rax = vmcb->save.rax;
2365 if (npt_enabled)
2366 hsave->save.cr3 = vmcb->save.cr3;
2367 else
Avi Kivity9f8fe502010-12-05 17:30:00 +02002368 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002369
Joerg Roedel0460a972009-08-07 11:49:31 +02002370 copy_vmcb_control_area(hsave, vmcb);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002371
Avi Kivityf6e78472010-08-02 15:30:20 +03002372 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002373 svm->vcpu.arch.hflags |= HF_HIF_MASK;
2374 else
2375 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
2376
Joerg Roedel4b161842010-09-10 17:31:03 +02002377 if (nested_vmcb->control.nested_ctl) {
2378 kvm_mmu_unload(&svm->vcpu);
2379 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
2380 nested_svm_init_mmu_context(&svm->vcpu);
2381 }
2382
Alexander Graf3d6368e2008-11-25 20:17:07 +01002383 /* Load the nested guest state */
2384 svm->vmcb->save.es = nested_vmcb->save.es;
2385 svm->vmcb->save.cs = nested_vmcb->save.cs;
2386 svm->vmcb->save.ss = nested_vmcb->save.ss;
2387 svm->vmcb->save.ds = nested_vmcb->save.ds;
2388 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
2389 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03002390 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002391 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
2392 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
2393 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
2394 if (npt_enabled) {
2395 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
2396 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01002397 } else
Avi Kivity23902182010-06-10 17:02:16 +03002398 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01002399
2400 /* Guest paging mode is active - reset mmu */
2401 kvm_mmu_reset_context(&svm->vcpu);
2402
Joerg Roedeldefbba52009-08-07 11:49:30 +02002403 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002404 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
2405 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
2406 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
Joerg Roedele0231712010-02-24 18:59:10 +01002407
Alexander Graf3d6368e2008-11-25 20:17:07 +01002408 /* In case we don't even reach vcpu_run, the fields are not updated */
2409 svm->vmcb->save.rax = nested_vmcb->save.rax;
2410 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
2411 svm->vmcb->save.rip = nested_vmcb->save.rip;
2412 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
2413 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
2414 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
2415
Joerg Roedelf7138532010-03-01 15:34:40 +01002416 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002417 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002418
Joerg Roedelaad42c62009-08-07 11:49:34 +02002419 /* cache intercepts */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002420 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01002421 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
Joerg Roedelaad42c62009-08-07 11:49:34 +02002422 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
2423 svm->nested.intercept = nested_vmcb->control.intercept;
2424
Joerg Roedelf40f6a42010-12-03 15:25:15 +01002425 svm_flush_tlb(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002426 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002427 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
2428 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
2429 else
2430 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
2431
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002432 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
2433 /* We only want the cr8 intercept bits of the guest */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002434 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
2435 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002436 }
2437
Joerg Roedel0d945bd2010-05-05 16:04:45 +02002438 /* We don't want to see VMMCALLs from a nested guest */
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01002439 clr_intercept(svm, INTERCEPT_VMMCALL);
Joerg Roedel0d945bd2010-05-05 16:04:45 +02002440
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002441 svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002442 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
2443 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
2444 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002445 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
2446 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
2447
Joerg Roedel7597f122010-02-19 16:23:00 +01002448 nested_svm_unmap(page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002449
Joerg Roedel20307532010-11-29 17:51:48 +01002450 /* Enter Guest-Mode */
2451 enter_guest_mode(&svm->vcpu);
2452
Joerg Roedel384c6362010-11-30 18:03:56 +01002453 /*
2454 * Merge guest and host intercepts - must be called with vcpu in
2455 * guest-mode to take effect here
2456 */
2457 recalc_intercepts(svm);
2458
Joerg Roedel06fc77722010-02-19 16:23:07 +01002459 svm->nested.vmcb = vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002460
Joerg Roedel2af91942009-08-07 11:49:28 +02002461 enable_gif(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002462
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002463 mark_all_dirty(svm->vmcb);
2464
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002465 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002466}
2467
Joerg Roedel9966bf62009-08-07 11:49:40 +02002468static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
Alexander Graf55426752008-11-25 20:17:06 +01002469{
2470 to_vmcb->save.fs = from_vmcb->save.fs;
2471 to_vmcb->save.gs = from_vmcb->save.gs;
2472 to_vmcb->save.tr = from_vmcb->save.tr;
2473 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
2474 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
2475 to_vmcb->save.star = from_vmcb->save.star;
2476 to_vmcb->save.lstar = from_vmcb->save.lstar;
2477 to_vmcb->save.cstar = from_vmcb->save.cstar;
2478 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
2479 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
2480 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
2481 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
Alexander Graf55426752008-11-25 20:17:06 +01002482}
2483
Avi Kivity851ba692009-08-24 11:10:17 +03002484static int vmload_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01002485{
Joerg Roedel9966bf62009-08-07 11:49:40 +02002486 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002487 struct page *page;
Joerg Roedel9966bf62009-08-07 11:49:40 +02002488
Alexander Graf55426752008-11-25 20:17:06 +01002489 if (nested_svm_check_permissions(svm))
2490 return 1;
2491
2492 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2493 skip_emulated_instruction(&svm->vcpu);
2494
Joerg Roedel7597f122010-02-19 16:23:00 +01002495 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02002496 if (!nested_vmcb)
2497 return 1;
2498
2499 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01002500 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01002501
2502 return 1;
2503}
2504
Avi Kivity851ba692009-08-24 11:10:17 +03002505static int vmsave_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01002506{
Joerg Roedel9966bf62009-08-07 11:49:40 +02002507 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002508 struct page *page;
Joerg Roedel9966bf62009-08-07 11:49:40 +02002509
Alexander Graf55426752008-11-25 20:17:06 +01002510 if (nested_svm_check_permissions(svm))
2511 return 1;
2512
2513 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2514 skip_emulated_instruction(&svm->vcpu);
2515
Joerg Roedel7597f122010-02-19 16:23:00 +01002516 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02002517 if (!nested_vmcb)
2518 return 1;
2519
2520 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01002521 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01002522
2523 return 1;
2524}
2525
Avi Kivity851ba692009-08-24 11:10:17 +03002526static int vmrun_interception(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002527{
Alexander Graf3d6368e2008-11-25 20:17:07 +01002528 if (nested_svm_check_permissions(svm))
2529 return 1;
2530
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002531 /* Save rip after vmrun instruction */
2532 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002533
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002534 if (!nested_svm_vmrun(svm))
Alexander Graf3d6368e2008-11-25 20:17:07 +01002535 return 1;
2536
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002537 if (!nested_svm_vmrun_msrpm(svm))
Joerg Roedel1f8da472009-08-07 11:49:43 +02002538 goto failed;
2539
2540 return 1;
2541
2542failed:
2543
2544 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
2545 svm->vmcb->control.exit_code_hi = 0;
2546 svm->vmcb->control.exit_info_1 = 0;
2547 svm->vmcb->control.exit_info_2 = 0;
2548
2549 nested_svm_vmexit(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002550
2551 return 1;
2552}
2553
Avi Kivity851ba692009-08-24 11:10:17 +03002554static int stgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01002555{
2556 if (nested_svm_check_permissions(svm))
2557 return 1;
2558
2559 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2560 skip_emulated_instruction(&svm->vcpu);
Avi Kivity3842d132010-07-27 12:30:24 +03002561 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01002562
Joerg Roedel2af91942009-08-07 11:49:28 +02002563 enable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01002564
2565 return 1;
2566}
2567
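/*
 * CLGI emulation: clear the virtual global interrupt flag and drop any
 * pending virtual interrupt request.
 */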
Avi Kivity851ba692009-08-24 11:10:17 +03002568static int clgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01002569{
2570 if (nested_svm_check_permissions(svm))
2571 return 1;
2572
2573 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2574 skip_emulated_instruction(&svm->vcpu);
2575
Joerg Roedel2af91942009-08-07 11:49:28 +02002576 disable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01002577
2578 /* After a CLGI no interrupts should come */
2579 svm_clear_vintr(svm);
2580 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2581
Joerg Roedeldecdbf62010-12-03 11:45:52 +01002582 mark_dirty(svm->vmcb, VMCB_INTR);
2583
Alexander Graf1371d902008-11-25 20:17:04 +01002584 return 1;
2585}
2586
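/*
 * INVLPGA emulation: flush the guest mapping for the address in RAX.
 * The ASID in RCX is ignored; INVLPGA is simply treated like INVLPG.
 */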
Avi Kivity851ba692009-08-24 11:10:17 +03002587static int invlpga_interception(struct vcpu_svm *svm)
Alexander Grafff092382009-06-15 15:21:24 +02002588{
2589 struct kvm_vcpu *vcpu = &svm->vcpu;
Alexander Grafff092382009-06-15 15:21:24 +02002590
Joerg Roedelec1ff792009-10-09 16:08:31 +02002591 trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
2592 vcpu->arch.regs[VCPU_REGS_RAX]);
2593
Alexander Grafff092382009-06-15 15:21:24 +02002594 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2595 kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
2596
2597 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2598 skip_emulated_instruction(&svm->vcpu);
2599 return 1;
2600}
2601
Joerg Roedel532a46b2009-10-09 16:08:32 +02002602static int skinit_interception(struct vcpu_svm *svm)
2603{
2604 trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
2605
2606 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2607 return 1;
2608}
2609
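/*
 * XSETBV intercept: EDX:EAX holds the new XCR value and RCX the XCR
 * index; RIP is only advanced when kvm_set_xcr() accepts the value.
 */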
Joerg Roedel81dd35d2010-12-07 17:15:06 +01002610static int xsetbv_interception(struct vcpu_svm *svm)
2611{
2612 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
2613 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
2614
2615 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
2616 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2617 skip_emulated_instruction(&svm->vcpu);
2618 }
2619
2620 return 1;
2621}
2622
Avi Kivity851ba692009-08-24 11:10:17 +03002623static int invalid_op_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002624{
Avi Kivity7ee5d9402007-11-25 15:22:50 +02002625 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002626 return 1;
2627}
2628
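/*
 * Task switch intercept: recover the target TSS selector and the switch
 * reason from exit_info_1/2 and the exit interrupt info, then hand the
 * actual switch to the common emulation in kvm_task_switch().
 */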
Avi Kivity851ba692009-08-24 11:10:17 +03002629static int task_switch_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002630{
Izik Eidus37817f22008-03-24 23:14:53 +02002631 u16 tss_selector;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002632 int reason;
2633 int int_type = svm->vmcb->control.exit_int_info &
2634 SVM_EXITINTINFO_TYPE_MASK;
Gleb Natapov8317c292009-04-12 13:37:02 +03002635 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002636 uint32_t type =
2637 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2638 uint32_t idt_v =
2639 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
Jan Kiszkae269fb22010-04-14 15:51:09 +02002640 bool has_error_code = false;
2641 u32 error_code = 0;
Izik Eidus37817f22008-03-24 23:14:53 +02002642
2643 tss_selector = (u16)svm->vmcb->control.exit_info_1;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002644
Izik Eidus37817f22008-03-24 23:14:53 +02002645 if (svm->vmcb->control.exit_info_2 &
2646 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002647 reason = TASK_SWITCH_IRET;
2648 else if (svm->vmcb->control.exit_info_2 &
2649 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2650 reason = TASK_SWITCH_JMP;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002651 else if (idt_v)
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002652 reason = TASK_SWITCH_GATE;
2653 else
2654 reason = TASK_SWITCH_CALL;
2655
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002656 if (reason == TASK_SWITCH_GATE) {
2657 switch (type) {
2658 case SVM_EXITINTINFO_TYPE_NMI:
2659 svm->vcpu.arch.nmi_injected = false;
2660 break;
2661 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszkae269fb22010-04-14 15:51:09 +02002662 if (svm->vmcb->control.exit_info_2 &
2663 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2664 has_error_code = true;
2665 error_code =
2666 (u32)svm->vmcb->control.exit_info_2;
2667 }
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002668 kvm_clear_exception_queue(&svm->vcpu);
2669 break;
2670 case SVM_EXITINTINFO_TYPE_INTR:
2671 kvm_clear_interrupt_queue(&svm->vcpu);
2672 break;
2673 default:
2674 break;
2675 }
2676 }
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002677
Gleb Natapov8317c292009-04-12 13:37:02 +03002678 if (reason != TASK_SWITCH_GATE ||
2679 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2680 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
Gleb Natapovf629cf82009-05-11 13:35:49 +03002681 (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
2682 skip_emulated_instruction(&svm->vcpu);
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002683
Gleb Natapovacb54512010-04-15 21:03:50 +03002684 if (kvm_task_switch(&svm->vcpu, tss_selector, reason,
2685 has_error_code, error_code) == EMULATE_FAIL) {
2686 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2687 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2688 svm->vcpu.run->internal.ndata = 0;
2689 return 0;
2690 }
2691 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002692}
2693
Avi Kivity851ba692009-08-24 11:10:17 +03002694static int cpuid_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002695{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002696 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10002697 kvm_emulate_cpuid(&svm->vcpu);
Avi Kivity06465c52007-02-28 20:46:53 +02002698 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002699}
2700
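/*
 * IRET intercept: only used to notice that the guest finished its NMI
 * handler; record the current RIP so NMIs can be unmasked once the
 * guest has made progress past it.
 */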
Avi Kivity851ba692009-08-24 11:10:17 +03002701static int iret_interception(struct vcpu_svm *svm)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03002702{
2703 ++svm->vcpu.stat.nmi_window_exits;
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01002704 clr_intercept(svm, INTERCEPT_IRET);
Gleb Natapov44c11432009-05-11 13:35:52 +03002705 svm->vcpu.arch.hflags |= HF_IRET_MASK;
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02002706 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03002707 return 1;
2708}
2709
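/*
 * INVLPG intercept: with decode assists the faulting address is supplied
 * in exit_info_1, otherwise fall back to full instruction emulation.
 */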
Avi Kivity851ba692009-08-24 11:10:17 +03002710static int invlpg_interception(struct vcpu_svm *svm)
Marcelo Tosattia7052892008-09-23 13:18:35 -03002711{
Andre Przywaradf4f31082010-12-21 11:12:06 +01002712 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2713 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
2714
2715 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
2716 skip_emulated_instruction(&svm->vcpu);
2717 return 1;
Marcelo Tosattia7052892008-09-23 13:18:35 -03002718}
2719
Avi Kivity851ba692009-08-24 11:10:17 +03002720static int emulate_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002721{
Andre Przywara51d8b662010-12-21 11:12:02 +01002722 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002723}
2724
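/*
 * With nested SVM, a CR0 write that changes bits outside
 * SVM_CR0_SELECTIVE_MASK while L1 intercepts selective CR0 writes must be
 * reflected to L1 as SVM_EXIT_CR0_SEL_WRITE; returns true when that
 * nested exit was taken.
 */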
Joerg Roedel628afd22011-04-04 12:39:36 +02002725bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
2726{
2727 unsigned long cr0 = svm->vcpu.arch.cr0;
2728 bool ret = false;
2729 u64 intercept;
2730
2731 intercept = svm->nested.intercept;
2732
2733 if (!is_guest_mode(&svm->vcpu) ||
2734 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
2735 return false;
2736
2737 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2738 val &= ~SVM_CR0_SELECTIVE_MASK;
2739
2740 if (cr0 ^ val) {
2741 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2742 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2743 }
2744
2745 return ret;
2746}
2747
Andre Przywara7ff76d52010-12-21 11:12:04 +01002748#define CR_VALID (1ULL << 63)
2749
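/*
 * Decode-assisted handler for CR accesses: the exit code encodes the CR
 * number and direction (codes 16 and up are writes), exit_info_1 names
 * the general purpose register involved.
 */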
2750static int cr_interception(struct vcpu_svm *svm)
2751{
2752 int reg, cr;
2753 unsigned long val;
2754 int err;
2755
2756 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2757 return emulate_on_interception(svm);
2758
2759 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2760 return emulate_on_interception(svm);
2761
2762 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2763 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2764
2765 err = 0;
2766 if (cr >= 16) { /* mov to cr */
2767 cr -= 16;
2768 val = kvm_register_read(&svm->vcpu, reg);
2769 switch (cr) {
2770 case 0:
Joerg Roedel628afd22011-04-04 12:39:36 +02002771 if (!check_selective_cr0_intercepted(svm, val))
2772 err = kvm_set_cr0(&svm->vcpu, val);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002773 break;
2774 case 3:
2775 err = kvm_set_cr3(&svm->vcpu, val);
2776 break;
2777 case 4:
2778 err = kvm_set_cr4(&svm->vcpu, val);
2779 break;
2780 case 8:
2781 err = kvm_set_cr8(&svm->vcpu, val);
2782 break;
2783 default:
2784 WARN(1, "unhandled write to CR%d", cr);
2785 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2786 return 1;
2787 }
2788 } else { /* mov from cr */
2789 switch (cr) {
2790 case 0:
2791 val = kvm_read_cr0(&svm->vcpu);
2792 break;
2793 case 2:
2794 val = svm->vcpu.arch.cr2;
2795 break;
2796 case 3:
Avi Kivity9f8fe502010-12-05 17:30:00 +02002797 val = kvm_read_cr3(&svm->vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002798 break;
2799 case 4:
2800 val = kvm_read_cr4(&svm->vcpu);
2801 break;
2802 case 8:
2803 val = kvm_get_cr8(&svm->vcpu);
2804 break;
2805 default:
2806 WARN(1, "unhandled read from CR%d", cr);
2807 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2808 return 1;
2809 }
2810 kvm_register_write(&svm->vcpu, reg, val);
2811 }
2812 kvm_complete_insn_gp(&svm->vcpu, err);
2813
2814 return 1;
2815}
2816
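/*
 * Decode-assisted handler for DR accesses, analogous to cr_interception:
 * the exit code encodes the debug register and direction, exit_info_1
 * the general purpose register.
 */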
Andre Przywaracae37972010-12-21 11:12:05 +01002817static int dr_interception(struct vcpu_svm *svm)
2818{
2819 int reg, dr;
2820 unsigned long val;
2821 int err;
2822
2823 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2824 return emulate_on_interception(svm);
2825
2826 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2827 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2828
2829 if (dr >= 16) { /* mov to DRn */
2830 val = kvm_register_read(&svm->vcpu, reg);
2831 kvm_set_dr(&svm->vcpu, dr - 16, val);
2832 } else {
2833 err = kvm_get_dr(&svm->vcpu, dr, &val);
2834 if (!err)
2835 kvm_register_write(&svm->vcpu, reg, val);
2836 }
2837
Joerg Roedel2c46d2a2011-02-09 18:29:39 +01002838 skip_emulated_instruction(&svm->vcpu);
2839
Andre Przywaracae37972010-12-21 11:12:05 +01002840 return 1;
2841}
2842
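/*
 * CR8 (TPR) write intercept: emulate the write, then either leave it to
 * the in-kernel APIC or exit with KVM_EXIT_SET_TPR when lowering the TPR
 * may unmask an interrupt that user space has to deliver.
 */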
Avi Kivity851ba692009-08-24 11:10:17 +03002843static int cr8_write_interception(struct vcpu_svm *svm)
Joerg Roedel1d075432007-12-06 21:02:25 +01002844{
Avi Kivity851ba692009-08-24 11:10:17 +03002845 struct kvm_run *kvm_run = svm->vcpu.run;
Andre Przywaraeea1cff2010-12-21 11:12:00 +01002846 int r;
Avi Kivity851ba692009-08-24 11:10:17 +03002847
Gleb Natapov0a5fff192009-04-21 17:45:06 +03002848 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
2849 /* instruction emulation calls kvm_set_cr8() */
Andre Przywara7ff76d52010-12-21 11:12:04 +01002850 r = cr_interception(svm);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03002851 if (irqchip_in_kernel(svm->vcpu.kvm)) {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002852 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002853 return r;
Gleb Natapov95ba8273132009-04-21 17:45:08 +03002854 }
Gleb Natapov0a5fff192009-04-21 17:45:06 +03002855 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01002856 return r;
Joerg Roedel1d075432007-12-06 21:02:25 +01002857 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2858 return 0;
2859}
2860
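/*
 * RDMSR backend: MSRs kept in the VMCB save area (or shadowed in
 * vcpu_svm, like SYSENTER_EIP/ESP) are returned directly, everything
 * else is forwarded to kvm_get_msr_common().
 */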
Avi Kivity6aa8b732006-12-10 02:21:36 -08002861static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
2862{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002863 struct vcpu_svm *svm = to_svm(vcpu);
2864
Avi Kivity6aa8b732006-12-10 02:21:36 -08002865 switch (ecx) {
Jaswinder Singh Rajputaf24a4e2009-05-15 18:42:05 +05302866 case MSR_IA32_TSC: {
Joerg Roedel4cc70312010-11-30 18:04:01 +01002867 struct vmcb *vmcb = get_host_vmcb(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002868
Joerg Roedelfbc0db72011-03-25 09:44:46 +01002869 *data = vmcb->control.tsc_offset +
2870 svm_scale_tsc(vcpu, native_read_tsc());
2871
Avi Kivity6aa8b732006-12-10 02:21:36 -08002872 break;
2873 }
Brian Gerst8c065852010-07-17 09:03:26 -04002874 case MSR_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002875 *data = svm->vmcb->save.star;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002876 break;
Avi Kivity0e859ca2006-12-22 01:05:08 -08002877#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002878 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002879 *data = svm->vmcb->save.lstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002880 break;
2881 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002882 *data = svm->vmcb->save.cstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002883 break;
2884 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002885 *data = svm->vmcb->save.kernel_gs_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002886 break;
2887 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002888 *data = svm->vmcb->save.sfmask;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002889 break;
2890#endif
2891 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002892 *data = svm->vmcb->save.sysenter_cs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002893 break;
2894 case MSR_IA32_SYSENTER_EIP:
Andre Przywara017cb992009-05-28 11:56:31 +02002895 *data = svm->sysenter_eip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002896 break;
2897 case MSR_IA32_SYSENTER_ESP:
Andre Przywara017cb992009-05-28 11:56:31 +02002898 *data = svm->sysenter_esp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002899 break;
Joerg Roedele0231712010-02-24 18:59:10 +01002900 /*
2901 * Nobody will change the following 5 values in the VMCB so we can
2902 * safely return them on rdmsr. They will always be 0 until LBRV is
2903 * implemented.
2904 */
Joerg Roedela2938c82008-02-13 16:30:28 +01002905 case MSR_IA32_DEBUGCTLMSR:
2906 *data = svm->vmcb->save.dbgctl;
2907 break;
2908 case MSR_IA32_LASTBRANCHFROMIP:
2909 *data = svm->vmcb->save.br_from;
2910 break;
2911 case MSR_IA32_LASTBRANCHTOIP:
2912 *data = svm->vmcb->save.br_to;
2913 break;
2914 case MSR_IA32_LASTINTFROMIP:
2915 *data = svm->vmcb->save.last_excp_from;
2916 break;
2917 case MSR_IA32_LASTINTTOIP:
2918 *data = svm->vmcb->save.last_excp_to;
2919 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01002920 case MSR_VM_HSAVE_PA:
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002921 *data = svm->nested.hsave_msr;
Alexander Grafb286d5d2008-11-25 20:17:05 +01002922 break;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01002923 case MSR_VM_CR:
Joerg Roedel4a810182010-02-24 18:59:15 +01002924 *data = svm->nested.vm_cr_msr;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01002925 break;
Alexander Grafc8a73f12009-01-05 16:02:47 +01002926 case MSR_IA32_UCODE_REV:
2927 *data = 0x01000065;
2928 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002929 default:
Avi Kivity3bab1f52006-12-29 16:49:48 -08002930 return kvm_get_msr_common(vcpu, ecx, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002931 }
2932 return 0;
2933}
2934
Avi Kivity851ba692009-08-24 11:10:17 +03002935static int rdmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002936{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002937 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
Avi Kivity6aa8b732006-12-10 02:21:36 -08002938 u64 data;
2939
Avi Kivity59200272010-01-25 19:47:02 +02002940 if (svm_get_msr(&svm->vcpu, ecx, &data)) {
2941 trace_kvm_msr_read_ex(ecx);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02002942 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity59200272010-01-25 19:47:02 +02002943 } else {
Marcelo Tosatti229456f2009-06-17 09:22:14 -03002944 trace_kvm_msr_read(ecx, data);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02002945
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002946 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002947 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002948 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10002949 skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002950 }
2951 return 1;
2952}
2953
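/*
 * Guest writes to MSR_VM_CR: only bits in SVM_VM_CR_VALID_MASK may
 * change, SVM_LOCK/SVM_DIS become read-only once SVM_DIS is set, and
 * setting SVM_DIS while EFER.SVME is enabled is rejected.
 */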
Joerg Roedel4a810182010-02-24 18:59:15 +01002954static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2955{
2956 struct vcpu_svm *svm = to_svm(vcpu);
2957 int svm_dis, chg_mask;
2958
2959 if (data & ~SVM_VM_CR_VALID_MASK)
2960 return 1;
2961
2962 chg_mask = SVM_VM_CR_VALID_MASK;
2963
2964 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2965 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2966
2967 svm->nested.vm_cr_msr &= ~chg_mask;
2968 svm->nested.vm_cr_msr |= (data & chg_mask);
2969
2970 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2971
2972 /* check for svm_disable while efer.svme is set */
2973 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2974 return 1;
2975
2976 return 0;
2977}
2978
Avi Kivity6aa8b732006-12-10 02:21:36 -08002979static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
2980{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002981 struct vcpu_svm *svm = to_svm(vcpu);
2982
Avi Kivity6aa8b732006-12-10 02:21:36 -08002983 switch (ecx) {
Zachary Amsdenf4e1b3c2010-08-19 22:07:16 -10002984 case MSR_IA32_TSC:
Zachary Amsden99e3e302010-08-19 22:07:17 -10002985 kvm_write_tsc(vcpu, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002986 break;
Brian Gerst8c065852010-07-17 09:03:26 -04002987 case MSR_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002988 svm->vmcb->save.star = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002989 break;
Robert P. J. Day49b14f22007-01-29 13:19:50 -08002990#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002991 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002992 svm->vmcb->save.lstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002993 break;
2994 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002995 svm->vmcb->save.cstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002996 break;
2997 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002998 svm->vmcb->save.kernel_gs_base = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002999 break;
3000 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003001 svm->vmcb->save.sfmask = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003002 break;
3003#endif
3004 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003005 svm->vmcb->save.sysenter_cs = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003006 break;
3007 case MSR_IA32_SYSENTER_EIP:
Andre Przywara017cb992009-05-28 11:56:31 +02003008 svm->sysenter_eip = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003009 svm->vmcb->save.sysenter_eip = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003010 break;
3011 case MSR_IA32_SYSENTER_ESP:
Andre Przywara017cb992009-05-28 11:56:31 +02003012 svm->sysenter_esp = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003013 svm->vmcb->save.sysenter_esp = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003014 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01003015 case MSR_IA32_DEBUGCTLMSR:
Avi Kivity2a6b20b2010-11-09 16:15:42 +02003016 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
Joerg Roedel24e09cb2008-02-13 18:58:47 +01003017 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08003018 __func__, data);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01003019 break;
3020 }
3021 if (data & DEBUGCTL_RESERVED_BITS)
3022 return 1;
3023
3024 svm->vmcb->save.dbgctl = data;
Joerg Roedelb53ba3f2010-12-03 11:45:59 +01003025 mark_dirty(svm->vmcb, VMCB_LBR);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01003026 if (data & (1ULL<<0))
3027 svm_enable_lbrv(svm);
3028 else
3029 svm_disable_lbrv(svm);
Joerg Roedela2938c82008-02-13 16:30:28 +01003030 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003031 case MSR_VM_HSAVE_PA:
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02003032 svm->nested.hsave_msr = data;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003033 break;
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003034 case MSR_VM_CR:
Joerg Roedel4a810182010-02-24 18:59:15 +01003035 return svm_set_vm_cr(vcpu, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003036 case MSR_VM_IGNNE:
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003037 pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3038 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003039 default:
Avi Kivity3bab1f52006-12-29 16:49:48 -08003040 return kvm_set_msr_common(vcpu, ecx, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003041 }
3042 return 0;
3043}
3044
Avi Kivity851ba692009-08-24 11:10:17 +03003045static int wrmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003046{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003047 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003048 u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003049 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003050
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003051
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003052 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Avi Kivity59200272010-01-25 19:47:02 +02003053 if (svm_set_msr(&svm->vcpu, ecx, data)) {
3054 trace_kvm_msr_write_ex(ecx, data);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02003055 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity59200272010-01-25 19:47:02 +02003056 } else {
3057 trace_kvm_msr_write(ecx, data);
Rusty Russelle756fc62007-07-30 20:07:08 +10003058 skip_emulated_instruction(&svm->vcpu);
Avi Kivity59200272010-01-25 19:47:02 +02003059 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08003060 return 1;
3061}
3062
Avi Kivity851ba692009-08-24 11:10:17 +03003063static int msr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003064{
Rusty Russelle756fc62007-07-30 20:07:08 +10003065 if (svm->vmcb->control.exit_info_1)
Avi Kivity851ba692009-08-24 11:10:17 +03003066 return wrmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003067 else
Avi Kivity851ba692009-08-24 11:10:17 +03003068 return rdmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003069}
3070
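/*
 * VINTR intercept: the interrupt window we asked for has opened. Drop
 * the virtual interrupt request and return to user space if it wants to
 * inject an interrupt itself.
 */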
Avi Kivity851ba692009-08-24 11:10:17 +03003071static int interrupt_window_interception(struct vcpu_svm *svm)
Dor Laorc1150d82007-01-05 16:36:24 -08003072{
Avi Kivity851ba692009-08-24 11:10:17 +03003073 struct kvm_run *kvm_run = svm->vcpu.run;
3074
Avi Kivity3842d132010-07-27 12:30:24 +03003075 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graff0b85052008-11-25 20:17:01 +01003076 svm_clear_vintr(svm);
Eddie Dong85f455f2007-07-06 12:20:49 +03003077 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
Joerg Roedeldecdbf62010-12-03 11:45:52 +01003078 mark_dirty(svm->vmcb, VMCB_INTR);
Dor Laorc1150d82007-01-05 16:36:24 -08003079 /*
3080	 * If user space is waiting to inject interrupts, exit as soon as
3081	 * possible.
3082 */
Gleb Natapov80618232009-04-21 17:44:56 +03003083 if (!irqchip_in_kernel(svm->vcpu.kvm) &&
3084 kvm_run->request_interrupt_window &&
3085 !kvm_cpu_has_interrupt(&svm->vcpu)) {
Rusty Russelle756fc62007-07-30 20:07:08 +10003086 ++svm->vcpu.stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08003087 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
3088 return 0;
3089 }
3090
3091 return 1;
3092}
3093
Mark Langsdorf565d0992009-10-06 14:25:02 -05003094static int pause_interception(struct vcpu_svm *svm)
3095{
3096 kvm_vcpu_on_spin(&(svm->vcpu));
3097 return 1;
3098}
3099
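/* Dispatch table indexed by SVM exit code; handle_exit() bounds-checks it. */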
Avi Kivity851ba692009-08-24 11:10:17 +03003100static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
Andre Przywara7ff76d52010-12-21 11:12:04 +01003101 [SVM_EXIT_READ_CR0] = cr_interception,
3102 [SVM_EXIT_READ_CR3] = cr_interception,
3103 [SVM_EXIT_READ_CR4] = cr_interception,
3104 [SVM_EXIT_READ_CR8] = cr_interception,
Avi Kivityd2251572010-01-06 10:55:27 +02003105 [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
Joerg Roedel628afd22011-04-04 12:39:36 +02003106 [SVM_EXIT_WRITE_CR0] = cr_interception,
Andre Przywara7ff76d52010-12-21 11:12:04 +01003107 [SVM_EXIT_WRITE_CR3] = cr_interception,
3108 [SVM_EXIT_WRITE_CR4] = cr_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003109 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
Andre Przywaracae37972010-12-21 11:12:05 +01003110 [SVM_EXIT_READ_DR0] = dr_interception,
3111 [SVM_EXIT_READ_DR1] = dr_interception,
3112 [SVM_EXIT_READ_DR2] = dr_interception,
3113 [SVM_EXIT_READ_DR3] = dr_interception,
3114 [SVM_EXIT_READ_DR4] = dr_interception,
3115 [SVM_EXIT_READ_DR5] = dr_interception,
3116 [SVM_EXIT_READ_DR6] = dr_interception,
3117 [SVM_EXIT_READ_DR7] = dr_interception,
3118 [SVM_EXIT_WRITE_DR0] = dr_interception,
3119 [SVM_EXIT_WRITE_DR1] = dr_interception,
3120 [SVM_EXIT_WRITE_DR2] = dr_interception,
3121 [SVM_EXIT_WRITE_DR3] = dr_interception,
3122 [SVM_EXIT_WRITE_DR4] = dr_interception,
3123 [SVM_EXIT_WRITE_DR5] = dr_interception,
3124 [SVM_EXIT_WRITE_DR6] = dr_interception,
3125 [SVM_EXIT_WRITE_DR7] = dr_interception,
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003126 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
3127 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05003128 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003129 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
3130 [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
3131 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
3132 [SVM_EXIT_INTR] = intr_interception,
Joerg Roedelc47f0982008-04-30 17:56:00 +02003133 [SVM_EXIT_NMI] = nmi_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003134 [SVM_EXIT_SMI] = nop_on_interception,
3135 [SVM_EXIT_INIT] = nop_on_interception,
Dor Laorc1150d82007-01-05 16:36:24 -08003136 [SVM_EXIT_VINTR] = interrupt_window_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003137 [SVM_EXIT_CPUID] = cpuid_interception,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003138 [SVM_EXIT_IRET] = iret_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02003139 [SVM_EXIT_INVD] = emulate_on_interception,
Mark Langsdorf565d0992009-10-06 14:25:02 -05003140 [SVM_EXIT_PAUSE] = pause_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003141 [SVM_EXIT_HLT] = halt_interception,
Marcelo Tosattia7052892008-09-23 13:18:35 -03003142 [SVM_EXIT_INVLPG] = invlpg_interception,
Alexander Grafff092382009-06-15 15:21:24 +02003143 [SVM_EXIT_INVLPGA] = invlpga_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003144 [SVM_EXIT_IOIO] = io_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003145 [SVM_EXIT_MSR] = msr_interception,
3146 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08003147 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
Alexander Graf3d6368e2008-11-25 20:17:07 +01003148 [SVM_EXIT_VMRUN] = vmrun_interception,
Avi Kivity02e235b2007-02-19 14:37:47 +02003149 [SVM_EXIT_VMMCALL] = vmmcall_interception,
Alexander Graf55426752008-11-25 20:17:06 +01003150 [SVM_EXIT_VMLOAD] = vmload_interception,
3151 [SVM_EXIT_VMSAVE] = vmsave_interception,
Alexander Graf1371d902008-11-25 20:17:04 +01003152 [SVM_EXIT_STGI] = stgi_interception,
3153 [SVM_EXIT_CLGI] = clgi_interception,
Joerg Roedel532a46b2009-10-09 16:08:32 +02003154 [SVM_EXIT_SKINIT] = skinit_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02003155 [SVM_EXIT_WBINVD] = emulate_on_interception,
Joerg Roedel916ce232007-03-21 19:47:00 +01003156 [SVM_EXIT_MONITOR] = invalid_op_interception,
3157 [SVM_EXIT_MWAIT] = invalid_op_interception,
Joerg Roedel81dd35d2010-12-07 17:15:06 +01003158 [SVM_EXIT_XSETBV] = xsetbv_interception,
Joerg Roedel709ddeb2008-02-07 13:47:45 +01003159 [SVM_EXIT_NPF] = pf_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003160};
3161
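/* Dump the VMCB control and save areas, used when a VMRUN fails. */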
Joerg Roedel3f10c842010-05-05 16:04:42 +02003162void dump_vmcb(struct kvm_vcpu *vcpu)
3163{
3164 struct vcpu_svm *svm = to_svm(vcpu);
3165 struct vmcb_control_area *control = &svm->vmcb->control;
3166 struct vmcb_save_area *save = &svm->vmcb->save;
3167
3168 pr_err("VMCB Control Area:\n");
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003169 pr_err("cr_read: %04x\n", control->intercept_cr & 0xffff);
3170 pr_err("cr_write: %04x\n", control->intercept_cr >> 16);
Joerg Roedel3aed0412010-11-30 18:03:58 +01003171 pr_err("dr_read: %04x\n", control->intercept_dr & 0xffff);
3172 pr_err("dr_write: %04x\n", control->intercept_dr >> 16);
Joerg Roedel3f10c842010-05-05 16:04:42 +02003173 pr_err("exceptions: %08x\n", control->intercept_exceptions);
3174 pr_err("intercepts: %016llx\n", control->intercept);
3175 pr_err("pause filter count: %d\n", control->pause_filter_count);
3176 pr_err("iopm_base_pa: %016llx\n", control->iopm_base_pa);
3177 pr_err("msrpm_base_pa: %016llx\n", control->msrpm_base_pa);
3178 pr_err("tsc_offset: %016llx\n", control->tsc_offset);
3179 pr_err("asid: %d\n", control->asid);
3180 pr_err("tlb_ctl: %d\n", control->tlb_ctl);
3181 pr_err("int_ctl: %08x\n", control->int_ctl);
3182 pr_err("int_vector: %08x\n", control->int_vector);
3183 pr_err("int_state: %08x\n", control->int_state);
3184 pr_err("exit_code: %08x\n", control->exit_code);
3185 pr_err("exit_info1: %016llx\n", control->exit_info_1);
3186 pr_err("exit_info2: %016llx\n", control->exit_info_2);
3187 pr_err("exit_int_info: %08x\n", control->exit_int_info);
3188 pr_err("exit_int_info_err: %08x\n", control->exit_int_info_err);
3189 pr_err("nested_ctl: %lld\n", control->nested_ctl);
3190 pr_err("nested_cr3: %016llx\n", control->nested_cr3);
3191 pr_err("event_inj: %08x\n", control->event_inj);
3192 pr_err("event_inj_err: %08x\n", control->event_inj_err);
3193 pr_err("lbr_ctl: %lld\n", control->lbr_ctl);
3194 pr_err("next_rip: %016llx\n", control->next_rip);
3195 pr_err("VMCB State Save Area:\n");
3196 pr_err("es: s: %04x a: %04x l: %08x b: %016llx\n",
3197 save->es.selector, save->es.attrib,
3198 save->es.limit, save->es.base);
3199 pr_err("cs: s: %04x a: %04x l: %08x b: %016llx\n",
3200 save->cs.selector, save->cs.attrib,
3201 save->cs.limit, save->cs.base);
3202 pr_err("ss: s: %04x a: %04x l: %08x b: %016llx\n",
3203 save->ss.selector, save->ss.attrib,
3204 save->ss.limit, save->ss.base);
3205 pr_err("ds: s: %04x a: %04x l: %08x b: %016llx\n",
3206 save->ds.selector, save->ds.attrib,
3207 save->ds.limit, save->ds.base);
3208 pr_err("fs: s: %04x a: %04x l: %08x b: %016llx\n",
3209 save->fs.selector, save->fs.attrib,
3210 save->fs.limit, save->fs.base);
3211 pr_err("gs: s: %04x a: %04x l: %08x b: %016llx\n",
3212 save->gs.selector, save->gs.attrib,
3213 save->gs.limit, save->gs.base);
3214 pr_err("gdtr: s: %04x a: %04x l: %08x b: %016llx\n",
3215 save->gdtr.selector, save->gdtr.attrib,
3216 save->gdtr.limit, save->gdtr.base);
3217 pr_err("ldtr: s: %04x a: %04x l: %08x b: %016llx\n",
3218 save->ldtr.selector, save->ldtr.attrib,
3219 save->ldtr.limit, save->ldtr.base);
3220 pr_err("idtr: s: %04x a: %04x l: %08x b: %016llx\n",
3221 save->idtr.selector, save->idtr.attrib,
3222 save->idtr.limit, save->idtr.base);
3223 pr_err("tr: s: %04x a: %04x l: %08x b: %016llx\n",
3224 save->tr.selector, save->tr.attrib,
3225 save->tr.limit, save->tr.base);
3226 pr_err("cpl: %d efer: %016llx\n",
3227 save->cpl, save->efer);
3228 pr_err("cr0: %016llx cr2: %016llx\n",
3229 save->cr0, save->cr2);
3230 pr_err("cr3: %016llx cr4: %016llx\n",
3231 save->cr3, save->cr4);
3232 pr_err("dr6: %016llx dr7: %016llx\n",
3233 save->dr6, save->dr7);
3234 pr_err("rip: %016llx rflags: %016llx\n",
3235 save->rip, save->rflags);
3236 pr_err("rsp: %016llx rax: %016llx\n",
3237 save->rsp, save->rax);
3238 pr_err("star: %016llx lstar: %016llx\n",
3239 save->star, save->lstar);
3240 pr_err("cstar: %016llx sfmask: %016llx\n",
3241 save->cstar, save->sfmask);
3242 pr_err("kernel_gs_base: %016llx sysenter_cs: %016llx\n",
3243 save->kernel_gs_base, save->sysenter_cs);
3244 pr_err("sysenter_esp: %016llx sysenter_eip: %016llx\n",
3245 save->sysenter_esp, save->sysenter_eip);
3246 pr_err("gpat: %016llx dbgctl: %016llx\n",
3247 save->g_pat, save->dbgctl);
3248 pr_err("br_from: %016llx br_to: %016llx\n",
3249 save->br_from, save->br_to);
3250 pr_err("excp_from: %016llx excp_to: %016llx\n",
3251 save->last_excp_from, save->last_excp_to);
3252
3253}
3254
Avi Kivity586f9602010-11-18 13:09:54 +02003255static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
3256{
3257 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3258
3259 *info1 = control->exit_info_1;
3260 *info2 = control->exit_info_2;
3261}
3262
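/*
 * Top-level exit handler: sync CR0/CR3 back into the vcpu, give a nested
 * hypervisor the chance to handle the exit first, then dispatch through
 * svm_exit_handlers[].
 */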
Avi Kivity851ba692009-08-24 11:10:17 +03003263static int handle_exit(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003264{
Avi Kivity04d2cc72007-09-10 18:10:54 +03003265 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity851ba692009-08-24 11:10:17 +03003266 struct kvm_run *kvm_run = vcpu->run;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003267 u32 exit_code = svm->vmcb->control.exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003268
Avi Kivityaa179112010-11-17 18:44:19 +02003269 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003270
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003271 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
Joerg Roedel2be4fc72010-04-22 12:33:09 +02003272 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3273 if (npt_enabled)
3274 vcpu->arch.cr3 = svm->vmcb->save.cr3;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003275
Joerg Roedelcd3ff652009-10-09 16:08:26 +02003276 if (unlikely(svm->nested.exit_required)) {
3277 nested_svm_vmexit(svm);
3278 svm->nested.exit_required = false;
3279
3280 return 1;
3281 }
3282
Joerg Roedel20307532010-11-29 17:51:48 +01003283 if (is_guest_mode(vcpu)) {
Joerg Roedel410e4d52009-08-07 11:49:44 +02003284 int vmexit;
3285
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02003286 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
3287 svm->vmcb->control.exit_info_1,
3288 svm->vmcb->control.exit_info_2,
3289 svm->vmcb->control.exit_int_info,
3290 svm->vmcb->control.exit_int_info_err);
3291
Joerg Roedel410e4d52009-08-07 11:49:44 +02003292 vmexit = nested_svm_exit_special(svm);
3293
3294 if (vmexit == NESTED_EXIT_CONTINUE)
3295 vmexit = nested_svm_exit_handled(svm);
3296
3297 if (vmexit == NESTED_EXIT_DONE)
Alexander Grafcf74a782008-11-25 20:17:08 +01003298 return 1;
Alexander Grafcf74a782008-11-25 20:17:08 +01003299 }
3300
Joerg Roedela5c38322009-08-07 11:49:32 +02003301 svm_complete_interrupts(svm);
3302
Avi Kivity04d2cc72007-09-10 18:10:54 +03003303 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3304 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3305 kvm_run->fail_entry.hardware_entry_failure_reason
3306 = svm->vmcb->control.exit_code;
Joerg Roedel3f10c842010-05-05 16:04:42 +02003307 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
3308 dump_vmcb(vcpu);
Avi Kivity04d2cc72007-09-10 18:10:54 +03003309 return 0;
3310 }
3311
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003312 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
Joerg Roedel709ddeb2008-02-07 13:47:45 +01003313 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
Joerg Roedel55c5e462010-09-10 17:31:04 +02003314 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
3315 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003316		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
3317 "exit_code 0x%x\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08003318 __func__, svm->vmcb->control.exit_int_info,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003319 exit_code);
3320
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +02003321 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
Joe Perches56919c52007-11-12 20:06:51 -08003322 || !svm_exit_handlers[exit_code]) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08003323 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
Avi Kivity364b6252007-04-16 14:28:40 +03003324 kvm_run->hw.hardware_exit_reason = exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003325 return 0;
3326 }
3327
Avi Kivity851ba692009-08-24 11:10:17 +03003328 return svm_exit_handlers[exit_code](svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003329}
3330
3331static void reload_tss(struct kvm_vcpu *vcpu)
3332{
3333 int cpu = raw_smp_processor_id();
3334
Tejun Heo0fe1e002009-10-29 22:34:14 +09003335 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
3336 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
Avi Kivity6aa8b732006-12-10 02:21:36 -08003337 load_TR_desc();
3338}
3339
Rusty Russelle756fc62007-07-30 20:07:08 +10003340static void pre_svm_run(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003341{
3342 int cpu = raw_smp_processor_id();
3343
Tejun Heo0fe1e002009-10-29 22:34:14 +09003344 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003345
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03003346 /* FIXME: handle wraparound of asid_generation */
Tejun Heo0fe1e002009-10-29 22:34:14 +09003347 if (svm->asid_generation != sd->asid_generation)
3348 new_asid(svm, sd);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003349}
3350
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003351static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3352{
3353 struct vcpu_svm *svm = to_svm(vcpu);
3354
3355 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3356 vcpu->arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01003357 set_intercept(svm, INTERCEPT_IRET);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003358 ++vcpu->stat.nmi_injections;
3359}
Avi Kivity6aa8b732006-12-10 02:21:36 -08003360
Eddie Dong85f455f2007-07-06 12:20:49 +03003361static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003362{
3363 struct vmcb_control_area *control;
3364
Rusty Russelle756fc62007-07-30 20:07:08 +10003365 control = &svm->vmcb->control;
Eddie Dong85f455f2007-07-06 12:20:49 +03003366 control->int_vector = irq;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003367 control->int_ctl &= ~V_INTR_PRIO_MASK;
3368 control->int_ctl |= V_IRQ_MASK |
3369 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
Joerg Roedeldecdbf62010-12-03 11:45:52 +01003370 mark_dirty(svm->vmcb, VMCB_INTR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003371}
3372
Gleb Natapov66fd3f72009-05-11 13:35:50 +03003373static void svm_set_irq(struct kvm_vcpu *vcpu)
Eddie Dong2a8067f2007-08-06 16:29:07 +03003374{
3375 struct vcpu_svm *svm = to_svm(vcpu);
3376
Joerg Roedel2af91942009-08-07 11:49:28 +02003377 BUG_ON(!(gif_set(svm)));
Alexander Grafcf74a782008-11-25 20:17:08 +01003378
Gleb Natapov9fb2d2b2010-05-23 14:28:26 +03003379 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
3380 ++vcpu->stat.irq_injections;
3381
Alexander Graf219b65d2009-06-15 15:21:25 +02003382 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3383 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
Eddie Dong2a8067f2007-08-06 16:29:07 +03003384}
3385
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003386static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3387{
3388 struct vcpu_svm *svm = to_svm(vcpu);
3389
Joerg Roedel20307532010-11-29 17:51:48 +01003390 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003391 return;
3392
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003393 if (irr == -1)
3394 return;
3395
3396 if (tpr >= irr)
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003397 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003398}
3399
3400static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
Joerg Roedelaaacfc92008-04-16 16:51:18 +02003401{
3402 struct vcpu_svm *svm = to_svm(vcpu);
3403 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel924584c2010-04-22 12:33:07 +02003404 int ret;
3405 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
3406 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
3407 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
3408
3409 return ret;
Joerg Roedelaaacfc92008-04-16 16:51:18 +02003410}
3411
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003412static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3413{
3414 struct vcpu_svm *svm = to_svm(vcpu);
3415
3416 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
3417}
3418
3419static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3420{
3421 struct vcpu_svm *svm = to_svm(vcpu);
3422
3423 if (masked) {
3424 svm->vcpu.arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01003425 set_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003426 } else {
3427 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01003428 clr_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003429 }
3430}
3431
Gleb Natapov78646122009-03-23 12:12:11 +02003432static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
3433{
3434 struct vcpu_svm *svm = to_svm(vcpu);
3435 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7fcdb512009-09-16 15:24:15 +02003436 int ret;
3437
3438 if (!gif_set(svm) ||
3439 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
3440 return 0;
3441
Avi Kivityf6e78472010-08-02 15:30:20 +03003442 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
Joerg Roedel7fcdb512009-09-16 15:24:15 +02003443
Joerg Roedel20307532010-11-29 17:51:48 +01003444 if (is_guest_mode(vcpu))
Joerg Roedel7fcdb512009-09-16 15:24:15 +02003445 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
3446
3447 return ret;
Gleb Natapov78646122009-03-23 12:12:11 +02003448}
3449
Gleb Natapov9222be12009-04-23 17:14:37 +03003450static void enable_irq_window(struct kvm_vcpu *vcpu)
3451{
Alexander Graf219b65d2009-06-15 15:21:25 +02003452 struct vcpu_svm *svm = to_svm(vcpu);
Alexander Graf219b65d2009-06-15 15:21:25 +02003453
Joerg Roedele0231712010-02-24 18:59:10 +01003454 /*
3455 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
3456 * 1, because that's a separate STGI/VMRUN intercept. The next time we
3457 * get that intercept, this function will be called again though and
3458 * we'll get the vintr intercept.
3459 */
Joerg Roedel8fe54652010-02-19 16:23:01 +01003460 if (gif_set(svm) && nested_svm_intr(svm)) {
Alexander Graf219b65d2009-06-15 15:21:25 +02003461 svm_set_vintr(svm);
3462 svm_inject_irq(svm, 0x0);
3463 }
Gleb Natapov9222be12009-04-23 17:14:37 +03003464}
3465
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003466static void enable_nmi_window(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003467{
Avi Kivity04d2cc72007-09-10 18:10:54 +03003468 struct vcpu_svm *svm = to_svm(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03003469
Gleb Natapov44c11432009-05-11 13:35:52 +03003470 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
3471 == HF_NMI_MASK)
3472 return; /* IRET will cause a vm exit */
3473
Joerg Roedele0231712010-02-24 18:59:10 +01003474 /*
3475	 * Something prevents NMI from being injected. Single-step over the
3476	 * possible problem (IRET or exception injection or interrupt shadow).
3477 */
Jan Kiszka6be7d302009-10-18 13:24:54 +02003478 svm->nmi_singlestep = true;
Gleb Natapov44c11432009-05-11 13:35:52 +03003479 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3480 update_db_intercept(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03003481}
3482
Izik Eiduscbc94022007-10-25 00:29:55 +02003483static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
3484{
3485 return 0;
3486}
3487
Avi Kivityd9e368d2007-06-07 19:18:30 +03003488static void svm_flush_tlb(struct kvm_vcpu *vcpu)
3489{
Joerg Roedel38e5e922010-12-03 15:25:16 +01003490 struct vcpu_svm *svm = to_svm(vcpu);
3491
3492 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3493 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3494 else
3495 svm->asid_generation--;
Avi Kivityd9e368d2007-06-07 19:18:30 +03003496}
3497
Avi Kivity04d2cc72007-09-10 18:10:54 +03003498static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
3499{
3500}
3501
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003502static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3503{
3504 struct vcpu_svm *svm = to_svm(vcpu);
3505
Joerg Roedel20307532010-11-29 17:51:48 +01003506 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003507 return;
3508
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003509 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003510 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
Gleb Natapov615d5192009-04-21 17:45:05 +03003511 kvm_set_cr8(vcpu, cr8);
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003512 }
3513}
3514
Joerg Roedel649d6862008-04-16 16:51:15 +02003515static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3516{
3517 struct vcpu_svm *svm = to_svm(vcpu);
3518 u64 cr8;
3519
Joerg Roedel20307532010-11-29 17:51:48 +01003520 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003521 return;
3522
Joerg Roedel649d6862008-04-16 16:51:15 +02003523 cr8 = kvm_get_cr8(vcpu);
3524 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3525 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3526}
3527
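/*
 * Re-queue any event that was in flight when the #VMEXIT happened
 * (recorded in exit_int_info) so it is injected again on the next entry;
 * software exceptions are re-executed instead of being reinjected.
 */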
Gleb Natapov9222be12009-04-23 17:14:37 +03003528static void svm_complete_interrupts(struct vcpu_svm *svm)
3529{
3530 u8 vector;
3531 int type;
3532 u32 exitintinfo = svm->vmcb->control.exit_int_info;
Jan Kiszka66b71382010-02-23 17:47:56 +01003533 unsigned int3_injected = svm->int3_injected;
3534
3535 svm->int3_injected = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03003536
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02003537 /*
3538 * If we've made progress since setting HF_IRET_MASK, we've
3539 * executed an IRET and can allow NMI injection.
3540 */
3541 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
3542 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
Gleb Natapov44c11432009-05-11 13:35:52 +03003543 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
Avi Kivity3842d132010-07-27 12:30:24 +03003544 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3545 }
Gleb Natapov44c11432009-05-11 13:35:52 +03003546
Gleb Natapov9222be12009-04-23 17:14:37 +03003547 svm->vcpu.arch.nmi_injected = false;
3548 kvm_clear_exception_queue(&svm->vcpu);
3549 kvm_clear_interrupt_queue(&svm->vcpu);
3550
3551 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3552 return;
3553
Avi Kivity3842d132010-07-27 12:30:24 +03003554 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3555
Gleb Natapov9222be12009-04-23 17:14:37 +03003556 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3557 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3558
3559 switch (type) {
3560 case SVM_EXITINTINFO_TYPE_NMI:
3561 svm->vcpu.arch.nmi_injected = true;
3562 break;
3563 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszka66b71382010-02-23 17:47:56 +01003564 /*
3565 * In case of software exceptions, do not reinject the vector,
3566 * but re-execute the instruction instead. Rewind RIP first
3567 * if we emulated INT3 before.
3568 */
3569 if (kvm_exception_is_soft(vector)) {
3570 if (vector == BP_VECTOR && int3_injected &&
3571 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
3572 kvm_rip_write(&svm->vcpu,
3573 kvm_rip_read(&svm->vcpu) -
3574 int3_injected);
Alexander Graf219b65d2009-06-15 15:21:25 +02003575 break;
Jan Kiszka66b71382010-02-23 17:47:56 +01003576 }
Gleb Natapov9222be12009-04-23 17:14:37 +03003577 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3578 u32 err = svm->vmcb->control.exit_int_info_err;
Joerg Roedelce7ddec2010-04-22 12:33:13 +02003579 kvm_requeue_exception_e(&svm->vcpu, vector, err);
Gleb Natapov9222be12009-04-23 17:14:37 +03003580
3581 } else
Joerg Roedelce7ddec2010-04-22 12:33:13 +02003582 kvm_requeue_exception(&svm->vcpu, vector);
Gleb Natapov9222be12009-04-23 17:14:37 +03003583 break;
3584 case SVM_EXITINTINFO_TYPE_INTR:
Gleb Natapov66fd3f72009-05-11 13:35:50 +03003585 kvm_queue_interrupt(&svm->vcpu, vector, false);
Gleb Natapov9222be12009-04-23 17:14:37 +03003586 break;
3587 default:
3588 break;
3589 }
3590}
3591
Avi Kivityb463a6f2010-07-20 15:06:17 +03003592static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3593{
3594 struct vcpu_svm *svm = to_svm(vcpu);
3595 struct vmcb_control_area *control = &svm->vmcb->control;
3596
3597 control->exit_int_info = control->event_inj;
3598 control->exit_int_info_err = control->event_inj_err;
3599 control->event_inj = 0;
3600 svm_complete_interrupts(svm);
3601}
3602
Avi Kivity80e31d42008-07-14 14:44:59 +03003603#ifdef CONFIG_X86_64
3604#define R "r"
3605#else
3606#define R "e"
3607#endif
3608
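/*
 * Main guest entry path: load the guest GPRs, execute
 * VMLOAD/VMRUN/VMSAVE, then restore host state and pick the exit
 * information out of the VMCB.
 */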
Avi Kivity851ba692009-08-24 11:10:17 +03003609static void svm_vcpu_run(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003610{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003611 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivityd9e368d2007-06-07 19:18:30 +03003612
Joerg Roedel2041a062010-04-22 12:33:08 +02003613 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3614 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3615 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3616
Joerg Roedelcd3ff652009-10-09 16:08:26 +02003617 /*
3618 * A vmexit emulation is required before the vcpu can be executed
3619 * again.
3620 */
3621 if (unlikely(svm->nested.exit_required))
3622 return;
3623
Rusty Russelle756fc62007-07-30 20:07:08 +10003624 pre_svm_run(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003625
Joerg Roedel649d6862008-04-16 16:51:15 +02003626 sync_lapic_to_cr8(vcpu);
3627
Joerg Roedelcda0ffd2009-08-07 11:49:45 +02003628 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003629
Avi Kivity04d2cc72007-09-10 18:10:54 +03003630 clgi();
3631
3632 local_irq_enable();
Avi Kivity36241b82006-12-22 01:05:20 -08003633
Avi Kivity6aa8b732006-12-10 02:21:36 -08003634 asm volatile (
Avi Kivity80e31d42008-07-14 14:44:59 +03003635 "push %%"R"bp; \n\t"
3636 "mov %c[rbx](%[svm]), %%"R"bx \n\t"
3637 "mov %c[rcx](%[svm]), %%"R"cx \n\t"
3638 "mov %c[rdx](%[svm]), %%"R"dx \n\t"
3639 "mov %c[rsi](%[svm]), %%"R"si \n\t"
3640 "mov %c[rdi](%[svm]), %%"R"di \n\t"
3641 "mov %c[rbp](%[svm]), %%"R"bp \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08003642#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10003643 "mov %c[r8](%[svm]), %%r8 \n\t"
3644 "mov %c[r9](%[svm]), %%r9 \n\t"
3645 "mov %c[r10](%[svm]), %%r10 \n\t"
3646 "mov %c[r11](%[svm]), %%r11 \n\t"
3647 "mov %c[r12](%[svm]), %%r12 \n\t"
3648 "mov %c[r13](%[svm]), %%r13 \n\t"
3649 "mov %c[r14](%[svm]), %%r14 \n\t"
3650 "mov %c[r15](%[svm]), %%r15 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08003651#endif
3652
Avi Kivity6aa8b732006-12-10 02:21:36 -08003653 /* Enter guest mode */
Avi Kivity80e31d42008-07-14 14:44:59 +03003654 "push %%"R"ax \n\t"
3655 "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
Avi Kivity4ecac3f2008-05-13 13:23:38 +03003656 __ex(SVM_VMLOAD) "\n\t"
3657 __ex(SVM_VMRUN) "\n\t"
3658 __ex(SVM_VMSAVE) "\n\t"
Avi Kivity80e31d42008-07-14 14:44:59 +03003659 "pop %%"R"ax \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08003660
3661 /* Save guest registers, load host registers */
Avi Kivity80e31d42008-07-14 14:44:59 +03003662 "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
3663 "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
3664 "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
3665 "mov %%"R"si, %c[rsi](%[svm]) \n\t"
3666 "mov %%"R"di, %c[rdi](%[svm]) \n\t"
3667 "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08003668#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10003669 "mov %%r8, %c[r8](%[svm]) \n\t"
3670 "mov %%r9, %c[r9](%[svm]) \n\t"
3671 "mov %%r10, %c[r10](%[svm]) \n\t"
3672 "mov %%r11, %c[r11](%[svm]) \n\t"
3673 "mov %%r12, %c[r12](%[svm]) \n\t"
3674 "mov %%r13, %c[r13](%[svm]) \n\t"
3675 "mov %%r14, %c[r14](%[svm]) \n\t"
3676 "mov %%r15, %c[r15](%[svm]) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08003677#endif
Avi Kivity80e31d42008-07-14 14:44:59 +03003678 "pop %%"R"bp"
Avi Kivity6aa8b732006-12-10 02:21:36 -08003679 :
Rusty Russellfb3f0f52007-07-27 17:16:56 +10003680 : [svm]"a"(svm),
Avi Kivity6aa8b732006-12-10 02:21:36 -08003681 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003682 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
3683 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
3684 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
3685 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
3686 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
3687 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
Avi Kivity05b3e0c2006-12-13 00:33:45 -08003688#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +08003689 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
3690 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
3691 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
3692 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
3693 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
3694 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
3695 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
3696 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
Avi Kivity6aa8b732006-12-10 02:21:36 -08003697#endif
Laurent Vivier54a08c02007-10-25 14:18:53 +02003698 : "cc", "memory"
Avi Kivity80e31d42008-07-14 14:44:59 +03003699 , R"bx", R"cx", R"dx", R"si", R"di"
Laurent Vivier54a08c02007-10-25 14:18:53 +02003700#ifdef CONFIG_X86_64
Laurent Vivier54a08c02007-10-25 14:18:53 +02003701 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
3702#endif
3703 );
Avi Kivity6aa8b732006-12-10 02:21:36 -08003704
Avi Kivity82ca2d12010-10-21 12:20:34 +02003705#ifdef CONFIG_X86_64
3706 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
3707#else
Avi Kivitydacccfd2010-10-21 12:20:33 +02003708 loadsegment(fs, svm->host.fs);
Avi Kivity831ca602011-03-08 16:09:51 +02003709#ifndef CONFIG_X86_32_LAZY_GS
3710 loadsegment(gs, svm->host.gs);
3711#endif
Avi Kivity9581d442010-10-19 16:46:55 +02003712#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -08003713
3714 reload_tss(vcpu);
3715
Avi Kivity56ba47d2007-11-07 17:14:18 +02003716 local_irq_disable();
3717
Avi Kivity13c34e02010-10-21 12:20:31 +02003718 vcpu->arch.cr2 = svm->vmcb->save.cr2;
3719 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3720 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3721 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3722
Joerg Roedel3781c012011-01-14 16:45:02 +01003723 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3724 kvm_before_handle_nmi(&svm->vcpu);
3725
3726 stgi();
3727
3728 /* Any pending NMI will happen here */
3729
3730 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3731 kvm_after_handle_nmi(&svm->vcpu);
3732
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003733 sync_cr8_to_lapic(vcpu);
3734
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003735 svm->next_rip = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03003736
Joerg Roedel38e5e922010-12-03 15:25:16 +01003737 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
3738
Gleb Natapov631bc482010-10-14 11:22:52 +02003739 /* if exit due to PF check for async PF */
3740 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
3741 svm->apf_reason = kvm_read_and_reset_pf_reason();
3742
Avi Kivity6de4f3a2009-05-31 22:58:47 +03003743 if (npt_enabled) {
3744 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
3745 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
3746 }
Joerg Roedelfe5913e2010-05-17 14:43:34 +02003747
3748 /*
3749 * We need to handle MC intercepts here before the vcpu has a chance to
3750 * change the physical cpu
3751 */
3752 if (unlikely(svm->vmcb->control.exit_code ==
3753 SVM_EXIT_EXCP_BASE + MC_VECTOR))
3754 svm_handle_mce(svm);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003755
3756 mark_all_clean(svm->vmcb);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003757}
3758
Avi Kivity80e31d42008-07-14 14:44:59 +03003759#undef R
3760
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.cr3 = root;
        mark_dirty(svm->vmcb, VMCB_CR);
        svm_flush_tlb(vcpu);
}

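/*
 * With nested paging the root passed in is the nested (host) page table;
 * it goes into control.nested_cr3, while save.cr3 keeps tracking the
 * guest's own CR3 value.
 */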
static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.nested_cr3 = root;
        mark_dirty(svm->vmcb, VMCB_NPT);

        /* Also sync guest cr3 here in case we live migrate */
        svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
        mark_dirty(svm->vmcb, VMCB_CR);

        svm_flush_tlb(vcpu);
}

static int is_disabled(void)
{
        u64 vm_cr;

        rdmsrl(MSR_VM_CR, vm_cr);
        if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
                return 1;

        return 0;
}

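/*
 * Called through kvm_x86_ops->patch_hypercall to rewrite a guest hypercall
 * site in place with the encoding this hardware accepts: on SVM that is
 * VMMCALL (0f 01 d9).  A guest that used the Intel VMCALL encoding would
 * otherwise keep taking #UD on AMD hardware.
 */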
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
        /*
         * Patch in the VMMCALL instruction:
         */
        hypercall[0] = 0x0f;
        hypercall[1] = 0x01;
        hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
        *(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
        return false;
}

static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
        return 0;
}

static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}

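/*
 * Adjust the CPUID bits KVM reports to the guest: leaf 0x80000001 ECX bit 2
 * advertises SVM itself (only when nesting is enabled), and leaf 0x8000000A
 * is the SVM feature-identification leaf a nested hypervisor will look at.
 */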
static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
        switch (func) {
        case 0x80000001:
                if (nested)
                        entry->ecx |= (1 << 2); /* Set SVM bit */
                break;
        case 0x8000000A:
                entry->eax = 1; /* SVM revision 1 */
                entry->ebx = 8; /* Report 8 ASIDs, in case we later add proper
                                   ASID emulation to nested SVM */
                entry->ecx = 0; /* Reserved */
                entry->edx = 0; /* By default, advertise no additional
                                   features */

                /* Support next_rip if host supports it */
                if (boot_cpu_has(X86_FEATURE_NRIPS))
                        entry->edx |= SVM_FEATURE_NRIP;

                /* Support NPT for the guest if enabled */
                if (npt_enabled)
                        entry->edx |= SVM_FEATURE_NPT;

                break;
        }
}

static const struct trace_print_flags svm_exit_reasons_str[] = {
        { SVM_EXIT_READ_CR0, "read_cr0" },
        { SVM_EXIT_READ_CR3, "read_cr3" },
        { SVM_EXIT_READ_CR4, "read_cr4" },
        { SVM_EXIT_READ_CR8, "read_cr8" },
        { SVM_EXIT_WRITE_CR0, "write_cr0" },
        { SVM_EXIT_WRITE_CR3, "write_cr3" },
        { SVM_EXIT_WRITE_CR4, "write_cr4" },
        { SVM_EXIT_WRITE_CR8, "write_cr8" },
        { SVM_EXIT_READ_DR0, "read_dr0" },
        { SVM_EXIT_READ_DR1, "read_dr1" },
        { SVM_EXIT_READ_DR2, "read_dr2" },
        { SVM_EXIT_READ_DR3, "read_dr3" },
        { SVM_EXIT_WRITE_DR0, "write_dr0" },
        { SVM_EXIT_WRITE_DR1, "write_dr1" },
        { SVM_EXIT_WRITE_DR2, "write_dr2" },
        { SVM_EXIT_WRITE_DR3, "write_dr3" },
        { SVM_EXIT_WRITE_DR5, "write_dr5" },
        { SVM_EXIT_WRITE_DR7, "write_dr7" },
        { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
        { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
        { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
        { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" },
        { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" },
        { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" },
        { SVM_EXIT_INTR, "interrupt" },
        { SVM_EXIT_NMI, "nmi" },
        { SVM_EXIT_SMI, "smi" },
        { SVM_EXIT_INIT, "init" },
        { SVM_EXIT_VINTR, "vintr" },
        { SVM_EXIT_CPUID, "cpuid" },
        { SVM_EXIT_INVD, "invd" },
        { SVM_EXIT_HLT, "hlt" },
        { SVM_EXIT_INVLPG, "invlpg" },
        { SVM_EXIT_INVLPGA, "invlpga" },
        { SVM_EXIT_IOIO, "io" },
        { SVM_EXIT_MSR, "msr" },
        { SVM_EXIT_TASK_SWITCH, "task_switch" },
        { SVM_EXIT_SHUTDOWN, "shutdown" },
        { SVM_EXIT_VMRUN, "vmrun" },
        { SVM_EXIT_VMMCALL, "hypercall" },
        { SVM_EXIT_VMLOAD, "vmload" },
        { SVM_EXIT_VMSAVE, "vmsave" },
        { SVM_EXIT_STGI, "stgi" },
        { SVM_EXIT_CLGI, "clgi" },
        { SVM_EXIT_SKINIT, "skinit" },
        { SVM_EXIT_WBINVD, "wbinvd" },
        { SVM_EXIT_MONITOR, "monitor" },
        { SVM_EXIT_MWAIT, "mwait" },
        { SVM_EXIT_XSETBV, "xsetbv" },
        { SVM_EXIT_NPF, "npf" },
        { -1, NULL }
};

static int svm_get_lpage_level(void)
{
        return PT_PDPE_LEVEL;
}

static bool svm_rdtscp_supported(void)
{
        return false;
}

static bool svm_has_wbinvd_exit(void)
{
        return true;
}

static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        set_exception_intercept(svm, NM_VECTOR);
        update_cr0_intercept(svm);
}

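/*
 * Map the emulator's x86_intercept codes onto SVM exit codes, together with
 * the stage (before the exception checks, after them, or after the memory
 * access) at which the architectural intercept check is performed.
 * svm_check_intercept() consults this table while instructions are emulated
 * on behalf of an L2 guest, to decide whether L1 wants a #VMEXIT.
 */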
#define PRE_EX(exit)  { .exit_code = (exit), \
                        .stage = X86_ICPT_PRE_EXCEPT, \
                        .valid = true }
#define POST_EX(exit) { .exit_code = (exit), \
                        .stage = X86_ICPT_POST_EXCEPT, \
                        .valid = true }
#define POST_MEM(exit) { .exit_code = (exit), \
                         .stage = X86_ICPT_POST_MEMACCESS, \
                         .valid = true }

static struct __x86_intercept {
        u32 exit_code;
        enum x86_intercept_stage stage;
        bool valid;
} x86_intercept_map[] = {
        [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
        [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
        [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
        [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
        [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
        [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
        [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
        [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
        [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
        [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
        [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
        [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
        [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
        [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
        [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
        [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
        [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
        [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
        [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
        [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
        [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
        [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
        [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
        [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
        [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
        [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
        [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
        [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
        [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
        [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
        [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
        [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
        [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
        [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
        [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
        [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
        [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
        [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
        [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
        [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
        [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
        [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
        [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
        [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
        [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
        [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
};

#undef PRE_EX
#undef POST_EX
#undef POST_MEM

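/*
 * Invoked by the instruction emulator while it is emulating on behalf of a
 * nested (L2) guest: build the exit information the hardware would have
 * produced for this instruction and ask the nested code whether L1 has the
 * corresponding intercept enabled, i.e. whether emulation must be cut short
 * by injecting a #VMEXIT into L1.
 */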
static int svm_check_intercept(struct kvm_vcpu *vcpu,
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int vmexit, ret = X86EMUL_CONTINUE;
        struct __x86_intercept icpt_info;
        struct vmcb *vmcb = svm->vmcb;

        if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
                goto out;

        icpt_info = x86_intercept_map[info->intercept];

        if (!icpt_info.valid || stage != icpt_info.stage)
                goto out;

        switch (icpt_info.exit_code) {
        case SVM_EXIT_READ_CR0:
                if (info->intercept == x86_intercept_cr_read)
                        icpt_info.exit_code += info->modrm_reg;
                break;
        case SVM_EXIT_WRITE_CR0: {
                unsigned long cr0, val;
                u64 intercept;

                if (info->intercept == x86_intercept_cr_write)
                        icpt_info.exit_code += info->modrm_reg;

                if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
                        break;

                intercept = svm->nested.intercept;

                if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
                        break;

                cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
                val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;

                if (info->intercept == x86_intercept_lmsw) {
                        cr0 &= 0xfUL;
                        val &= 0xfUL;
                        /* lmsw can't clear PE - catch this here */
                        if (cr0 & X86_CR0_PE)
                                val |= X86_CR0_PE;
                }

                if (cr0 ^ val)
                        icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

                break;
        }
        case SVM_EXIT_READ_DR0:
        case SVM_EXIT_WRITE_DR0:
                icpt_info.exit_code += info->modrm_reg;
                break;
        case SVM_EXIT_MSR:
                if (info->intercept == x86_intercept_wrmsr)
                        vmcb->control.exit_info_1 = 1;
                else
                        vmcb->control.exit_info_1 = 0;
                break;
        case SVM_EXIT_PAUSE:
                /*
                 * We only get here for the NOP opcode; a real PAUSE is
                 * "rep nop", so the rep prefix is what distinguishes it
                 * from a plain NOP.
                 */
                if (info->rep_prefix != REPE_PREFIX)
                        goto out;
        case SVM_EXIT_IOIO: {
                u64 exit_info;
                u32 bytes;

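                /*
                 * Reconstruct EXITINFO1 the way the hardware encodes an IOIO
                 * intercept: the port number from DX in bits 31:16, then the
                 * direction (IN), string and rep flags, the operand size and
                 * the address size.
                 */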
                exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;

                if (info->intercept == x86_intercept_in ||
                    info->intercept == x86_intercept_ins) {
                        exit_info |= SVM_IOIO_TYPE_MASK;
                        bytes = info->src_bytes;
                } else {
                        bytes = info->dst_bytes;
                }

                if (info->intercept == x86_intercept_outs ||
                    info->intercept == x86_intercept_ins)
                        exit_info |= SVM_IOIO_STR_MASK;

                if (info->rep_prefix)
                        exit_info |= SVM_IOIO_REP_MASK;

                bytes = min(bytes, 4u);

                exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

                exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

                vmcb->control.exit_info_1 = exit_info;
                vmcb->control.exit_info_2 = info->next_rip;

                break;
        }
        default:
                break;
        }

        vmcb->control.next_rip = info->next_rip;
        vmcb->control.exit_code = icpt_info.exit_code;
        vmexit = nested_svm_exit_handled(svm);

        ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
                                           : X86EMUL_CONTINUE;

out:
        return ret;
}

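/*
 * The kvm_x86_ops table hooks the SVM implementation into the generic KVM
 * x86 code; kvm_init() registers it from svm_init() below.
 */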
static struct kvm_x86_ops svm_x86_ops = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
        .hardware_setup = svm_hardware_setup,
        .hardware_unsetup = svm_hardware_unsetup,
        .check_processor_compatibility = svm_check_processor_compat,
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,
        .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

        .vcpu_create = svm_create_vcpu,
        .vcpu_free = svm_free_vcpu,
        .vcpu_reset = svm_vcpu_reset,

        .prepare_guest_switch = svm_prepare_guest_switch,
        .vcpu_load = svm_vcpu_load,
        .vcpu_put = svm_vcpu_put,

        .set_guest_debug = svm_guest_debug,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
        .get_segment = svm_get_segment,
        .set_segment = svm_set_segment,
        .get_cpl = svm_get_cpl,
        .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
        .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
        .decache_cr3 = svm_decache_cr3,
        .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
        .set_cr3 = svm_set_cr3,
        .set_cr4 = svm_set_cr4,
        .set_efer = svm_set_efer,
        .get_idt = svm_get_idt,
        .set_idt = svm_set_idt,
        .get_gdt = svm_get_gdt,
        .set_gdt = svm_set_gdt,
        .set_dr7 = svm_set_dr7,
        .cache_reg = svm_cache_reg,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
        .fpu_activate = svm_fpu_activate,
        .fpu_deactivate = svm_fpu_deactivate,

        .tlb_flush = svm_flush_tlb,

        .run = svm_vcpu_run,
        .handle_exit = handle_exit,
        .skip_emulated_instruction = skip_emulated_instruction,
        .set_interrupt_shadow = svm_set_interrupt_shadow,
        .get_interrupt_shadow = svm_get_interrupt_shadow,
        .patch_hypercall = svm_patch_hypercall,
        .set_irq = svm_set_irq,
        .set_nmi = svm_inject_nmi,
        .queue_exception = svm_queue_exception,
        .cancel_injection = svm_cancel_injection,
        .interrupt_allowed = svm_interrupt_allowed,
        .nmi_allowed = svm_nmi_allowed,
        .get_nmi_mask = svm_get_nmi_mask,
        .set_nmi_mask = svm_set_nmi_mask,
        .enable_nmi_window = enable_nmi_window,
        .enable_irq_window = enable_irq_window,
        .update_cr8_intercept = update_cr8_intercept,

        .set_tss_addr = svm_set_tss_addr,
        .get_tdp_level = get_npt_level,
        .get_mt_mask = svm_get_mt_mask,

        .get_exit_info = svm_get_exit_info,
        .exit_reasons_str = svm_exit_reasons_str,

        .get_lpage_level = svm_get_lpage_level,

        .cpuid_update = svm_cpuid_update,

        .rdtscp_supported = svm_rdtscp_supported,

        .set_supported_cpuid = svm_set_supported_cpuid,

        .has_wbinvd_exit = svm_has_wbinvd_exit,

        .set_tsc_khz = svm_set_tsc_khz,
        .write_tsc_offset = svm_write_tsc_offset,
        .adjust_tsc_offset = svm_adjust_tsc_offset,

        .set_tdp_cr3 = set_tdp_cr3,

        .check_intercept = svm_check_intercept,
};

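/*
 * Module entry point: hand the ops table plus the size and alignment of our
 * vcpu structure to the generic KVM core, which allocates vcpus for us and
 * calls back through svm_x86_ops.
 */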
static int __init svm_init(void)
{
        return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
                        __alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
        kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)