/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>

#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/kvm_para.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT            (1 <<  0)
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_NRIP           (1 <<  3)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/*
	 * If we vmexit during an instruction emulation we need this to restore
	 * the l1 guest rip after the emulation
	 */
	unsigned long vmexit_rip;
	unsigned long vmexit_rsp;
	unsigned long vmexit_rax;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	struct nested_state nested;

	bool nmi_singlestep;

	unsigned int3_injected;
	unsigned long int3_rip;
	u32 apf_reason;
};

#define MSR_INVALID			0xffffffffU

static struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static int nested = 1;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

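/*
 * Background note: on CPUs that implement the VMCB Clean feature, a set
 * bit in vmcb->control.clean tells the hardware that the corresponding
 * VMCB area is unchanged since the last VMRUN and may be reused from its
 * internal cache. KVM therefore clears the matching bit whenever it
 * writes such a field; CPUs without the feature ignore the clean field.
 */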
static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

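/*
 * While an L2 guest is running, svm->vmcb holds the merged controls and
 * nested.hsave holds the host (KVM) view. An event must be intercepted
 * whenever either KVM or the L1 hypervisor wants it, hence the OR of
 * both intercept masks below.
 */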
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

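/*
 * The guest's global interrupt flag (toggled by STGI/CLGI under nested
 * SVM) is tracked purely in software, via HF_GIF_MASK in hflags.
 */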
static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}
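
/*
 * Worked example (illustrative): MSR_STAR (0xc0000081) lies in the
 * second range, so its byte offset is 0x81 / 4 + 1 * MSRS_RANGE_SIZE
 * = 0x820, which svm_msrpm_offset() returns as u32 offset 0x208.
 */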

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret & mask;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

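/*
 * On hardware with the NRIPS feature the VMCB provides the address of
 * the instruction following the intercepted one in next_rip; without
 * it, the fallback below uses the instruction emulator (EMULTYPE_SKIP)
 * to advance RIP.
 */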
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0)
		svm->next_rip = svm->vmcb->control.next_rip;

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

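/*
 * Workaround for AMD erratum 383: set bit 47 of MSR_AMD64_DC_CFG as
 * recommended for affected CPUs and remember that the erratum may be
 * present, so that machine-check events it can cause are recognizable
 * later.
 */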
static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!cpu_has_amd_erratum(amd_erratum_383))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	cpu_svm_disable();
}

static int svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
		       me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);

	if (!sd) {
		printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
		       me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	native_store_gdt(&gdt_descr);
	gdt = (struct desc_struct *)gdt_descr.address;
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	svm_init_erratum_383();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!sd->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

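/*
 * Layout note: each MSR owns two consecutive bits in the permission
 * map - the even bit intercepts reads, the odd bit intercepts writes -
 * so one u32 covers 16 MSRs. A set bit means the access is intercepted;
 * a cleared bit grants the guest direct access.
 */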
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

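/*
 * While in guest mode, svm->vmcb carries the combined (L1 + L2) TSC
 * offset and nested.hsave carries L1's own offset. Preserving the delta
 * g_tsc_offset keeps L2's view of the TSC stable when L1's offset is
 * rewritten.
 */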
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	}

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.tsc_offset += adjustment;
	if (is_guest_mode(vcpu))
		svm->nested.hsave->control.tsc_offset += adjustment;
	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercept(svm, INTERCEPT_DR0_READ);
	set_dr_intercept(svm, INTERCEPT_DR1_READ);
	set_dr_intercept(svm, INTERCEPT_DR2_READ);
	set_dr_intercept(svm, INTERCEPT_DR3_READ);
	set_dr_intercept(svm, INTERCEPT_DR4_READ);
	set_dr_intercept(svm, INTERCEPT_DR5_READ);
	set_dr_intercept(svm, INTERCEPT_DR6_READ);
	set_dr_intercept(svm, INTERCEPT_DR7_READ);

	set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
	set_dr_intercept(svm, INTERCEPT_DR7_WRITE);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_HLT);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_MONITOR);
	set_intercept(svm, INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * This is the guest-visible cr0 value.
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 */
	svm->vcpu.arch.cr0 = 0;
	(void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		clr_intercept(svm, INTERCEPT_TASK_SWITCH);
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = 0x0007040600070406ULL;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	force_new_asid(&svm->vcpu);

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		control->pause_filter_count = 3000;
		set_intercept(svm, INTERCEPT_PAUSE);
	}

	mark_all_dirty(svm->vmcb);

	enable_gif(svm);
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm);

	if (!kvm_vcpu_is_bsp(vcpu)) {
		kvm_rip_write(vcpu, 0);
		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
	}
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto uninit;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto free_page1;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto free_page2;

	hsave_page = alloc_page(GFP_KERNEL);
	if (!hsave_page)
		goto free_page3;

	svm->nested.hsave = page_address(hsave_page);

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->nested.msrpm = page_address(nested_msrpm_pages);
	svm_vcpu_init_msrpm(svm->nested.msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	init_vmcb(svm);
	kvm_write_tsc(&svm->vcpu, 0);

	err = fx_init(&svm->vcpu);
	if (err)
		goto free_page4;

	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (kvm_vcpu_is_bsp(&svm->vcpu))
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

free_page4:
	__free_page(hsave_page);
free_page3:
	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
	__free_page(page);
uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->nested.hsave));
	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		svm->asid_generation = 0;
		mark_all_dirty(svm->vmcb);
	}

#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
	savesegment(fs, svm->host.fs);
	savesegment(gs, svm->host.gs);
	svm->host.ldt = kvm_read_ldt();

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
	loadsegment(fs, svm->host.fs);
	load_gs_index(svm->host.gs);
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
#else
	loadsegment(gs, svm->host.gs);
#endif
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		BUG_ON(!npt_enabled);
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
		break;
	default:
		BUG();
	}
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	set_intercept(svm, INTERCEPT_VINTR);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	clr_intercept(svm, INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross vendor migration purposes by "not present"
	 */
	var->unusable = !var->present || (var->type == 0);

	switch (seg) {
	case VCPU_SREG_CS:
		/*
		 * SVM always stores 0 for the 'G' bit in the CS selector in
		 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
		 * Intel's VMENTRY has a check on the 'G' bit.
		 */
		var->g = s->limit > 0xfffff;
		break;
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		break;
	}
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

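/*
 * hcr0 is the CR0 value the guest actually runs with; the bits in
 * SVM_CR0_SELECTIVE_MASK (TS and MP) may be shadowed there for lazy FPU
 * switching. CR0 reads and writes only need to be intercepted while the
 * shadowed value differs from the guest-visible cr0.
 */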
Avi Kivityd2251572010-01-06 10:55:27 +02001338static void update_cr0_intercept(struct vcpu_svm *svm)
1339{
1340 ulong gcr0 = svm->vcpu.arch.cr0;
1341 u64 *hcr0 = &svm->vmcb->save.cr0;
1342
1343 if (!svm->vcpu.fpu_active)
1344 *hcr0 |= SVM_CR0_SELECTIVE_MASK;
1345 else
1346 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
1347 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
1348
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001349 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02001350
1351 if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001352 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
1353 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02001354 } else {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001355 set_cr_intercept(svm, INTERCEPT_CR0_READ);
1356 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02001357 }
1358}
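/*
 * Worked example (a sketch, assuming SVM_CR0_SELECTIVE_MASK covers
 * X86_CR0_TS | X86_CR0_MP as declared in svm.h): with the guest fpu
 * active, the TS and MP bits of the shadowed cr0 are copied from the
 * guest value; if the remaining bits already match, gcr0 == *hcr0
 * holds and the cr0 read/write intercepts can be dropped because the
 * guest would see exactly the value it wrote. With the fpu inactive,
 * TS and MP are forced on in *hcr0, the values differ and the
 * intercepts stay armed so kvm can hide the extra TS from the guest.
 */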
1359
Avi Kivity6aa8b732006-12-10 02:21:36 -08001360static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1361{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001362 struct vcpu_svm *svm = to_svm(vcpu);
1363
Joerg Roedel20307532010-11-29 17:51:48 +01001364 if (is_guest_mode(vcpu)) {
Joerg Roedel7f5d8b52010-02-24 18:59:18 +01001365 /*
1366 * We are here because we run in nested mode, the host kvm
1367 * intercepts cr0 writes but the L1 hypervisor does not.
1368 * However, the L1 hypervisor may intercept selective cr0
1369 * writes, which needs to be checked here.
1370 */
1371 unsigned long old, new;
1372
1373 /* Remove bits that would trigger a real cr0 write intercept */
1374 old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
1375 new = cr0 & SVM_CR0_SELECTIVE_MASK;
1376
1377 if (old == new) {
1378 /* cr0 write with TS and MP unchanged */
1379 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
Joerg Roedelcda00082010-09-02 17:29:46 +02001380 if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
1381 svm->nested.vmexit_rip = kvm_rip_read(vcpu);
1382 svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
1383 svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
Joerg Roedel7f5d8b52010-02-24 18:59:18 +01001384 return;
Joerg Roedelcda00082010-09-02 17:29:46 +02001385 }
Joerg Roedel7f5d8b52010-02-24 18:59:18 +01001386 }
1387 }
1388
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001389#ifdef CONFIG_X86_64
Avi Kivityf6801df2010-01-21 15:31:50 +02001390 if (vcpu->arch.efer & EFER_LME) {
Rusty Russell707d92fa2007-07-17 23:19:08 +10001391 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02001392 vcpu->arch.efer |= EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06001393 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001394 }
1395
Mike Dayd77c26f2007-10-08 09:02:08 -04001396 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02001397 vcpu->arch.efer &= ~EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06001398 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001399 }
1400 }
1401#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001402 vcpu->arch.cr0 = cr0;
Avi Kivity888f9f32010-01-10 12:14:04 +02001403
1404 if (!npt_enabled)
1405 cr0 |= X86_CR0_PG | X86_CR0_WP;
Avi Kivity02daab22009-12-30 12:40:26 +02001406
1407 if (!vcpu->fpu_active)
Joerg Roedel334df502008-01-21 13:09:33 +01001408 cr0 |= X86_CR0_TS;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001409 /*
1410 * Re-enable caching here because the QEMU BIOS
1411 * does not do it - this results in some delay at
1412 * reboot.
1413 */
1414 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001415 svm->vmcb->save.cr0 = cr0;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001416 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02001417 update_cr0_intercept(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001418}
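/*
 * Example of the long mode transition handled above (sketch): the
 * guest sets EFER.LME, loads page tables and then sets CR0.PG. On
 * bare metal the cpu would set EFER.LMA by itself; here kvm
 * synthesizes it by or-ing EFER_LMA | EFER_LME into the vmcb efer
 * when paging goes from off to on, and strips both from the vmcb
 * copy again (the architectural efer keeps LME) once the guest
 * clears CR0.PG.
 */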
1419
1420static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1421{
Joerg Roedel6394b642008-04-09 14:15:29 +02001422 unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
Joerg Roedele5eab0c2008-09-09 19:11:51 +02001423 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
1424
1425 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
1426 force_new_asid(vcpu);
Joerg Roedel6394b642008-04-09 14:15:29 +02001427
Joerg Roedelec077262008-04-09 14:15:28 +02001428 vcpu->arch.cr4 = cr4;
1429 if (!npt_enabled)
1430 cr4 |= X86_CR4_PAE;
Joerg Roedel6394b642008-04-09 14:15:29 +02001431 cr4 |= host_cr4_mce;
Joerg Roedelec077262008-04-09 14:15:28 +02001432 to_svm(vcpu)->vmcb->save.cr4 = cr4;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001433 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001434}
1435
1436static void svm_set_segment(struct kvm_vcpu *vcpu,
1437 struct kvm_segment *var, int seg)
1438{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001439 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001440 struct vmcb_seg *s = svm_seg(vcpu, seg);
1441
1442 s->base = var->base;
1443 s->limit = var->limit;
1444 s->selector = var->selector;
1445 if (var->unusable)
1446 s->attrib = 0;
1447 else {
1448 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1449 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1450 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1451 s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
1452 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1453 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1454 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1455 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1456 }
1457 if (seg == VCPU_SREG_CS)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001458 svm->vmcb->save.cpl
1459 = (svm->vmcb->save.cs.attrib
Avi Kivity6aa8b732006-12-10 02:21:36 -08001460 >> SVM_SELECTOR_DPL_SHIFT) & 3;
1461
Joerg Roedel060d0c92010-12-03 11:45:57 +01001462 mark_dirty(svm->vmcb, VMCB_SEG);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001463}
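/*
 * Worked example of the attrib packing above (a sketch, using the
 * SVM_SELECTOR_*_SHIFT values from svm.h): a flat 64-bit code segment
 * with type=0xb, s=1, dpl=0, present=1, avl=0, l=1, db=0 and g=1
 * packs to 0xb | (1 << 4) | (1 << 7) | (1 << 9) | (1 << 11) = 0xa9b,
 * i.e. the attribute bytes of a GDT descriptor with the interleaved
 * limit bits squeezed out.
 */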
1464
Gleb Natapov44c11432009-05-11 13:35:52 +03001465static void update_db_intercept(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001466{
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001467 struct vcpu_svm *svm = to_svm(vcpu);
1468
Joerg Roedel18c918c2010-11-30 18:03:59 +01001469 clr_exception_intercept(svm, DB_VECTOR);
1470 clr_exception_intercept(svm, BP_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03001471
Jan Kiszka6be7d302009-10-18 13:24:54 +02001472 if (svm->nmi_singlestep)
Joerg Roedel18c918c2010-11-30 18:03:59 +01001473 set_exception_intercept(svm, DB_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03001474
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001475 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1476 if (vcpu->guest_debug &
1477 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
Joerg Roedel18c918c2010-11-30 18:03:59 +01001478 set_exception_intercept(svm, DB_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001479 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
Joerg Roedel18c918c2010-11-30 18:03:59 +01001480 set_exception_intercept(svm, BP_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001481 } else
1482 vcpu->guest_debug = 0;
Gleb Natapov44c11432009-05-11 13:35:52 +03001483}
1484
Jan Kiszka355be0b2009-10-03 00:31:21 +02001485static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
Gleb Natapov44c11432009-05-11 13:35:52 +03001486{
Gleb Natapov44c11432009-05-11 13:35:52 +03001487 struct vcpu_svm *svm = to_svm(vcpu);
1488
Jan Kiszkaae675ef2008-12-15 13:52:10 +01001489 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1490 svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
1491 else
1492 svm->vmcb->save.dr7 = vcpu->arch.dr7;
1493
Joerg Roedel72214b92010-12-03 11:45:55 +01001494 mark_dirty(svm->vmcb, VMCB_DR);
1495
Jan Kiszka355be0b2009-10-03 00:31:21 +02001496 update_db_intercept(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001497}
1498
Tejun Heo0fe1e002009-10-29 22:34:14 +09001499static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001500{
Tejun Heo0fe1e002009-10-29 22:34:14 +09001501 if (sd->next_asid > sd->max_asid) {
1502 ++sd->asid_generation;
1503 sd->next_asid = 1;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001504 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001505 }
1506
Tejun Heo0fe1e002009-10-29 22:34:14 +09001507 svm->asid_generation = sd->asid_generation;
1508 svm->vmcb->control.asid = sd->next_asid++;
Joerg Roedeld48086d2010-12-03 11:45:51 +01001509
1510 mark_dirty(svm->vmcb, VMCB_ASID);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001511}
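/*
 * Example (sketch): with max_asid == 15 the first 15 vcpus scheduled
 * on this cpu get asids 1..15 from the same generation. The 16th
 * request overflows, bumps sd->asid_generation, restarts next_asid
 * at 1 and asks the hardware to flush all asids, so a recycled asid
 * can never match stale tlb entries from a previous generation.
 */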
1512
Gleb Natapov020df072010-04-13 10:05:23 +03001513static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001514{
Jan Kiszka42dbaa52008-12-15 13:52:10 +01001515 struct vcpu_svm *svm = to_svm(vcpu);
Jan Kiszka42dbaa52008-12-15 13:52:10 +01001516
Gleb Natapov020df072010-04-13 10:05:23 +03001517 svm->vmcb->save.dr7 = value;
Joerg Roedel72214b92010-12-03 11:45:55 +01001518 mark_dirty(svm->vmcb, VMCB_DR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001519}
1520
Avi Kivity851ba692009-08-24 11:10:17 +03001521static int pf_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001522{
Gleb Natapov631bc482010-10-14 11:22:52 +02001523 u64 fault_address = svm->vmcb->control.exit_info_2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001524 u32 error_code;
Gleb Natapov631bc482010-10-14 11:22:52 +02001525 int r = 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001526
Gleb Natapov631bc482010-10-14 11:22:52 +02001527 switch (svm->apf_reason) {
1528 default:
1529 error_code = svm->vmcb->control.exit_info_1;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001530
Gleb Natapov631bc482010-10-14 11:22:52 +02001531 trace_kvm_page_fault(fault_address, error_code);
1532 if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
1533 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
1534 r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
1535 break;
1536 case KVM_PV_REASON_PAGE_NOT_PRESENT:
1537 svm->apf_reason = 0;
1538 local_irq_disable();
1539 kvm_async_pf_task_wait(fault_address);
1540 local_irq_enable();
1541 break;
1542 case KVM_PV_REASON_PAGE_READY:
1543 svm->apf_reason = 0;
1544 local_irq_disable();
1545 kvm_async_pf_task_wake(fault_address);
1546 local_irq_enable();
1547 break;
1548 }
1549 return r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001550}
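/*
 * The two KVM_PV_REASON_* arms above implement the guest-visible side
 * of the paravirtual async page fault protocol (sketch): on
 * PAGE_NOT_PRESENT the host has queued the real fault for later, so
 * the faulting guest task is parked in kvm_async_pf_task_wait(); the
 * matching PAGE_READY notification wakes it up again through
 * kvm_async_pf_task_wake(). Interrupts are disabled around both
 * calls to match what the task wait/wake helpers expect.
 */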
1551
Avi Kivity851ba692009-08-24 11:10:17 +03001552static int db_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001553{
Avi Kivity851ba692009-08-24 11:10:17 +03001554 struct kvm_run *kvm_run = svm->vcpu.run;
1555
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001556 if (!(svm->vcpu.guest_debug &
Gleb Natapov44c11432009-05-11 13:35:52 +03001557 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
Jan Kiszka6be7d302009-10-18 13:24:54 +02001558 !svm->nmi_singlestep) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001559 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1560 return 1;
1561 }
Gleb Natapov44c11432009-05-11 13:35:52 +03001562
Jan Kiszka6be7d302009-10-18 13:24:54 +02001563 if (svm->nmi_singlestep) {
1564 svm->nmi_singlestep = false;
Gleb Natapov44c11432009-05-11 13:35:52 +03001565 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
1566 svm->vmcb->save.rflags &=
1567 ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1568 update_db_intercept(&svm->vcpu);
1569 }
1570
1571 if (svm->vcpu.guest_debug &
Joerg Roedele0231712010-02-24 18:59:10 +01001572 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
Gleb Natapov44c11432009-05-11 13:35:52 +03001573 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1574 kvm_run->debug.arch.pc =
1575 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1576 kvm_run->debug.arch.exception = DB_VECTOR;
1577 return 0;
1578 }
1579
1580 return 1;
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001581}
1582
Avi Kivity851ba692009-08-24 11:10:17 +03001583static int bp_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001584{
Avi Kivity851ba692009-08-24 11:10:17 +03001585 struct kvm_run *kvm_run = svm->vcpu.run;
1586
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001587 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1588 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1589 kvm_run->debug.arch.exception = BP_VECTOR;
1590 return 0;
1591}
1592
Avi Kivity851ba692009-08-24 11:10:17 +03001593static int ud_interception(struct vcpu_svm *svm)
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001594{
1595 int er;
1596
Avi Kivity851ba692009-08-24 11:10:17 +03001597 er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001598 if (er != EMULATE_DONE)
Avi Kivity7ee5d9402007-11-25 15:22:50 +02001599 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001600 return 1;
1601}
1602
Avi Kivity6b52d182010-01-21 15:31:47 +02001603static void svm_fpu_activate(struct kvm_vcpu *vcpu)
Anthony Liguori7807fa62007-04-23 09:17:21 -05001604{
Avi Kivity6b52d182010-01-21 15:31:47 +02001605 struct vcpu_svm *svm = to_svm(vcpu);
Joerg Roedel66a562f2010-02-19 16:23:08 +01001606
Joerg Roedel18c918c2010-11-30 18:03:59 +01001607 clr_exception_intercept(svm, NM_VECTOR);
Joerg Roedel66a562f2010-02-19 16:23:08 +01001608
Rusty Russelle756fc62007-07-30 20:07:08 +10001609 svm->vcpu.fpu_active = 1;
Avi Kivityd2251572010-01-06 10:55:27 +02001610 update_cr0_intercept(svm);
Avi Kivity6b52d182010-01-21 15:31:47 +02001611}
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001612
Avi Kivity6b52d182010-01-21 15:31:47 +02001613static int nm_interception(struct vcpu_svm *svm)
1614{
1615 svm_fpu_activate(&svm->vcpu);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001616 return 1;
Anthony Liguori7807fa62007-04-23 09:17:21 -05001617}
1618
Joerg Roedel67ec6602010-05-17 14:43:35 +02001619static bool is_erratum_383(void)
1620{
1621 int err, i;
1622 u64 value;
1623
1624 if (!erratum_383_found)
1625 return false;
1626
1627 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
1628 if (err)
1629 return false;
1630
1631 /* Bit 62 may or may not be set for this mce */
1632 value &= ~(1ULL << 62);
1633
1634 if (value != 0xb600000000010015ULL)
1635 return false;
1636
1637 /* Clear MCi_STATUS registers */
1638 for (i = 0; i < 6; ++i)
1639 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
1640
1641 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
1642 if (!err) {
1643 u32 low, high;
1644
1645 value &= ~(1ULL << 2);
1646 low = lower_32_bits(value);
1647 high = upper_32_bits(value);
1648
1649 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
1650 }
1651
1652 /* Flush tlb to evict multi-match entries */
1653 __flush_tlb_all();
1654
1655 return true;
1656}
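/*
 * Decode of the magic value tested above (a sketch, using the
 * architectural MCi_STATUS bit names): 0xb600000000010015 has VAL,
 * UC, EN, ADDRV and PCC set - a valid, uncorrected,
 * processor-context-corrupting machine check, which is the signature
 * erratum 383 leaves behind in MC0_STATUS.
 */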
1657
Joerg Roedelfe5913e2010-05-17 14:43:34 +02001658static void svm_handle_mce(struct vcpu_svm *svm)
Joerg Roedel53371b52008-04-09 14:15:30 +02001659{
Joerg Roedel67ec6602010-05-17 14:43:35 +02001660 if (is_erratum_383()) {
1661 /*
1662 * Erratum 383 triggered. Guest state is corrupt so kill the
1663 * guest.
1664 */
1665 pr_err("KVM: Guest triggered AMD Erratum 383\n");
1666
Avi Kivitya8eeb042010-05-10 12:34:53 +03001667 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
Joerg Roedel67ec6602010-05-17 14:43:35 +02001668
1669 return;
1670 }
1671
Joerg Roedel53371b52008-04-09 14:15:30 +02001672 /*
1673 * On an #MC intercept the MCE handler is not called automatically in
1674 * the host. So do it by hand here.
1675 */
1676 asm volatile (
1677 "int $0x12\n");
1678 /* not sure if we ever come back to this point */
1679
Joerg Roedelfe5913e2010-05-17 14:43:34 +02001680 return;
1681}
1682
1683static int mc_interception(struct vcpu_svm *svm)
1684{
Joerg Roedel53371b52008-04-09 14:15:30 +02001685 return 1;
1686}
1687
Avi Kivity851ba692009-08-24 11:10:17 +03001688static int shutdown_interception(struct vcpu_svm *svm)
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001689{
Avi Kivity851ba692009-08-24 11:10:17 +03001690 struct kvm_run *kvm_run = svm->vcpu.run;
1691
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001692 /*
1693 * VMCB is undefined after a SHUTDOWN intercept
1694 * so reinitialize it.
1695 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001696 clear_page(svm->vmcb);
Joerg Roedele6101a92008-02-13 18:58:45 +01001697 init_vmcb(svm);
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001698
1699 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1700 return 0;
1701}
1702
Avi Kivity851ba692009-08-24 11:10:17 +03001703static int io_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001704{
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001705 struct kvm_vcpu *vcpu = &svm->vcpu;
Mike Dayd77c26f2007-10-08 09:02:08 -04001706 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
Jan Kiszka34c33d12009-02-08 13:28:15 +01001707 int size, in, string;
Avi Kivity039576c2007-03-20 12:46:50 +02001708 unsigned port;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001709
Rusty Russelle756fc62007-07-30 20:07:08 +10001710 ++svm->vcpu.stat.io_exits;
Laurent Viviere70669a2007-08-05 10:36:40 +03001711 string = (io_info & SVM_IOIO_STR_MASK) != 0;
Avi Kivity039576c2007-03-20 12:46:50 +02001712 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001713 if (string || in)
Gleb Natapov6d77dbf2010-05-10 11:16:56 +03001714 return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001715
Avi Kivity039576c2007-03-20 12:46:50 +02001716 port = io_info >> 16;
1717 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001718 svm->next_rip = svm->vmcb->control.exit_info_2;
Guillaume Thouvenine93f36b2008-10-28 10:51:30 +01001719 skip_emulated_instruction(&svm->vcpu);
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001720
1721 return kvm_fast_pio_out(vcpu, size, port);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001722}
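/*
 * Worked decode of exit_info_1 for "in %al, $0x71" (a sketch, using
 * the SVM_IOIO_* encoding from svm.h): io_info = (0x71 << 16) |
 * (1 << SVM_IOIO_SIZE_SHIFT) | SVM_IOIO_TYPE_MASK = 0x00710011, so
 * string == 0, in == 1, port == 0x71 and size == 1 byte. Because in
 * is set, the access goes through the emulator; only a non-string
 * "out" takes the kvm_fast_pio_out() path.
 */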
1723
Avi Kivity851ba692009-08-24 11:10:17 +03001724static int nmi_interception(struct vcpu_svm *svm)
Joerg Roedelc47f0982008-04-30 17:56:00 +02001725{
1726 return 1;
1727}
1728
Avi Kivity851ba692009-08-24 11:10:17 +03001729static int intr_interception(struct vcpu_svm *svm)
Joerg Roedela0698052008-04-30 17:56:01 +02001730{
1731 ++svm->vcpu.stat.irq_exits;
1732 return 1;
1733}
1734
Avi Kivity851ba692009-08-24 11:10:17 +03001735static int nop_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001736{
1737 return 1;
1738}
1739
Avi Kivity851ba692009-08-24 11:10:17 +03001740static int halt_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001741{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001742 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
Rusty Russelle756fc62007-07-30 20:07:08 +10001743 skip_emulated_instruction(&svm->vcpu);
1744 return kvm_emulate_halt(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001745}
1746
Avi Kivity851ba692009-08-24 11:10:17 +03001747static int vmmcall_interception(struct vcpu_svm *svm)
Avi Kivity02e235b2007-02-19 14:37:47 +02001748{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001749 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Rusty Russelle756fc62007-07-30 20:07:08 +10001750 skip_emulated_instruction(&svm->vcpu);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001751 kvm_emulate_hypercall(&svm->vcpu);
1752 return 1;
Avi Kivity02e235b2007-02-19 14:37:47 +02001753}
1754
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001755static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
1756{
1757 struct vcpu_svm *svm = to_svm(vcpu);
1758
1759 return svm->nested.nested_cr3;
1760}
1761
1762static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
1763 unsigned long root)
1764{
1765 struct vcpu_svm *svm = to_svm(vcpu);
1766
1767 svm->vmcb->control.nested_cr3 = root;
Joerg Roedelb2747162010-12-03 11:45:53 +01001768 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001769 force_new_asid(vcpu);
1770}
1771
Avi Kivity6389ee92010-11-29 16:12:30 +02001772static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
1773 struct x86_exception *fault)
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001774{
1775 struct vcpu_svm *svm = to_svm(vcpu);
1776
1777 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
1778 svm->vmcb->control.exit_code_hi = 0;
Avi Kivity6389ee92010-11-29 16:12:30 +02001779 svm->vmcb->control.exit_info_1 = fault->error_code;
1780 svm->vmcb->control.exit_info_2 = fault->address;
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02001781
1782 nested_svm_vmexit(svm);
1783}
1784
Joerg Roedel4b161842010-09-10 17:31:03 +02001785static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
1786{
1787 int r;
1788
1789 r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
1790
1791 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
1792 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
1793 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
1794 vcpu->arch.mmu.shadow_root_level = get_npt_level();
1795 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
1796
1797 return r;
1798}
1799
1800static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
1801{
1802 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
1803}
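/*
 * Sketch of the resulting setup: vcpu->arch.mmu now translates using
 * the l1 hypervisor's nested page table rooted at
 * svm->nested.nested_cr3 (see the callbacks wired up above), while
 * vcpu->arch.nested_mmu serves as walk_mmu for the l2 guest's own
 * virtual-to-physical walks. Nested faults that the l1 table cannot
 * resolve are reflected back to l1 through
 * nested_svm_inject_npf_exit().
 */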
1804
Alexander Grafc0725422008-11-25 20:17:03 +01001805static int nested_svm_check_permissions(struct vcpu_svm *svm)
1806{
Avi Kivityf6801df2010-01-21 15:31:50 +02001807 if (!(svm->vcpu.arch.efer & EFER_SVME)
Alexander Grafc0725422008-11-25 20:17:03 +01001808 || !is_paging(&svm->vcpu)) {
1809 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
1810 return 1;
1811 }
1812
1813 if (svm->vmcb->save.cpl) {
1814 kvm_inject_gp(&svm->vcpu, 0);
1815 return 1;
1816 }
1817
1818 return 0;
1819}
1820
Alexander Grafcf74a782008-11-25 20:17:08 +01001821static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1822 bool has_error_code, u32 error_code)
1823{
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01001824 int vmexit;
1825
Joerg Roedel20307532010-11-29 17:51:48 +01001826 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel0295ad72009-08-07 11:49:37 +02001827 return 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01001828
Joerg Roedel0295ad72009-08-07 11:49:37 +02001829 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1830 svm->vmcb->control.exit_code_hi = 0;
1831 svm->vmcb->control.exit_info_1 = error_code;
1832 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1833
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01001834 vmexit = nested_svm_intercept(svm);
1835 if (vmexit == NESTED_EXIT_DONE)
1836 svm->nested.exit_required = true;
1837
1838 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01001839}
1840
Joerg Roedel8fe54652010-02-19 16:23:01 +01001841/* This function returns true if it is safe to enable the irq window */
1842static inline bool nested_svm_intr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01001843{
Joerg Roedel20307532010-11-29 17:51:48 +01001844 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel8fe54652010-02-19 16:23:01 +01001845 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01001846
Joerg Roedel26666952009-08-07 11:49:46 +02001847 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01001848 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01001849
Joerg Roedel26666952009-08-07 11:49:46 +02001850 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01001851 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01001852
Gleb Natapova0a07cd2010-09-20 10:15:32 +02001853 /*
1854 * If a vmexit was already requested (by an intercepted exception,
1855 * for instance), do not overwrite it with the "external interrupt"
1856 * vmexit.
1857 */
1858 if (svm->nested.exit_required)
1859 return false;
1860
Joerg Roedel197717d2010-02-24 18:59:19 +01001861 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
1862 svm->vmcb->control.exit_info_1 = 0;
1863 svm->vmcb->control.exit_info_2 = 0;
Joerg Roedel26666952009-08-07 11:49:46 +02001864
Joerg Roedelcd3ff652009-10-09 16:08:26 +02001865 if (svm->nested.intercept & 1ULL) {
1866 /*
1867 * The #vmexit can't be emulated here directly because this
1868 * code path runs with irqs and preemption disabled. A
1869 * #vmexit emulation might sleep. Only signal a request for
1870 * the #vmexit here.
1871 */
1872 svm->nested.exit_required = true;
Joerg Roedel236649d2009-10-09 16:08:30 +02001873 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
Joerg Roedel8fe54652010-02-19 16:23:01 +01001874 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01001875 }
1876
Joerg Roedel8fe54652010-02-19 16:23:01 +01001877 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01001878}
1879
Joerg Roedel887f5002010-02-24 18:59:12 +01001880/* This function returns true if it is safe to enable the nmi window */
1881static inline bool nested_svm_nmi(struct vcpu_svm *svm)
1882{
Joerg Roedel20307532010-11-29 17:51:48 +01001883 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel887f5002010-02-24 18:59:12 +01001884 return true;
1885
1886 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
1887 return true;
1888
1889 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
1890 svm->nested.exit_required = true;
1891
1892 return false;
1893}
1894
Joerg Roedel7597f122010-02-19 16:23:00 +01001895static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001896{
1897 struct page *page;
1898
Joerg Roedel6c3bd3d2010-02-19 16:23:04 +01001899 might_sleep();
1900
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001901 page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001902 if (is_error_page(page))
1903 goto error;
1904
Joerg Roedel7597f122010-02-19 16:23:00 +01001905 *_page = page;
1906
1907 return kmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001908
1909error:
1910 kvm_release_page_clean(page);
1911 kvm_inject_gp(&svm->vcpu, 0);
1912
1913 return NULL;
1914}
1915
Joerg Roedel7597f122010-02-19 16:23:00 +01001916static void nested_svm_unmap(struct page *page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001917{
Joerg Roedel7597f122010-02-19 16:23:00 +01001918 kunmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02001919 kvm_release_page_dirty(page);
1920}
1921
Joerg Roedelce2ac082010-03-01 15:34:39 +01001922static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01001923{
Joerg Roedelce2ac082010-03-01 15:34:39 +01001924 unsigned port;
1925 u8 val, bit;
1926 u64 gpa;
1927
1928 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
1929 return NESTED_EXIT_HOST;
1930
1931 port = svm->vmcb->control.exit_info_1 >> 16;
1932 gpa = svm->nested.vmcb_iopm + (port / 8);
1933 bit = port % 8;
1934 val = 0;
1935
1936 if (!kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
1937 val &= (1 << bit);
1938
1939 return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1940}
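/*
 * Worked example (sketch): for an intercepted access to port 0x71,
 * exit_info_1 >> 16 == 0x71, so the permission bit lives at byte
 * 0x71 / 8 == 14 of the l1 iopm and at bit 0x71 % 8 == 1 within that
 * byte. If reading the byte from guest memory fails, val stays 0 and
 * the exit is conservatively handled on the host level.
 */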
1941
Joerg Roedeld2477822010-03-01 15:34:34 +01001942static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01001943{
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001944 u32 offset, msr, value;
1945 int write, mask;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001946
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02001947 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
Joerg Roedeld2477822010-03-01 15:34:34 +01001948 return NESTED_EXIT_HOST;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02001949
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001950 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1951 offset = svm_msrpm_offset(msr);
1952 write = svm->vmcb->control.exit_info_1 & 1;
1953 mask = 1 << ((2 * (msr & 0xf)) + write);
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02001954
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001955 if (offset == MSR_INVALID)
1956 return NESTED_EXIT_DONE;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001957
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001958 /* Offset is in 32 bit units but we need it in 8 bit units */
1959 offset *= 4;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001960
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001961 if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
1962 return NESTED_EXIT_DONE;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02001963
Joerg Roedel0d6b3532010-03-01 15:34:38 +01001964 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001965}
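/*
 * Worked example (a sketch, assuming the msrpm layout built by
 * svm_msrpm_offset() earlier in this file - 2 bits per msr, read bit
 * first): for MSR_STAR (0xc0000081), svm_msrpm_offset() returns u32
 * offset 520, i.e. byte offset 2080 after the multiplication by 4,
 * and mask == 1 << (2 * (0x81 & 0xf) + write), so bit 2 of that word
 * covers reads and bit 3 covers writes.
 */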
1966
Joerg Roedel410e4d52009-08-07 11:49:44 +02001967static int nested_svm_exit_special(struct vcpu_svm *svm)
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001968{
Alexander Grafcf74a782008-11-25 20:17:08 +01001969 u32 exit_code = svm->vmcb->control.exit_code;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02001970
Joerg Roedel410e4d52009-08-07 11:49:44 +02001971 switch (exit_code) {
1972 case SVM_EXIT_INTR:
1973 case SVM_EXIT_NMI:
Joerg Roedelff47a492010-04-22 12:33:14 +02001974 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
Joerg Roedel410e4d52009-08-07 11:49:44 +02001975 return NESTED_EXIT_HOST;
Joerg Roedel410e4d52009-08-07 11:49:44 +02001976 case SVM_EXIT_NPF:
Joerg Roedele0231712010-02-24 18:59:10 +01001977 /* For now we are always handling NPFs when using them */
Joerg Roedel410e4d52009-08-07 11:49:44 +02001978 if (npt_enabled)
1979 return NESTED_EXIT_HOST;
1980 break;
Joerg Roedel410e4d52009-08-07 11:49:44 +02001981 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
Gleb Natapov631bc482010-10-14 11:22:52 +02001982 /* When we're shadowing, trap PFs, but not async PF */
1983 if (!npt_enabled && svm->apf_reason == 0)
Joerg Roedel410e4d52009-08-07 11:49:44 +02001984 return NESTED_EXIT_HOST;
1985 break;
Joerg Roedel66a562f2010-02-19 16:23:08 +01001986 case SVM_EXIT_EXCP_BASE + NM_VECTOR:
1987 nm_interception(svm);
1988 break;
Joerg Roedel410e4d52009-08-07 11:49:44 +02001989 default:
1990 break;
Alexander Grafcf74a782008-11-25 20:17:08 +01001991 }
1992
Joerg Roedel410e4d52009-08-07 11:49:44 +02001993 return NESTED_EXIT_CONTINUE;
1994}
1995
1996/*
1997 * If this function returns NESTED_EXIT_DONE, the #vmexit is owned by the L1 hypervisor
1998 */
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01001999static int nested_svm_intercept(struct vcpu_svm *svm)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002000{
2001 u32 exit_code = svm->vmcb->control.exit_code;
2002 int vmexit = NESTED_EXIT_HOST;
2003
Alexander Grafcf74a782008-11-25 20:17:08 +01002004 switch (exit_code) {
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002005 case SVM_EXIT_MSR:
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002006 vmexit = nested_svm_exit_handled_msr(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002007 break;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002008 case SVM_EXIT_IOIO:
2009 vmexit = nested_svm_intercept_ioio(svm);
2010 break;
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002011 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
2012 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
2013 if (svm->nested.intercept_cr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002014 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002015 break;
2016 }
Joerg Roedel3aed0412010-11-30 18:03:58 +01002017 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
2018 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
2019 if (svm->nested.intercept_dr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002020 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002021 break;
2022 }
2023 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
2024 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
Joerg Roedelaad42c62009-08-07 11:49:34 +02002025 if (svm->nested.intercept_exceptions & excp_bits)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002026 vmexit = NESTED_EXIT_DONE;
Gleb Natapov631bc482010-10-14 11:22:52 +02002027 /* an async page fault always causes a vmexit */
2028 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
2029 svm->apf_reason != 0)
2030 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002031 break;
2032 }
Joerg Roedel228070b2010-04-22 12:33:10 +02002033 case SVM_EXIT_ERR: {
2034 vmexit = NESTED_EXIT_DONE;
2035 break;
2036 }
Alexander Grafcf74a782008-11-25 20:17:08 +01002037 default: {
2038 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
Joerg Roedelaad42c62009-08-07 11:49:34 +02002039 if (svm->nested.intercept & exit_bits)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002040 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002041 }
2042 }
2043
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002044 return vmexit;
2045}
2046
2047static int nested_svm_exit_handled(struct vcpu_svm *svm)
2048{
2049 int vmexit;
2050
2051 vmexit = nested_svm_intercept(svm);
2052
2053 if (vmexit == NESTED_EXIT_DONE)
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002054 nested_svm_vmexit(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002055
2056 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01002057}
2058
Joerg Roedel0460a972009-08-07 11:49:31 +02002059static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
2060{
2061 struct vmcb_control_area *dst = &dst_vmcb->control;
2062 struct vmcb_control_area *from = &from_vmcb->control;
2063
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002064 dst->intercept_cr = from->intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01002065 dst->intercept_dr = from->intercept_dr;
Joerg Roedel0460a972009-08-07 11:49:31 +02002066 dst->intercept_exceptions = from->intercept_exceptions;
2067 dst->intercept = from->intercept;
2068 dst->iopm_base_pa = from->iopm_base_pa;
2069 dst->msrpm_base_pa = from->msrpm_base_pa;
2070 dst->tsc_offset = from->tsc_offset;
2071 dst->asid = from->asid;
2072 dst->tlb_ctl = from->tlb_ctl;
2073 dst->int_ctl = from->int_ctl;
2074 dst->int_vector = from->int_vector;
2075 dst->int_state = from->int_state;
2076 dst->exit_code = from->exit_code;
2077 dst->exit_code_hi = from->exit_code_hi;
2078 dst->exit_info_1 = from->exit_info_1;
2079 dst->exit_info_2 = from->exit_info_2;
2080 dst->exit_int_info = from->exit_int_info;
2081 dst->exit_int_info_err = from->exit_int_info_err;
2082 dst->nested_ctl = from->nested_ctl;
2083 dst->event_inj = from->event_inj;
2084 dst->event_inj_err = from->event_inj_err;
2085 dst->nested_cr3 = from->nested_cr3;
2086 dst->lbr_ctl = from->lbr_ctl;
2087}
2088
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002089static int nested_svm_vmexit(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002090{
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002091 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002092 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedel33740e42009-08-07 11:49:29 +02002093 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002094 struct page *page;
Alexander Grafcf74a782008-11-25 20:17:08 +01002095
Joerg Roedel17897f32009-10-09 16:08:29 +02002096 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
2097 vmcb->control.exit_info_1,
2098 vmcb->control.exit_info_2,
2099 vmcb->control.exit_int_info,
2100 vmcb->control.exit_int_info_err);
2101
Joerg Roedel7597f122010-02-19 16:23:00 +01002102 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002103 if (!nested_vmcb)
2104 return 1;
2105
Joerg Roedel20307532010-11-29 17:51:48 +01002106 /* Exit Guest-Mode */
2107 leave_guest_mode(&svm->vcpu);
Joerg Roedel06fc77722010-02-19 16:23:07 +01002108 svm->nested.vmcb = 0;
2109
Alexander Grafcf74a782008-11-25 20:17:08 +01002110 /* Give the current vmcb to the guest */
Joerg Roedel33740e42009-08-07 11:49:29 +02002111 disable_gif(svm);
2112
2113 nested_vmcb->save.es = vmcb->save.es;
2114 nested_vmcb->save.cs = vmcb->save.cs;
2115 nested_vmcb->save.ss = vmcb->save.ss;
2116 nested_vmcb->save.ds = vmcb->save.ds;
2117 nested_vmcb->save.gdtr = vmcb->save.gdtr;
2118 nested_vmcb->save.idtr = vmcb->save.idtr;
Joerg Roedel3f6a9d12010-07-27 18:14:20 +02002119 nested_vmcb->save.efer = svm->vcpu.arch.efer;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01002120 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
Joerg Roedel2be4fc72010-04-22 12:33:09 +02002121 nested_vmcb->save.cr3 = svm->vcpu.arch.cr3;
Joerg Roedel33740e42009-08-07 11:49:29 +02002122 nested_vmcb->save.cr2 = vmcb->save.cr2;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01002123 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
Joerg Roedel33740e42009-08-07 11:49:29 +02002124 nested_vmcb->save.rflags = vmcb->save.rflags;
2125 nested_vmcb->save.rip = vmcb->save.rip;
2126 nested_vmcb->save.rsp = vmcb->save.rsp;
2127 nested_vmcb->save.rax = vmcb->save.rax;
2128 nested_vmcb->save.dr7 = vmcb->save.dr7;
2129 nested_vmcb->save.dr6 = vmcb->save.dr6;
2130 nested_vmcb->save.cpl = vmcb->save.cpl;
2131
2132 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
2133 nested_vmcb->control.int_vector = vmcb->control.int_vector;
2134 nested_vmcb->control.int_state = vmcb->control.int_state;
2135 nested_vmcb->control.exit_code = vmcb->control.exit_code;
2136 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
2137 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
2138 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
2139 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
2140 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
Joerg Roedel7a190662010-07-27 18:14:21 +02002141 nested_vmcb->control.next_rip = vmcb->control.next_rip;
Alexander Graf8d23c462009-10-09 16:08:25 +02002142
2143 /*
2144 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
2145 * to make sure that we do not lose injected events. So check event_inj
2146 * here and copy it to exit_int_info if it is valid.
2147 * Exit_int_info and event_inj can't both be valid because the case
2148 * below only happens on a VMRUN instruction intercept which has
2149 * no valid exit_int_info set.
2150 */
2151 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
2152 struct vmcb_control_area *nc = &nested_vmcb->control;
2153
2154 nc->exit_int_info = vmcb->control.event_inj;
2155 nc->exit_int_info_err = vmcb->control.event_inj_err;
2156 }
2157
Joerg Roedel33740e42009-08-07 11:49:29 +02002158 nested_vmcb->control.tlb_ctl = 0;
2159 nested_vmcb->control.event_inj = 0;
2160 nested_vmcb->control.event_inj_err = 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01002161
2162 /* We always set V_INTR_MASKING and remember the old value in hflags */
2163 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2164 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
2165
Alexander Grafcf74a782008-11-25 20:17:08 +01002166 /* Restore the original control entries */
Joerg Roedel0460a972009-08-07 11:49:31 +02002167 copy_vmcb_control_area(vmcb, hsave);
Alexander Grafcf74a782008-11-25 20:17:08 +01002168
Alexander Graf219b65d2009-06-15 15:21:25 +02002169 kvm_clear_exception_queue(&svm->vcpu);
2170 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01002171
Joerg Roedel4b161842010-09-10 17:31:03 +02002172 svm->nested.nested_cr3 = 0;
2173
Alexander Grafcf74a782008-11-25 20:17:08 +01002174 /* Restore selected save entries */
2175 svm->vmcb->save.es = hsave->save.es;
2176 svm->vmcb->save.cs = hsave->save.cs;
2177 svm->vmcb->save.ss = hsave->save.ss;
2178 svm->vmcb->save.ds = hsave->save.ds;
2179 svm->vmcb->save.gdtr = hsave->save.gdtr;
2180 svm->vmcb->save.idtr = hsave->save.idtr;
2181 svm->vmcb->save.rflags = hsave->save.rflags;
2182 svm_set_efer(&svm->vcpu, hsave->save.efer);
2183 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
2184 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
2185 if (npt_enabled) {
2186 svm->vmcb->save.cr3 = hsave->save.cr3;
2187 svm->vcpu.arch.cr3 = hsave->save.cr3;
2188 } else {
Avi Kivity23902182010-06-10 17:02:16 +03002189 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
Alexander Grafcf74a782008-11-25 20:17:08 +01002190 }
2191 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
2192 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
2193 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
2194 svm->vmcb->save.dr7 = 0;
2195 svm->vmcb->save.cpl = 0;
2196 svm->vmcb->control.exit_int_info = 0;
2197
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002198 mark_all_dirty(svm->vmcb);
2199
Joerg Roedel7597f122010-02-19 16:23:00 +01002200 nested_svm_unmap(page);
Alexander Grafcf74a782008-11-25 20:17:08 +01002201
Joerg Roedel4b161842010-09-10 17:31:03 +02002202 nested_svm_uninit_mmu_context(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01002203 kvm_mmu_reset_context(&svm->vcpu);
2204 kvm_mmu_load(&svm->vcpu);
2205
2206 return 0;
2207}
Alexander Graf3d6368e2008-11-25 20:17:07 +01002208
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002209static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002210{
Joerg Roedel323c3d82010-03-01 15:34:37 +01002211 /*
2212 * This function merges the msr permission bitmaps of kvm and the
2213 * nested vmcb. It is optimized in that it only merges the parts where
2214 * the kvm msr permission bitmap may contain zero bits.
2215 */
Alexander Graf3d6368e2008-11-25 20:17:07 +01002216 int i;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002217
Joerg Roedel323c3d82010-03-01 15:34:37 +01002218 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2219 return true;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002220
Joerg Roedel323c3d82010-03-01 15:34:37 +01002221 for (i = 0; i < MSRPM_OFFSETS; i++) {
2222 u32 value, p;
2223 u64 offset;
2224
2225 if (msrpm_offsets[i] == 0xffffffff)
2226 break;
2227
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002228 p = msrpm_offsets[i];
2229 offset = svm->nested.vmcb_msrpm + (p * 4);
Joerg Roedel323c3d82010-03-01 15:34:37 +01002230
2231 if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
2232 return false;
2233
2234 svm->nested.msrpm[p] = svm->msrpm[p] | value;
2235 }
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002236
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002237 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002238
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002239 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002240}
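/*
 * Example of the merge semantics above (sketch): a set bit in a msrpm
 * means "intercept this msr access", so or-ing the kvm bitmap with
 * the l1 bitmap intercepts an access whenever either the host or the
 * l1 hypervisor asked for it; only the offsets listed in
 * msrpm_offsets[] (the ones where kvm may leave zero bits) need to be
 * visited.
 */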
2241
Joerg Roedel52c65a302010-08-02 16:46:44 +02002242static bool nested_vmcb_checks(struct vmcb *vmcb)
2243{
2244 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
2245 return false;
2246
Joerg Roedeldbe77582010-08-02 16:46:45 +02002247 if (vmcb->control.asid == 0)
2248 return false;
2249
Joerg Roedel4b161842010-09-10 17:31:03 +02002250 if (vmcb->control.nested_ctl && !npt_enabled)
2251 return false;
2252
Joerg Roedel52c65a302010-08-02 16:46:44 +02002253 return true;
2254}
2255
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002256static bool nested_svm_vmrun(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002257{
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002258 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002259 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedeldefbba52009-08-07 11:49:30 +02002260 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002261 struct page *page;
Joerg Roedel06fc77722010-02-19 16:23:07 +01002262 u64 vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002263
Joerg Roedel06fc77722010-02-19 16:23:07 +01002264 vmcb_gpa = svm->vmcb->save.rax;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002265
Joerg Roedel7597f122010-02-19 16:23:00 +01002266 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002267 if (!nested_vmcb)
2268 return false;
2269
Joerg Roedel52c65a302010-08-02 16:46:44 +02002270 if (!nested_vmcb_checks(nested_vmcb)) {
2271 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
2272 nested_vmcb->control.exit_code_hi = 0;
2273 nested_vmcb->control.exit_info_1 = 0;
2274 nested_vmcb->control.exit_info_2 = 0;
2275
2276 nested_svm_unmap(page);
2277
2278 return false;
2279 }
2280
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002281 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
Joerg Roedel0ac406d2009-10-09 16:08:27 +02002282 nested_vmcb->save.rip,
2283 nested_vmcb->control.int_ctl,
2284 nested_vmcb->control.event_inj,
2285 nested_vmcb->control.nested_ctl);
2286
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002287 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
2288 nested_vmcb->control.intercept_cr >> 16,
Joerg Roedel2e554e82010-02-24 18:59:14 +01002289 nested_vmcb->control.intercept_exceptions,
2290 nested_vmcb->control.intercept);
2291
Alexander Graf3d6368e2008-11-25 20:17:07 +01002292 /* Clear internal status */
Alexander Graf219b65d2009-06-15 15:21:25 +02002293 kvm_clear_exception_queue(&svm->vcpu);
2294 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002295
Joerg Roedele0231712010-02-24 18:59:10 +01002296 /*
2297 * Save the old vmcb, so we don't need to pick out what to save, but
2298 * can restore everything when a VMEXIT occurs
2299 */
Joerg Roedeldefbba52009-08-07 11:49:30 +02002300 hsave->save.es = vmcb->save.es;
2301 hsave->save.cs = vmcb->save.cs;
2302 hsave->save.ss = vmcb->save.ss;
2303 hsave->save.ds = vmcb->save.ds;
2304 hsave->save.gdtr = vmcb->save.gdtr;
2305 hsave->save.idtr = vmcb->save.idtr;
Avi Kivityf6801df2010-01-21 15:31:50 +02002306 hsave->save.efer = svm->vcpu.arch.efer;
Avi Kivity4d4ec082009-12-29 18:07:30 +02002307 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002308 hsave->save.cr4 = svm->vcpu.arch.cr4;
2309 hsave->save.rflags = vmcb->save.rflags;
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002310 hsave->save.rip = kvm_rip_read(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002311 hsave->save.rsp = vmcb->save.rsp;
2312 hsave->save.rax = vmcb->save.rax;
2313 if (npt_enabled)
2314 hsave->save.cr3 = vmcb->save.cr3;
2315 else
2316 hsave->save.cr3 = svm->vcpu.arch.cr3;
2317
Joerg Roedel0460a972009-08-07 11:49:31 +02002318 copy_vmcb_control_area(hsave, vmcb);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002319
2320 if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
2321 svm->vcpu.arch.hflags |= HF_HIF_MASK;
2322 else
2323 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
2324
Joerg Roedel4b161842010-09-10 17:31:03 +02002325 if (nested_vmcb->control.nested_ctl) {
2326 kvm_mmu_unload(&svm->vcpu);
2327 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
2328 nested_svm_init_mmu_context(&svm->vcpu);
2329 }
2330
Alexander Graf3d6368e2008-11-25 20:17:07 +01002331 /* Load the nested guest state */
2332 svm->vmcb->save.es = nested_vmcb->save.es;
2333 svm->vmcb->save.cs = nested_vmcb->save.cs;
2334 svm->vmcb->save.ss = nested_vmcb->save.ss;
2335 svm->vmcb->save.ds = nested_vmcb->save.ds;
2336 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
2337 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
2338 svm->vmcb->save.rflags = nested_vmcb->save.rflags;
2339 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
2340 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
2341 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
2342 if (npt_enabled) {
2343 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
2344 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01002345 } else
Avi Kivity23902182010-06-10 17:02:16 +03002346 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01002347
2348 /* Guest paging mode is active - reset mmu */
2349 kvm_mmu_reset_context(&svm->vcpu);
2350
Joerg Roedeldefbba52009-08-07 11:49:30 +02002351 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002352 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
2353 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
2354 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
Joerg Roedele0231712010-02-24 18:59:10 +01002355
Alexander Graf3d6368e2008-11-25 20:17:07 +01002356 /* In case we don't even reach vcpu_run, the fields are not updated */
2357 svm->vmcb->save.rax = nested_vmcb->save.rax;
2358 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
2359 svm->vmcb->save.rip = nested_vmcb->save.rip;
2360 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
2361 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
2362 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
2363
Joerg Roedelf7138532010-03-01 15:34:40 +01002364 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002365 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002366
Joerg Roedelaad42c62009-08-07 11:49:34 +02002367 /* cache intercepts */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002368 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01002369 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
Joerg Roedelaad42c62009-08-07 11:49:34 +02002370 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
2371 svm->nested.intercept = nested_vmcb->control.intercept;
2372
Alexander Graf3d6368e2008-11-25 20:17:07 +01002373 force_new_asid(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002374 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002375 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
2376 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
2377 else
2378 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
2379
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002380 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
2381 /* We only want the cr8 intercept bits of the guest */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002382 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
2383 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002384 }
2385
Joerg Roedel0d945bd2010-05-05 16:04:45 +02002386 /* We don't want to see VMMCALLs from a nested guest */
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01002387 clr_intercept(svm, INTERCEPT_VMMCALL);
Joerg Roedel0d945bd2010-05-05 16:04:45 +02002388
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002389 svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002390 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
2391 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
2392 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002393 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
2394 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
2395
Joerg Roedel7597f122010-02-19 16:23:00 +01002396 nested_svm_unmap(page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002397
Joerg Roedel20307532010-11-29 17:51:48 +01002398 /* Enter Guest-Mode */
2399 enter_guest_mode(&svm->vcpu);
2400
Joerg Roedel384c6362010-11-30 18:03:56 +01002401 /*
2402 * Merge guest and host intercepts - must be called with vcpu in
2403 * guest-mode to take effect here
2404 */
2405 recalc_intercepts(svm);
2406
Joerg Roedel06fc77722010-02-19 16:23:07 +01002407 svm->nested.vmcb = vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002408
Joerg Roedel2af91942009-08-07 11:49:28 +02002409 enable_gif(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002410
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002411 mark_all_dirty(svm->vmcb);
2412
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002413 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002414}
2415
Joerg Roedel9966bf62009-08-07 11:49:40 +02002416static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
Alexander Graf55426752008-11-25 20:17:06 +01002417{
2418 to_vmcb->save.fs = from_vmcb->save.fs;
2419 to_vmcb->save.gs = from_vmcb->save.gs;
2420 to_vmcb->save.tr = from_vmcb->save.tr;
2421 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
2422 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
2423 to_vmcb->save.star = from_vmcb->save.star;
2424 to_vmcb->save.lstar = from_vmcb->save.lstar;
2425 to_vmcb->save.cstar = from_vmcb->save.cstar;
2426 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
2427 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
2428 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
2429 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
Alexander Graf55426752008-11-25 20:17:06 +01002430}
2431
Avi Kivity851ba692009-08-24 11:10:17 +03002432static int vmload_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01002433{
Joerg Roedel9966bf62009-08-07 11:49:40 +02002434 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002435 struct page *page;
Joerg Roedel9966bf62009-08-07 11:49:40 +02002436
Alexander Graf55426752008-11-25 20:17:06 +01002437 if (nested_svm_check_permissions(svm))
2438 return 1;
2439
2440 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2441 skip_emulated_instruction(&svm->vcpu);
2442
Joerg Roedel7597f122010-02-19 16:23:00 +01002443 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02002444 if (!nested_vmcb)
2445 return 1;
2446
2447 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01002448 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01002449
2450 return 1;
2451}
2452
Avi Kivity851ba692009-08-24 11:10:17 +03002453static int vmsave_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01002454{
Joerg Roedel9966bf62009-08-07 11:49:40 +02002455 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002456 struct page *page;
Joerg Roedel9966bf62009-08-07 11:49:40 +02002457
Alexander Graf55426752008-11-25 20:17:06 +01002458 if (nested_svm_check_permissions(svm))
2459 return 1;
2460
2461 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2462 skip_emulated_instruction(&svm->vcpu);
2463
Joerg Roedel7597f122010-02-19 16:23:00 +01002464 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02002465 if (!nested_vmcb)
2466 return 1;
2467
2468 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01002469 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01002470
2471 return 1;
2472}
2473
Avi Kivity851ba692009-08-24 11:10:17 +03002474static int vmrun_interception(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002475{
Alexander Graf3d6368e2008-11-25 20:17:07 +01002476 if (nested_svm_check_permissions(svm))
2477 return 1;
2478
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002479 /* Save rip after vmrun instruction */
2480 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002481
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002482 if (!nested_svm_vmrun(svm))
Alexander Graf3d6368e2008-11-25 20:17:07 +01002483 return 1;
2484
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002485 if (!nested_svm_vmrun_msrpm(svm))
Joerg Roedel1f8da472009-08-07 11:49:43 +02002486 goto failed;
2487
2488 return 1;
2489
2490failed:
2491
2492 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
2493 svm->vmcb->control.exit_code_hi = 0;
2494 svm->vmcb->control.exit_info_1 = 0;
2495 svm->vmcb->control.exit_info_2 = 0;
2496
2497 nested_svm_vmexit(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002498
2499 return 1;
2500}
2501
Avi Kivity851ba692009-08-24 11:10:17 +03002502static int stgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01002503{
2504 if (nested_svm_check_permissions(svm))
2505 return 1;
2506
2507 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2508 skip_emulated_instruction(&svm->vcpu);
Avi Kivity3842d132010-07-27 12:30:24 +03002509 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01002510
Joerg Roedel2af91942009-08-07 11:49:28 +02002511 enable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01002512
2513 return 1;
2514}
2515
Avi Kivity851ba692009-08-24 11:10:17 +03002516static int clgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01002517{
2518 if (nested_svm_check_permissions(svm))
2519 return 1;
2520
2521 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2522 skip_emulated_instruction(&svm->vcpu);
2523
Joerg Roedel2af91942009-08-07 11:49:28 +02002524 disable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01002525
2526 /* After a CLGI no interrupts should come */
2527 svm_clear_vintr(svm);
2528 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2529
Joerg Roedeldecdbf62010-12-03 11:45:52 +01002530 mark_dirty(svm->vmcb, VMCB_INTR);
2531
Alexander Graf1371d902008-11-25 20:17:04 +01002532 return 1;
2533}
2534
static int invlpga_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
			  vcpu->arch.regs[VCPU_REGS_RAX]);

	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
	kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int skinit_interception(struct vcpu_svm *svm)
{
	trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);

	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int invalid_op_interception(struct vcpu_svm *svm)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

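/*
 * Task switch intercept: exit_info_1 carries the target TSS selector,
 * exit_info_2 the switch reason (IRET/JMP) and a possible error code,
 * and exit_int_info the event that triggered the switch, so a stale
 * queued exception or interrupt can be dropped before emulation.
 */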
static int task_switch_interception(struct vcpu_svm *svm)
{
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
	bool has_error_code = false;
	u32 error_code = 0;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			svm->vcpu.arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			if (svm->vmcb->control.exit_info_2 &
			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
				has_error_code = true;
				error_code =
					(u32)svm->vmcb->control.exit_info_2;
			}
			kvm_clear_exception_queue(&svm->vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
			kvm_clear_interrupt_queue(&svm->vcpu);
			break;
		default:
			break;
		}
	}

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
		skip_emulated_instruction(&svm->vcpu);

	if (kvm_task_switch(&svm->vcpu, tss_selector, reason,
			    has_error_code, error_code) == EMULATE_FAIL) {
		svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		svm->vcpu.run->internal.ndata = 0;
		return 0;
	}
	return 1;
}

static int cpuid_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int iret_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.nmi_window_exits;
	clr_intercept(svm, INTERCEPT_IRET);
	svm->vcpu.arch.hflags |= HF_IRET_MASK;
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm)
{
	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
}

static int emulate_on_interception(struct vcpu_svm *svm)
{
	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
}

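/*
 * Emulate the CR0 write first; if the emulation prepared a nested
 * #VMEXIT (svm->nested.vmexit_rip is set), restore the saved
 * rip/rsp/rax so the L1 guest continues at the recorded point
 * instead of behind the emulated instruction.
 */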
static int cr0_write_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	int r;

	r = emulate_instruction(&svm->vcpu, 0, 0, 0);

	if (svm->nested.vmexit_rip) {
		kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
		kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp);
		kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax);
		svm->nested.vmexit_rip = 0;
	}

	return r == EMULATE_DONE;
}

static int cr8_write_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
	/* instruction emulation calls kvm_set_cr8() */
	emulate_instruction(&svm->vcpu, 0, 0, 0);
	if (irqchip_in_kernel(svm->vcpu.kvm)) {
		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
		return 1;
	}
	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
		return 1;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

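/*
 * RDMSR emulation. For MSR_IA32_TSC the guest value is the host TSC
 * plus the tsc_offset of the VMCB returned by get_host_vmcb(), i.e.
 * the L1 offset even while a nested guest is running.
 */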
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC: {
		struct vmcb *vmcb = get_host_vmcb(svm);

		*data = vmcb->control.tsc_offset + native_read_tsc();
		break;
	}
	case MSR_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->sysenter_esp;
		break;
	/*
	 * Nobody will change the following 5 values in the VMCB so we can
	 * safely return them on rdmsr. They will always be 0 until LBRV is
	 * implemented.
	 */
	case MSR_IA32_DEBUGCTLMSR:
		*data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		*data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		*data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		*data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		*data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		*data = svm->nested.hsave_msr;
		break;
	case MSR_VM_CR:
		*data = svm->nested.vm_cr_msr;
		break;
	case MSR_IA32_UCODE_REV:
		*data = 0x01000065;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data)) {
		trace_kvm_msr_read_ex(ecx);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_read(ecx, data);

		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

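/*
 * MSR_VM_CR emulation: once the guest sets SVMDIS, both the LOCK and
 * DIS bits become read-only, and setting SVMDIS while EFER.SVME is
 * still enabled faults, mirroring the behaviour of real hardware.
 */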
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int svm_dis, chg_mask;

	if (data & ~SVM_VM_CR_VALID_MASK)
		return 1;

	chg_mask = SVM_VM_CR_VALID_MASK;

	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	svm->nested.vm_cr_msr &= ~chg_mask;
	svm->nested.vm_cr_msr |= (data & chg_mask);

	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

	/* check for svm_disable while efer.svme is set */
	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
		return 1;

	return 0;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, data);
		break;
	case MSR_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->sysenter_eip = data;
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->sysenter_esp = data;
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
					__func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_VM_HSAVE_PA:
		svm->nested.hsave_msr = data;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data)) {
		trace_kvm_msr_write_ex(ecx, data);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_write(ecx, data);
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

static int msr_interception(struct vcpu_svm *svm)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm);
	else
		return rdmsr_interception(svm);
}

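/*
 * VINTR exit: the virtual interrupt window requested by
 * enable_irq_window() has opened. Tear the fake V_IRQ down again and
 * return to userspace if it asked to inject interrupts itself.
 */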
static int interrupt_window_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	mark_dirty(svm->vmcb, VMCB_INTR);
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (!irqchip_in_kernel(svm->vcpu.kvm) &&
	    kvm_run->request_interrupt_window &&
	    !kvm_cpu_has_interrupt(&svm->vcpu)) {
		++svm->vcpu.stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

static int pause_interception(struct vcpu_svm *svm)
{
	kvm_vcpu_on_spin(&(svm->vcpu));
	return 1;
}

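/*
 * Exit-code dispatch table, indexed by the SVM_EXIT_* values; codes
 * without a handler take the KVM_EXIT_UNKNOWN path in handle_exit().
 */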
static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_READ_DR4]			= emulate_on_interception,
	[SVM_EXIT_READ_DR5]			= emulate_on_interception,
	[SVM_EXIT_READ_DR6]			= emulate_on_interception,
	[SVM_EXIT_READ_DR7]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR4]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR6]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_IRET]				= iret_interception,
	[SVM_EXIT_INVD]				= emulate_on_interception,
	[SVM_EXIT_PAUSE]			= pause_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invlpga_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= skinit_interception,
	[SVM_EXIT_WBINVD]			= emulate_on_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
	[SVM_EXIT_NPF]				= pf_interception,
};

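/* Dump the complete VMCB control and save areas on VMRUN failures. */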
void dump_vmcb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	pr_err("VMCB Control Area:\n");
	pr_err("cr_read: %04x\n", control->intercept_cr & 0xffff);
	pr_err("cr_write: %04x\n", control->intercept_cr >> 16);
	pr_err("dr_read: %04x\n", control->intercept_dr & 0xffff);
	pr_err("dr_write: %04x\n", control->intercept_dr >> 16);
	pr_err("exceptions: %08x\n", control->intercept_exceptions);
	pr_err("intercepts: %016llx\n", control->intercept);
	pr_err("pause filter count: %d\n", control->pause_filter_count);
	pr_err("iopm_base_pa: %016llx\n", control->iopm_base_pa);
	pr_err("msrpm_base_pa: %016llx\n", control->msrpm_base_pa);
	pr_err("tsc_offset: %016llx\n", control->tsc_offset);
	pr_err("asid: %d\n", control->asid);
	pr_err("tlb_ctl: %d\n", control->tlb_ctl);
	pr_err("int_ctl: %08x\n", control->int_ctl);
	pr_err("int_vector: %08x\n", control->int_vector);
	pr_err("int_state: %08x\n", control->int_state);
	pr_err("exit_code: %08x\n", control->exit_code);
	pr_err("exit_info1: %016llx\n", control->exit_info_1);
	pr_err("exit_info2: %016llx\n", control->exit_info_2);
	pr_err("exit_int_info: %08x\n", control->exit_int_info);
	pr_err("exit_int_info_err: %08x\n", control->exit_int_info_err);
	pr_err("nested_ctl: %lld\n", control->nested_ctl);
	pr_err("nested_cr3: %016llx\n", control->nested_cr3);
	pr_err("event_inj: %08x\n", control->event_inj);
	pr_err("event_inj_err: %08x\n", control->event_inj_err);
	pr_err("lbr_ctl: %lld\n", control->lbr_ctl);
	pr_err("next_rip: %016llx\n", control->next_rip);
	pr_err("VMCB State Save Area:\n");
	pr_err("es: s: %04x a: %04x l: %08x b: %016llx\n",
		save->es.selector, save->es.attrib,
		save->es.limit, save->es.base);
	pr_err("cs: s: %04x a: %04x l: %08x b: %016llx\n",
		save->cs.selector, save->cs.attrib,
		save->cs.limit, save->cs.base);
	pr_err("ss: s: %04x a: %04x l: %08x b: %016llx\n",
		save->ss.selector, save->ss.attrib,
		save->ss.limit, save->ss.base);
	pr_err("ds: s: %04x a: %04x l: %08x b: %016llx\n",
		save->ds.selector, save->ds.attrib,
		save->ds.limit, save->ds.base);
	pr_err("fs: s: %04x a: %04x l: %08x b: %016llx\n",
		save->fs.selector, save->fs.attrib,
		save->fs.limit, save->fs.base);
	pr_err("gs: s: %04x a: %04x l: %08x b: %016llx\n",
		save->gs.selector, save->gs.attrib,
		save->gs.limit, save->gs.base);
	pr_err("gdtr: s: %04x a: %04x l: %08x b: %016llx\n",
		save->gdtr.selector, save->gdtr.attrib,
		save->gdtr.limit, save->gdtr.base);
	pr_err("ldtr: s: %04x a: %04x l: %08x b: %016llx\n",
		save->ldtr.selector, save->ldtr.attrib,
		save->ldtr.limit, save->ldtr.base);
	pr_err("idtr: s: %04x a: %04x l: %08x b: %016llx\n",
		save->idtr.selector, save->idtr.attrib,
		save->idtr.limit, save->idtr.base);
	pr_err("tr: s: %04x a: %04x l: %08x b: %016llx\n",
		save->tr.selector, save->tr.attrib,
		save->tr.limit, save->tr.base);
	pr_err("cpl: %d efer: %016llx\n",
		save->cpl, save->efer);
	pr_err("cr0: %016llx cr2: %016llx\n",
		save->cr0, save->cr2);
	pr_err("cr3: %016llx cr4: %016llx\n",
		save->cr3, save->cr4);
	pr_err("dr6: %016llx dr7: %016llx\n",
		save->dr6, save->dr7);
	pr_err("rip: %016llx rflags: %016llx\n",
		save->rip, save->rflags);
	pr_err("rsp: %016llx rax: %016llx\n",
		save->rsp, save->rax);
	pr_err("star: %016llx lstar: %016llx\n",
		save->star, save->lstar);
	pr_err("cstar: %016llx sfmask: %016llx\n",
		save->cstar, save->sfmask);
	pr_err("kernel_gs_base: %016llx sysenter_cs: %016llx\n",
		save->kernel_gs_base, save->sysenter_cs);
	pr_err("sysenter_esp: %016llx sysenter_eip: %016llx\n",
		save->sysenter_esp, save->sysenter_eip);
	pr_err("gpat: %016llx dbgctl: %016llx\n",
		save->g_pat, save->dbgctl);
	pr_err("br_from: %016llx br_to: %016llx\n",
		save->br_from, save->br_to);
	pr_err("excp_from: %016llx excp_to: %016llx\n",
		save->last_excp_from, save->last_excp_to);
}

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	*info1 = control->exit_info_1;
	*info2 = control->exit_info_2;
}

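/*
 * Top-level exit handler: a nested hypervisor gets the first look at
 * the exit (nested_svm_exit_special/nested_svm_exit_handled) before
 * the exit code is dispatched through svm_exit_handlers[].
 */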
static int handle_exit(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);

	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
	if (npt_enabled)
		vcpu->arch.cr3 = svm->vmcb->save.cr3;

	if (unlikely(svm->nested.exit_required)) {
		nested_svm_vmexit(svm);
		svm->nested.exit_required = false;

		return 1;
	}

	if (is_guest_mode(vcpu)) {
		int vmexit;

		trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
					svm->vmcb->control.exit_info_1,
					svm->vmcb->control.exit_info_2,
					svm->vmcb->control.exit_int_info,
					svm->vmcb->control.exit_int_info_err);

		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	svm_complete_interrupts(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
		dump_vmcb(vcpu);
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
	    exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	vcpu->arch.hflags |= HF_NMI_MASK;
	set_intercept(svm, INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
	mark_dirty(svm->vmcb, VMCB_INTR);
}

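/*
 * Interrupts are injected through the EVENTINJ field rather than
 * V_IRQ, so delivery does not depend on the virtual interrupt window
 * that svm_inject_irq() above sets up.
 */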
static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	BUG_ON(!(gif_set(svm)));

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	if (irr == -1)
		return;

	if (tpr >= irr)
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}

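/*
 * An NMI can be injected unless the vcpu is in an interrupt shadow
 * or still masked by a previous NMI (HF_NMI_MASK, cleared again by
 * the IRET intercept).
 */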
static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;
	ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
	      !(svm->vcpu.arch.hflags & HF_NMI_MASK);
	ret = ret && gif_set(svm) && nested_svm_nmi(svm);

	return ret;
}

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (masked) {
		svm->vcpu.arch.hflags |= HF_NMI_MASK;
		set_intercept(svm, INTERCEPT_IRET);
	} else {
		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
		clr_intercept(svm, INTERCEPT_IRET);
	}
}

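/*
 * Interrupts require GIF=1, no interrupt shadow and EFLAGS.IF=1;
 * while a nested guest runs, the decision additionally depends on
 * whether the L1 hypervisor uses V_INTR_MASKING (HF_VINTR_MASK).
 */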
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;

	if (!gif_set(svm) ||
	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
		return 0;

	ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);

	if (is_guest_mode(vcpu))
		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);

	return ret;
}

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept. The next time we
	 * get that intercept, this function will be called again though and
	 * we'll get the vintr intercept.
	 */
	if (gif_set(svm) && nested_svm_intr(svm)) {
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
	}
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return; /* IRET will cause a vm exit */

	/*
	 * Something prevents the NMI from being injected. Single step over
	 * the possible problem (IRET or exception injection or interrupt
	 * shadow).
	 */
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
	update_db_intercept(vcpu);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

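/*
 * Requeue whatever event was in flight when the #VMEXIT occurred
 * (exit_int_info) so it is delivered on the next entry. Soft
 * exceptions are re-executed instead of being reinjected.
 */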
static void svm_complete_interrupts(struct vcpu_svm *svm)
{
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	unsigned int3_injected = svm->int3_injected;

	svm->int3_injected = 0;

	if (svm->vcpu.arch.hflags & HF_IRET_MASK) {
		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	}

	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		svm->vcpu.arch.nmi_injected = true;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/*
		 * In case of software exceptions, do not reinject the vector,
		 * but re-execute the instruction instead. Rewind RIP first
		 * if we emulated INT3 before.
		 */
		if (kvm_exception_is_soft(vector)) {
			if (vector == BP_VECTOR && int3_injected &&
			    kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
				kvm_rip_write(&svm->vcpu,
					      kvm_rip_read(&svm->vcpu) -
					      int3_injected);
			break;
		}
		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_requeue_exception_e(&svm->vcpu, vector, err);

		} else
			kvm_requeue_exception(&svm->vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(&svm->vcpu, vector, false);
		break;
	default:
		break;
	}
}

static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	control->exit_int_info = control->event_inj;
	control->exit_int_info_err = control->event_inj_err;
	control->event_inj = 0;
	svm_complete_interrupts(svm);
}

#ifdef CONFIG_X86_64
#define R "r"
#else
#define R "e"
#endif

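/*
 * The world switch. VMLOAD/VMSAVE around VMRUN transfer the guest
 * state that VMRUN itself does not (FS/GS bases, TR, LDTR and the
 * syscall/sysenter MSRs), while the asm block moves the general
 * purpose registers between the vcpu register file and the CPU by
 * hand.
 */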
static void svm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	/*
	 * A vmexit emulation is required before the vcpu can be executed
	 * again.
	 */
	if (unlikely(svm->nested.exit_required))
		return;

	pre_svm_run(svm);

	sync_lapic_to_cr8(vcpu);

	svm->vmcb->save.cr2 = vcpu->arch.cr2;

	clgi();

	local_irq_enable();

	asm volatile (
		"push %%"R"bp; \n\t"
		"mov %c[rbx](%[svm]), %%"R"bx \n\t"
		"mov %c[rcx](%[svm]), %%"R"cx \n\t"
		"mov %c[rdx](%[svm]), %%"R"dx \n\t"
		"mov %c[rsi](%[svm]), %%"R"si \n\t"
		"mov %c[rdi](%[svm]), %%"R"di \n\t"
		"mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%[svm]), %%r8 \n\t"
		"mov %c[r9](%[svm]), %%r9 \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#endif

		/* Enter guest mode */
		"push %%"R"ax \n\t"
		"mov %c[vmcb](%[svm]), %%"R"ax \n\t"
		__ex(SVM_VMLOAD) "\n\t"
		__ex(SVM_VMRUN) "\n\t"
		__ex(SVM_VMSAVE) "\n\t"
		"pop %%"R"ax \n\t"

		/* Save guest registers, load host registers */
		"mov %%"R"bx, %c[rbx](%[svm]) \n\t"
		"mov %%"R"cx, %c[rcx](%[svm]) \n\t"
		"mov %%"R"dx, %c[rdx](%[svm]) \n\t"
		"mov %%"R"si, %c[rsi](%[svm]) \n\t"
		"mov %%"R"di, %c[rdi](%[svm]) \n\t"
		"mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8, %c[r8](%[svm]) \n\t"
		"mov %%r9, %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		"pop %%"R"bp"
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
		, R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#endif
		);

#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
	loadsegment(fs, svm->host.fs);
#endif

	reload_tss(vcpu);

	local_irq_disable();

	stgi();

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;

	/* if exit due to PF check for async PF */
	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
		svm->apf_reason = kvm_read_and_reset_pf_reason();

	if (npt_enabled) {
		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
	}

	/*
	 * We need to handle MC intercepts here before the vcpu has a chance to
	 * change the physical cpu
	 */
	if (unlikely(svm->vmcb->control.exit_code ==
		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
		svm_handle_mce(svm);

	mark_all_clean(svm->vmcb);
}

#undef R

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = root;
	mark_dirty(svm->vmcb, VMCB_CR);
	force_new_asid(vcpu);
}

static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.nested_cr3 = root;
	mark_dirty(svm->vmcb, VMCB_NPT);

	/* Also sync guest cr3 here in case we live migrate */
	svm->vmcb->save.cr3 = vcpu->arch.cr3;
	mark_dirty(svm->vmcb, VMCB_CR);

	force_new_asid(vcpu);
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	return 0;
}

static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}

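/*
 * Adjust the CPUID leaves reported to the guest: hide XSAVE,
 * advertise the SVM bit when nesting is enabled, and build leaf
 * 0x8000000A from the features nested SVM actually emulates.
 */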
static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
	switch (func) {
	case 0x00000001:
		/* Mask out xsave bit as long as it is not supported by SVM */
		entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
		break;
	case 0x80000001:
		if (nested)
			entry->ecx |= (1 << 2); /* Set SVM bit */
		break;
	case 0x8000000A:
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		entry->edx = 0; /* Per default do not support any
				   additional features */

		/* Support next_rip if host supports it */
		if (boot_cpu_has(X86_FEATURE_NRIPS))
			entry->edx |= SVM_FEATURE_NRIP;

		/* Support NPT for the guest if enabled */
		if (npt_enabled)
			entry->edx |= SVM_FEATURE_NPT;

		break;
	}
}

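/* Human-readable exit reason names for the kvm_exit tracepoint. */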
static const struct trace_print_flags svm_exit_reasons_str[] = {
	{ SVM_EXIT_READ_CR0,			"read_cr0" },
	{ SVM_EXIT_READ_CR3,			"read_cr3" },
	{ SVM_EXIT_READ_CR4,			"read_cr4" },
	{ SVM_EXIT_READ_CR8,			"read_cr8" },
	{ SVM_EXIT_WRITE_CR0,			"write_cr0" },
	{ SVM_EXIT_WRITE_CR3,			"write_cr3" },
	{ SVM_EXIT_WRITE_CR4,			"write_cr4" },
	{ SVM_EXIT_WRITE_CR8,			"write_cr8" },
	{ SVM_EXIT_READ_DR0,			"read_dr0" },
	{ SVM_EXIT_READ_DR1,			"read_dr1" },
	{ SVM_EXIT_READ_DR2,			"read_dr2" },
	{ SVM_EXIT_READ_DR3,			"read_dr3" },
	{ SVM_EXIT_WRITE_DR0,			"write_dr0" },
	{ SVM_EXIT_WRITE_DR1,			"write_dr1" },
	{ SVM_EXIT_WRITE_DR2,			"write_dr2" },
	{ SVM_EXIT_WRITE_DR3,			"write_dr3" },
	{ SVM_EXIT_WRITE_DR5,			"write_dr5" },
	{ SVM_EXIT_WRITE_DR7,			"write_dr7" },
	{ SVM_EXIT_EXCP_BASE + DB_VECTOR,	"DB excp" },
	{ SVM_EXIT_EXCP_BASE + BP_VECTOR,	"BP excp" },
	{ SVM_EXIT_EXCP_BASE + UD_VECTOR,	"UD excp" },
	{ SVM_EXIT_EXCP_BASE + PF_VECTOR,	"PF excp" },
	{ SVM_EXIT_EXCP_BASE + NM_VECTOR,	"NM excp" },
	{ SVM_EXIT_EXCP_BASE + MC_VECTOR,	"MC excp" },
	{ SVM_EXIT_INTR,			"interrupt" },
	{ SVM_EXIT_NMI,				"nmi" },
	{ SVM_EXIT_SMI,				"smi" },
	{ SVM_EXIT_INIT,			"init" },
	{ SVM_EXIT_VINTR,			"vintr" },
	{ SVM_EXIT_CPUID,			"cpuid" },
	{ SVM_EXIT_INVD,			"invd" },
	{ SVM_EXIT_HLT,				"hlt" },
	{ SVM_EXIT_INVLPG,			"invlpg" },
	{ SVM_EXIT_INVLPGA,			"invlpga" },
	{ SVM_EXIT_IOIO,			"io" },
	{ SVM_EXIT_MSR,				"msr" },
	{ SVM_EXIT_TASK_SWITCH,			"task_switch" },
	{ SVM_EXIT_SHUTDOWN,			"shutdown" },
	{ SVM_EXIT_VMRUN,			"vmrun" },
	{ SVM_EXIT_VMMCALL,			"hypercall" },
	{ SVM_EXIT_VMLOAD,			"vmload" },
	{ SVM_EXIT_VMSAVE,			"vmsave" },
	{ SVM_EXIT_STGI,			"stgi" },
	{ SVM_EXIT_CLGI,			"clgi" },
	{ SVM_EXIT_SKINIT,			"skinit" },
	{ SVM_EXIT_WBINVD,			"wbinvd" },
	{ SVM_EXIT_MONITOR,			"monitor" },
	{ SVM_EXIT_MWAIT,			"mwait" },
	{ SVM_EXIT_NPF,				"npf" },
	{ -1, NULL }
};

static int svm_get_lpage_level(void)
{
	return PT_PDPE_LEVEL;
}

static bool svm_rdtscp_supported(void)
{
	return false;
}

static bool svm_has_wbinvd_exit(void)
{
	return true;
}

static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	set_exception_intercept(svm, NM_VECTOR);
	update_cr0_intercept(svm);
}

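/*
 * The callback table that wires AMD SVM support into the generic
 * KVM x86 code.
 */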
static struct kvm_x86_ops svm_x86_ops = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
        .hardware_setup = svm_hardware_setup,
        .hardware_unsetup = svm_hardware_unsetup,
        .check_processor_compatibility = svm_check_processor_compat,
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,
        .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

        .vcpu_create = svm_create_vcpu,
        .vcpu_free = svm_free_vcpu,
        .vcpu_reset = svm_vcpu_reset,

        .prepare_guest_switch = svm_prepare_guest_switch,
        .vcpu_load = svm_vcpu_load,
        .vcpu_put = svm_vcpu_put,

        .set_guest_debug = svm_guest_debug,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
        .get_segment = svm_get_segment,
        .set_segment = svm_set_segment,
        .get_cpl = svm_get_cpl,
        .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
        .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
        .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
        .set_cr3 = svm_set_cr3,
        .set_cr4 = svm_set_cr4,
        .set_efer = svm_set_efer,
        .get_idt = svm_get_idt,
        .set_idt = svm_set_idt,
        .get_gdt = svm_get_gdt,
        .set_gdt = svm_set_gdt,
        .set_dr7 = svm_set_dr7,
        .cache_reg = svm_cache_reg,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
        .fpu_activate = svm_fpu_activate,
        .fpu_deactivate = svm_fpu_deactivate,

        .tlb_flush = svm_flush_tlb,

        .run = svm_vcpu_run,
        .handle_exit = handle_exit,
        .skip_emulated_instruction = skip_emulated_instruction,
        .set_interrupt_shadow = svm_set_interrupt_shadow,
        .get_interrupt_shadow = svm_get_interrupt_shadow,
        .patch_hypercall = svm_patch_hypercall,
        .set_irq = svm_set_irq,
        .set_nmi = svm_inject_nmi,
        .queue_exception = svm_queue_exception,
        .cancel_injection = svm_cancel_injection,
        .interrupt_allowed = svm_interrupt_allowed,
        .nmi_allowed = svm_nmi_allowed,
        .get_nmi_mask = svm_get_nmi_mask,
        .set_nmi_mask = svm_set_nmi_mask,
        .enable_nmi_window = enable_nmi_window,
        .enable_irq_window = enable_irq_window,
        .update_cr8_intercept = update_cr8_intercept,

        .set_tss_addr = svm_set_tss_addr,
        .get_tdp_level = get_npt_level,
        .get_mt_mask = svm_get_mt_mask,

        .get_exit_info = svm_get_exit_info,
        .exit_reasons_str = svm_exit_reasons_str,

        .get_lpage_level = svm_get_lpage_level,

        .cpuid_update = svm_cpuid_update,

        .rdtscp_supported = svm_rdtscp_supported,

        .set_supported_cpuid = svm_set_supported_cpuid,

        .has_wbinvd_exit = svm_has_wbinvd_exit,

        .write_tsc_offset = svm_write_tsc_offset,
        .adjust_tsc_offset = svm_adjust_tsc_offset,

        .set_tdp_cr3 = set_tdp_cr3,
};

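/*
 * Register this backend with the generic KVM module, passing the size
 * and alignment of struct vcpu_svm so the common code can allocate
 * vcpus correctly.
 */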
static int __init svm_init(void)
{
        return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
                        __alignof__(struct vcpu_svm), THIS_MODULE);
}

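/* Unregister from the generic KVM code and tear down common state. */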
static void __exit svm_exit(void)
{
        kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)